hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
72decc21ca2961e8a38eb5c2184f5983376ae141.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void hotspotOpt1(float *p, float* tIn, float *tOut, float sdc,
int nx, int ny, int nz,
float ce, float cw,
float cn, float cs,
float ct, float cb,
float cc)
{
float amb_temp = 80.0;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int xy = nx * ny;
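// Note (added comment): at the domain faces the neighbor index is clamped to the
// cell itself, so boundary cells reuse their own value in the stencil below.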
int W = (i == 0) ? c : c - 1;
int E = (i == nx-1) ? c : c + 1;
int N = (j == 0) ? c : c - nx;
int S = (j == ny-1) ? c : c + nx;
float temp1, temp2, temp3;
temp1 = temp2 = tIn[c];
temp3 = tIn[c+xy];
tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S]
+ cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp;
c += xy;
W += xy;
E += xy;
N += xy;
S += xy;
for (int k = 1; k < nz-1; ++k) {
temp1 = temp2;
temp2 = temp3;
temp3 = tIn[c+xy];
tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S]
+ cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp;
c += xy;
W += xy;
E += xy;
N += xy;
S += xy;
}
temp1 = temp2;
temp2 = temp3;
tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S]
+ cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp;
return;
}
void hotspot_opt1(float *p, float *tIn, float *tOut,
int nx, int ny, int nz,
float Cap,
float Rx, float Ry, float Rz,
float dt, int numiter)
{
uint64_t time1=0, time2=0, totalTime1=0, totalTime2=0;
float ce, cw, cn, cs, ct, cb, cc;
float stepDivCap = dt / Cap;
ce = cw =stepDivCap/ Rx;
cn = cs =stepDivCap/ Ry;
ct = cb =stepDivCap/ Rz;
cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);
size_t s = sizeof(float) * nx * ny * nz;
float *tIn_d, *tOut_d, *p_d;
hipMalloc((void**)&p_d,s);
hipMalloc((void**)&tIn_d,s);
hipMalloc((void**)&tOut_d,s);
hipMemcpy(tIn_d, tIn, s, hipMemcpyHostToDevice);
hipMemcpy(p_d, p, s, hipMemcpyHostToDevice);
hipFuncSetCacheConfig(hotspotOpt1, hipFuncCachePreferL1);
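// Note (added comment): the grid uses integer division, so this launch assumes nx and ny
// are multiples of 64 and 4 respectively; otherwise trailing cells are not covered.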
dim3 block_dim(64, 4, 1);
dim3 grid_dim(nx / 64, ny / 4, 1);
totalTime1 = getTime();
for (int i = 0; i < numiter; ++i) {
time1 = getTime();
hipLaunchKernelGGL(( hotspotOpt1), dim3(grid_dim), dim3(block_dim), 0, 0,
p_d, tIn_d, tOut_d, stepDivCap, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc);
time2 = getTime();
printf("1, hotspot3D, %d, %d, %d, %d, %d, %d, \n", numiter, i, nx, ny, nz, (uint64_t)(time2 - time1));
float *t = tIn_d;
tIn_d = tOut_d;
tOut_d = t;
}
totalTime2 = getTime();
printf("1, , , , , , , %d\n", (uint64_t)(totalTime2 - totalTime1));
hipDeviceSynchronize();
hipMemcpy(tOut, tOut_d, s, hipMemcpyDeviceToHost);
hipFree(p_d);
hipFree(tIn_d);
hipFree(tOut_d);
return;
}
| 72decc21ca2961e8a38eb5c2184f5983376ae141.cu | __global__ void hotspotOpt1(float *p, float* tIn, float *tOut, float sdc,
int nx, int ny, int nz,
float ce, float cw,
float cn, float cs,
float ct, float cb,
float cc)
{
float amb_temp = 80.0;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j * nx;
int xy = nx * ny;
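// Note (added comment): at the domain faces the neighbor index is clamped to the
// cell itself, so boundary cells reuse their own value in the stencil below.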
int W = (i == 0) ? c : c - 1;
int E = (i == nx-1) ? c : c + 1;
int N = (j == 0) ? c : c - nx;
int S = (j == ny-1) ? c : c + nx;
float temp1, temp2, temp3;
temp1 = temp2 = tIn[c];
temp3 = tIn[c+xy];
tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S]
+ cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp;
c += xy;
W += xy;
E += xy;
N += xy;
S += xy;
for (int k = 1; k < nz-1; ++k) {
temp1 = temp2;
temp2 = temp3;
temp3 = tIn[c+xy];
tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S]
+ cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp;
c += xy;
W += xy;
E += xy;
N += xy;
S += xy;
}
temp1 = temp2;
temp2 = temp3;
tOut[c] = cc * temp2 + cw * tIn[W] + ce * tIn[E] + cs * tIn[S]
+ cn * tIn[N] + cb * temp1 + ct * temp3 + sdc * p[c] + ct * amb_temp;
return;
}
void hotspot_opt1(float *p, float *tIn, float *tOut,
int nx, int ny, int nz,
float Cap,
float Rx, float Ry, float Rz,
float dt, int numiter)
{
uint64_t time1=0, time2=0, totalTime1=0, totalTime2=0;
float ce, cw, cn, cs, ct, cb, cc;
float stepDivCap = dt / Cap;
ce = cw =stepDivCap/ Rx;
cn = cs =stepDivCap/ Ry;
ct = cb =stepDivCap/ Rz;
cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);
size_t s = sizeof(float) * nx * ny * nz;
float *tIn_d, *tOut_d, *p_d;
cudaMalloc((void**)&p_d,s);
cudaMalloc((void**)&tIn_d,s);
cudaMalloc((void**)&tOut_d,s);
cudaMemcpy(tIn_d, tIn, s, cudaMemcpyHostToDevice);
cudaMemcpy(p_d, p, s, cudaMemcpyHostToDevice);
cudaFuncSetCacheConfig(hotspotOpt1, cudaFuncCachePreferL1);
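// Note (added comment): the grid uses integer division, so this launch assumes nx and ny
// are multiples of 64 and 4 respectively; otherwise trailing cells are not covered.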
dim3 block_dim(64, 4, 1);
dim3 grid_dim(nx / 64, ny / 4, 1);
totalTime1 = getTime();
for (int i = 0; i < numiter; ++i) {
time1 = getTime();
hotspotOpt1<<<grid_dim, block_dim>>>
(p_d, tIn_d, tOut_d, stepDivCap, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc);
time2 = getTime();
printf("1, hotspot3D, %d, %d, %d, %d, %d, %d, \n", numiter, i, nx, ny, nz, (uint64_t)(time2 - time1));
float *t = tIn_d;
tIn_d = tOut_d;
tOut_d = t;
}
totalTime2 = getTime();
printf("1, , , , , , , %d\n", (uint64_t)(totalTime2 - totalTime1));
cudaDeviceSynchronize();
cudaMemcpy(tOut, tOut_d, s, cudaMemcpyDeviceToHost);
cudaFree(p_d);
cudaFree(tIn_d);
cudaFree(tOut_d);
return;
}
|
b0b7c1fbbb332b487a5b91f5f7ec913954bb5bda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include "linalg.h"
#include "state.cuh"
#include <cstdio>
#include <vector>
template <int dim>
__global__ void P2G_backward(TState<dim> state, TState<dim> next_state) {
// Scatter particle gradients to grid nodes
// P2G part of back-propagation
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
auto x = state.get_x(part_id);
auto v = state.get_v(part_id);
auto F = state.get_F(part_id);
auto C = state.get_C(part_id);
auto grad_x_next = next_state.get_grad_x(part_id);
auto grad_v_next = next_state.get_grad_v(part_id);
auto grad_F_next = next_state.get_grad_F(part_id);
auto grad_C_next = next_state.get_grad_C(part_id);
// (A) v_p^n+1, accumulate
grad_v_next = grad_v_next + state.dt * grad_x_next;
// (B) C_p^n+1, accumulate
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
grad_C_next[alpha][beta] +=
state.dt * grad_F_next[alpha][gamma] * F[beta][gamma];
}
}
}
// Accumulate to grad_v and grad_C
// next_state.set_grad_v(part_id, grad_v_next);
// next_state.set_grad_C(part_id, grad_C_next);
TransferCommon<dim, true> tc(state, x);
for (int i = 0; i < kernel_volume<dim>(); i++) {
real N = tc.w(i);
auto dpos = tc.dpos(i);
// (C) v_i^n
real grad_v_i[dim];
for (int alpha = 0; alpha < dim; alpha++) {
grad_v_i[alpha] = grad_v_next[alpha] * N;
for (int beta = 0; beta < dim; beta++) {
grad_v_i[alpha] +=
state.invD * N * grad_C_next[alpha][beta] * dpos[beta];
}
}
auto grad_n =
state.grad_grid_node(tc.base_coord + offset_from_scalar<dim>(i));
for (int d = 0; d < dim; d++) {
// printf("grad_v_i %d %f\n", d, grad_v_i[d]);
atomicAdd(&grad_n[d], grad_v_i[d]);
}
}
}
TC_FORCE_INLINE __device__ real H(real x) {
return x >= 0 ? 1 : 0;
}
template <int dim>
__global__ void grid_backward(TState<dim> state) {
// Grid part of back-propagation: convert velocity gradients to
// momentum/mass gradients and differentiate through the boundary conditions
using Vector = typename TState<dim>::Vector;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < state.num_cells) {
auto node = state.grid_node(id);
auto grad_node = state.grad_grid_node(id);
if (node[dim] > 0) {
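// node[dim] holds the accumulated node mass, so empty grid nodes are skipped.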
// (D)
// Convert grad_v to grad_p
// grad_p = grad_v / m
auto m = node[dim];
real inv_m = 1.0f / m;
auto grad_v_i = Vector(grad_node);
auto v_i = Vector(state.grid_star_node(id));
auto v_i_with_g = v_i;
for (int i = 0; i < dim; i++) {
v_i_with_g[i] += state.gravity[i] * state.dt;
}
auto v_i_star = Vector(state.grid_node(id));
auto bc = state.grid_node_bc(id);
auto normal = Vector(bc);
auto lin = v_i_with_g.dot(normal);
real coeff = bc[dim];
if (coeff == -1) {
// sticky
grad_v_i = Vector(0.0f);
} else if (normal.length2() > 0) {
auto vit = v_i_with_g - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1.0f / lit) * vit;
auto R = lit + coeff * min(lin, 0.0f);
auto litstar = max(R, 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
auto r = vistar - v_i_star;
for (int i = 0; i < dim; i++) {
if (fabs(r[i]) > 1e-6)
printf("mismatch r %f\n", r[i]);
}
auto grad_v_i_star = grad_v_i;
auto grad_litstar = 0.0f;
for (int i = 0; i < dim; i++) {
grad_litstar += grad_v_i_star[i] * vithat[i];
}
Vector grad_vithat = litstar * grad_v_i_star;
auto grad_lit = grad_litstar * H(R);
for (int i = 0; i < dim; i++) {
grad_lit += -1 / (lit * lit) * vit[i] * grad_vithat[i];
}
auto grad_vit = (1 / lit) * (grad_lit * vit + grad_vithat);
auto grad_lin = grad_litstar * H(R) * coeff * H(-lin);
for (int i = 0; i < dim; i++) {
grad_lin -= grad_vit[i] * normal[i];
grad_lin += H(lin) * normal[i] * grad_v_i_star[i];
}
auto new_grad_v_i = grad_lin * normal + grad_vit;
grad_v_i = new_grad_v_i;
}
auto grad_p = inv_m * grad_v_i;
// (E)
real grad_m = 0;
for (int alpha = 0; alpha < dim; alpha++) {
grad_m -= inv_m * v_i[alpha] * grad_v_i[alpha];
grad_node[alpha] = grad_p[alpha];
}
grad_node[dim] = grad_m;
}
}
}
// (F), (G), (H), (I), (J)
template <int dim>
__global__ void G2P_backward(TState<dim> state, TState<dim> next_state) {
// Gather grid gradients back to particles
// G2P part of back-propagation
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
auto x = state.get_x(part_id);
auto v = state.get_v(part_id);
auto F = state.get_F(part_id);
auto C = state.get_C(part_id);
auto P = state.get_P(part_id);
auto A = state.get_A(part_id);
auto grad_F_next = next_state.get_grad_F(part_id);
auto grad_C_next = next_state.get_grad_C(part_id);
auto grad_P_next = next_state.get_grad_P(part_id);
auto grad_v_next = next_state.get_grad_v(part_id);
auto grad_x_next = next_state.get_grad_x(part_id);
auto C_next = next_state.get_C(part_id);
// (A) v_p^n+1, accumulate
grad_v_next = grad_v_next + state.dt * grad_x_next;
// (B) C_p^n+1, accumulate
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
grad_C_next[alpha][beta] +=
state.dt * grad_F_next[alpha][gamma] * F[beta][gamma];
}
}
}
TMatrix<real, dim> grad_P, grad_F, grad_C;
TransferCommon<dim, true> tc(state, x);
{
/*
real dx = 1e-4f;
TransferCommon<true> tc2(state, x + Vector(0, 0, dx));
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
for (int k = 0; k < dim; k++) {
auto d = tc.dw(i, j, k);
printf("%f %f\n", d[2], (tc2.w(i, j, k) - tc.w(i, j, k))/ dx);
}
}
}
*/
}
TVector<real, dim> grad_v;
real grad_P_scale = state.dt * state.invD * state.V_p;
// (G) Compute grad_P
for (int i = 0; i < kernel_volume<dim>(); i++) {
real N = tc.w(i);
auto dpos = tc.dpos(i);
auto grad_p = state.get_grad_grid_velocity(tc.base_coord +
offset_from_scalar<dim>(i));
auto grad_N = tc.dw(i);
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
// (G) P_p^n
for (int gamma = 0; gamma < dim; gamma++) {
grad_P[alpha][beta] +=
-N * grad_P_scale * grad_p[alpha] * F[gamma][beta] * dpos[gamma];
}
// (I) C_p^n
if (mpm_enalbe_apic)
grad_C[alpha][beta] += N * grad_p[alpha] * state.m_p * dpos[beta];
}
}
}
// (H) term 2
if (mpm_enalbe_force) {
Times_Rotated_dP_dF_FixedCorotated(state.mu, state.lambda, F, grad_P,
grad_F);
/*
TMatrix<real, dim> grad_F2;
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
auto inc = F, dec = F;
real delta = 1e-4f;
inc[i][j] += delta;
dec[i][j] -= delta;
auto diff = (1 / (2 * delta)) * (PK1(state.mu, state.lambda, inc) -
PK1(state.mu, state.lambda, dec));
grad_F2 = grad_F2 + grad_P[i][j] * diff;
}
}
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
printf("%d %d: %f %f\n", i, j, grad_F2[i][j] * 1e8,
grad_F[i][j] * 1e8);
}
}
grad_F = grad_F2;
*/
}
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
// (H) term 1
for (int gamma = 0; gamma < dim; gamma++) {
grad_F[alpha][beta] +=
grad_F_next[gamma][beta] *
(real(gamma == alpha) + state.dt * C_next[gamma][alpha]) +
grad_P[alpha][gamma] * A[beta][gamma];
}
}
}
typename TState<dim>::Matrix grad_A;
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
grad_A[alpha][beta] +=
grad_P[gamma][beta] * F[gamma][alpha];
}
}
}
state.set_grad_A(part_id, grad_A);
// (J) term 1
auto grad_x = next_state.get_grad_x(part_id);
// printf("grad_x %f\n", grad_x[0]);
auto G = (mpm_enalbe_force) * -state.invD * state.dt * state.V_p * P *
transposed(F);
if (mpm_enalbe_apic) {
G = G + state.m_p * C;
}
for (int i = 0; i < kernel_volume<dim>(); i++) {
real N = tc.w(i);
auto dpos = tc.dpos(i);
auto grid_coord = tc.base_coord + offset_from_scalar<dim>(i);
auto grad_p = state.get_grad_grid_velocity(grid_coord);
for (int d = 0; d < dim; d++) {
// printf("grad p[%d] %.10f\n", d, grad_p[d]);
}
auto grad_N = tc.dw(i);
auto n = state.grid_node(grid_coord);
auto mi = state.get_grid_mass(grid_coord);
// printf(" m m %f %f\n", mi, n[dim]);
auto vi = state.get_grid_velocity(grid_coord);
auto grad_mi = state.grad_grid_node(grid_coord)[dim];
// printf("%.10f\n", grad_p[0]);
// printf("%.10f\n", grad_p[1]);
// printf("%.10f\n", grad_p[2]);
// printf("\n");
for (int alpha = 0; alpha < dim; alpha++) {
// (F) v_p^n
grad_v[alpha] += N * state.m_p * grad_p[alpha];
// (J) term 5
grad_x[alpha] += grad_N[alpha] * grad_mi * state.m_p;
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
// (H), term 3
grad_F[alpha][beta] +=
-N * grad_p[gamma] * grad_P_scale * P[gamma][beta] * dpos[alpha];
}
// (J), term 2
grad_x[alpha] += grad_v_next[beta] * grad_N[alpha] * vi[beta];
// (J), term 3
auto tmp = -grad_C_next[beta][alpha] * N * vi[beta];
for (int gamma = 0; gamma < dim; gamma++) {
tmp +=
grad_C_next[beta][gamma] * grad_N[alpha] * vi[beta] * dpos[gamma];
}
grad_x[alpha] += state.invD * tmp;
// auto tmp = grad_N[alpha] * vi[beta] * dpos[alpha] - N * vi[beta];
// grad_x[alpha] += state.invD * grad_C_next[beta][alpha] * tmp;
// (J), term 4
grad_x[alpha] +=
grad_p[beta] *
(grad_N[alpha] * (state.m_p * v[beta] + (G * dpos)[beta]) -
N * G[beta][alpha]);
}
}
}
state.set_grad_x(part_id, grad_x);
/*
for (int i = 0; i < dim; i++) {
printf("v %d %f %f\n", i, grad_v[i], grad_x[i]);
}
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
printf("m %d %d %f %f %f %f\n", i, j, grad_F[i][j], grad_C[i][j], F[i][j],
grad_P[i][j]);
}
}
*/
state.set_grad_v(part_id, grad_v);
if (mpm_enalbe_force)
state.set_grad_F(part_id, grad_F);
state.set_grad_C(part_id, grad_C);
}
__device__ real rand_real(int i) {
real t = sinf(i) * 100.0;
return t - floor(t);
}
__global__ void check2d(int k_) {
constexpr int dim = 2;
int k = k_;
auto rand = [&]() { return rand_real(k++); };
using Vector = TVector<real, 2>;
auto grad_v_i = Vector(rand(), rand());
auto v_i = Vector(rand() * 2 - 1, rand() * 2 - 1);
// auto v_i = Vector(-0.5, 0.0);
auto angle = rand() * 2 * 3.14f;
auto normal = Vector(sinf(angle), cosf(angle));
// auto normal = Vector(1, 0);
auto coeff = rand();
// auto coeff = 0;
auto forward = [&](Vector v_i) {
auto lin = v_i.dot(normal);
auto vit = v_i - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1.0f / lit) * vit;
auto R = lit + coeff * min(lin, 0.0f);
auto litstar = max(R, 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
return vistar.dot(grad_v_i);
};
auto lin = v_i.dot(normal);
auto vit = v_i - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1.0f / lit) * vit;
auto R = lit + coeff * min(lin, 0.0f);
auto litstar = max(R, 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
auto grad_v_i_star = grad_v_i;
auto grad_litstar = 0.0f;
for (int i = 0; i < dim; i++) {
grad_litstar += grad_v_i_star[i] * vithat[i];
}
Vector grad_vithat = litstar * grad_v_i_star;
auto grad_lit = grad_litstar * H(R);
for (int i = 0; i < dim; i++) {
grad_lit += -1 / (lit * lit) * vit[i] * grad_vithat[i];
}
auto grad_vit = (1 / lit) * (grad_lit * vit + grad_vithat);
auto grad_lin = grad_litstar * H(R) * coeff * H(-lin);
/*
printf("lit %f\n", lit);
for (int i = 0; i < dim; i++) {
printf("gradlitstar %f\n", grad_litstar);
}
printf("gradlin %f\n", grad_lin);
*/
for (int i = 0; i < dim; i++) {
// printf("normal [%d] %f\n", i, normal[i]);
grad_lin -= grad_vit[i] * normal[i];
grad_lin += H(lin) * normal[i] * grad_v_i_star[i];
}
auto new_grad_v_i = grad_lin * normal + grad_vit;
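// Check the analytic gradient above against a central finite difference of forward().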
real dx = 1e-4f;
for (int d = 0; d < dim; d++) {
Vector delta;
delta[d] = dx;
real f0 = forward(v_i + delta);
real f1 = forward(v_i - delta);
real grad = (f0 - f1) / (2 * dx);
// printf("f0, 1 = %f %f\n", f0, f1);
if (fabs(grad - new_grad_v_i[d]) > 1e-3f) {
printf("errr %d %f %f\n", d, grad, new_grad_v_i[d]);
} else {
// printf("pass %d %f %f\n", d, grad, new_grad_v_i[d]);
}
}
}
template <int dim>
void backward(TState<dim> &state, TState<dim> &next) {
state.clear_gradients();
int num_blocks =
(state.num_particles + particle_block_dim - 1) / particle_block_dim;
int num_blocks_grid = state.grid_size();
hipLaunchKernelGGL(( P2G_backward<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, state, next);
auto err = hipDeviceSynchronize();
hipLaunchKernelGGL(( grid_backward<dim>)
, dim3(state.num_cells / grid_block_dim + 1), dim3(grid_block_dim), 0, 0, state);
hipLaunchKernelGGL(( G2P_backward<dim>), dim3(num_blocks), dim3(particle_block_dim), 0, 0, state, next);
if (err) {
printf("Launch: %s\n", hipGetErrorString(err));
exit(-1);
}
}
void MPMGradKernelLauncher(int dim,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inA,
const real *ingrid,
const real *outx,
const real *outv,
const real *outF,
const real *outC,
const real *outP,
const real *outgrid,
const real *outgrid_star,
real *grad_inx,
real *grad_inv,
real *grad_inF,
real *grad_inC,
real *grad_inA,
real *grad_ingrid,
const real *grad_outx,
const real *grad_outv,
const real *grad_outF,
const real *grad_outC,
const real *grad_outP,
const real *grad_outgrid,
const real *grad_outgrid_star) {
if (dim == 2) {
/*
for (int i = 0; i < 10000; i++) {
check2d<<<1, 1>>>(i * 100);
}
exit(-1);
*/
constexpr int dim = 2;
auto current = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)inx, (real *)inv,
(real *)inF, (real *)inC, (real *)outP, (real *)inA, (real *)outgrid,
grad_inx, grad_inv, grad_inF, grad_inC, grad_inA, (real *)grad_outP,
(real *)grad_outgrid);
current->grid_bc = const_cast<real *>(ingrid);
current->grid_star_storage = const_cast<real *>(outgrid_star);
current->set(V_p, m_p, E, nu);
auto next = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)outx, (real *)outv,
(real *)outF, (real *)outC, nullptr, nullptr, nullptr,
(real *)grad_outx, (real *)grad_outv, (real *)grad_outF,
(real *)grad_outC, nullptr, nullptr, nullptr);
next->set(V_p, m_p, E, nu);
backward<dim>(*current, *next);
} else {
constexpr int dim = 3;
auto current = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)inx, (real *)inv,
(real *)inF, (real *)inC, (real *)outP, (real *)inA, (real *)outgrid,
grad_inx, grad_inv, grad_inF, grad_inC, grad_inA, (real *)grad_outP,
(real *)grad_outgrid);
current->grid_bc = const_cast<real *>(ingrid);
current->grid_star_storage = const_cast<real *>(outgrid_star);
current->set(V_p, m_p, E, nu);
auto next = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)outx, (real *)outv,
(real *)outF, (real *)outC, nullptr, nullptr, nullptr,
(real *)grad_outx, (real *)grad_outv, (real *)grad_outF,
(real *)grad_outC, nullptr, nullptr, nullptr);
next->set(V_p, m_p, E, nu);
backward<dim>(*current, *next);
}
}
template <int dim>
void backward_mpm_state(void *state_, void *next_state_) {
TState<dim> *state = reinterpret_cast<TState<dim> *>(state_);
TState<dim> *next_state = reinterpret_cast<TState<dim> *>(next_state_);
backward<dim>(*state, *next_state);
}
template void backward_mpm_state<2>(void *state_, void *next_state_);
template void backward_mpm_state<3>(void *state_, void *next_state_);
void set_grad_loss(void *state_) {
constexpr int dim = 3;
TState<3> *state = reinterpret_cast<TState<3> *>(state_);
state->clear_gradients();
int num_particles = state->num_particles;
std::vector<float> grad_x_host(num_particles * dim);
for (int i = 0; i < num_particles; i++) {
grad_x_host[i] = 1.0f / num_particles;
}
hipMemcpy(state->grad_x_storage, grad_x_host.data(),
sizeof(real) * dim * num_particles, hipMemcpyHostToDevice);
}
| b0b7c1fbbb332b487a5b91f5f7ec913954bb5bda.cu | #include "kernels.h"
#include "linalg.h"
#include "state.cuh"
#include <cstdio>
#include <vector>
template <int dim>
__global__ void P2G_backward(TState<dim> state, TState<dim> next_state) {
// Scatter particle gradients to grid nodes
// P2G part of back-propagation
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
auto x = state.get_x(part_id);
auto v = state.get_v(part_id);
auto F = state.get_F(part_id);
auto C = state.get_C(part_id);
auto grad_x_next = next_state.get_grad_x(part_id);
auto grad_v_next = next_state.get_grad_v(part_id);
auto grad_F_next = next_state.get_grad_F(part_id);
auto grad_C_next = next_state.get_grad_C(part_id);
// (A) v_p^n+1, accumulate
grad_v_next = grad_v_next + state.dt * grad_x_next;
// (B) C_p^n+1, accumulate
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
grad_C_next[alpha][beta] +=
state.dt * grad_F_next[alpha][gamma] * F[beta][gamma];
}
}
}
// Accumulate to grad_v and grad_C
// next_state.set_grad_v(part_id, grad_v_next);
// next_state.set_grad_C(part_id, grad_C_next);
TransferCommon<dim, true> tc(state, x);
for (int i = 0; i < kernel_volume<dim>(); i++) {
real N = tc.w(i);
auto dpos = tc.dpos(i);
// (C) v_i^n
real grad_v_i[dim];
for (int alpha = 0; alpha < dim; alpha++) {
grad_v_i[alpha] = grad_v_next[alpha] * N;
for (int beta = 0; beta < dim; beta++) {
grad_v_i[alpha] +=
state.invD * N * grad_C_next[alpha][beta] * dpos[beta];
}
}
auto grad_n =
state.grad_grid_node(tc.base_coord + offset_from_scalar<dim>(i));
for (int d = 0; d < dim; d++) {
// printf("grad_v_i %d %f\n", d, grad_v_i[d]);
atomicAdd(&grad_n[d], grad_v_i[d]);
}
}
}
TC_FORCE_INLINE __device__ real H(real x) {
return x >= 0 ? 1 : 0;
}
template <int dim>
__global__ void grid_backward(TState<dim> state) {
// Grid part of back-propagation: convert velocity gradients to
// momentum/mass gradients and differentiate through the boundary conditions
using Vector = typename TState<dim>::Vector;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < state.num_cells) {
auto node = state.grid_node(id);
auto grad_node = state.grad_grid_node(id);
if (node[dim] > 0) {
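// node[dim] holds the accumulated node mass, so empty grid nodes are skipped.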
// (D)
// Convert grad_v to grad_p
// grad_p = grad_v / m
auto m = node[dim];
real inv_m = 1.0f / m;
auto grad_v_i = Vector(grad_node);
auto v_i = Vector(state.grid_star_node(id));
auto v_i_with_g = v_i;
for (int i = 0; i < dim; i++) {
v_i_with_g[i] += state.gravity[i] * state.dt;
}
auto v_i_star = Vector(state.grid_node(id));
auto bc = state.grid_node_bc(id);
auto normal = Vector(bc);
auto lin = v_i_with_g.dot(normal);
real coeff = bc[dim];
if (coeff == -1) {
// sticky
grad_v_i = Vector(0.0f);
} else if (normal.length2() > 0) {
auto vit = v_i_with_g - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1.0f / lit) * vit;
auto R = lit + coeff * min(lin, 0.0f);
auto litstar = max(R, 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
auto r = vistar - v_i_star;
for (int i = 0; i < dim; i++) {
if (fabs(r[i]) > 1e-6)
printf("mismatch r %f\n", r[i]);
}
auto grad_v_i_star = grad_v_i;
auto grad_litstar = 0.0f;
for (int i = 0; i < dim; i++) {
grad_litstar += grad_v_i_star[i] * vithat[i];
}
Vector grad_vithat = litstar * grad_v_i_star;
auto grad_lit = grad_litstar * H(R);
for (int i = 0; i < dim; i++) {
grad_lit += -1 / (lit * lit) * vit[i] * grad_vithat[i];
}
auto grad_vit = (1 / lit) * (grad_lit * vit + grad_vithat);
auto grad_lin = grad_litstar * H(R) * coeff * H(-lin);
for (int i = 0; i < dim; i++) {
grad_lin -= grad_vit[i] * normal[i];
grad_lin += H(lin) * normal[i] * grad_v_i_star[i];
}
auto new_grad_v_i = grad_lin * normal + grad_vit;
grad_v_i = new_grad_v_i;
}
auto grad_p = inv_m * grad_v_i;
// (E)
real grad_m = 0;
for (int alpha = 0; alpha < dim; alpha++) {
grad_m -= inv_m * v_i[alpha] * grad_v_i[alpha];
grad_node[alpha] = grad_p[alpha];
}
grad_node[dim] = grad_m;
}
}
}
// (F), (G), (H), (I), (J)
template <int dim>
__global__ void G2P_backward(TState<dim> state, TState<dim> next_state) {
// Gather grid gradients back to particles
// G2P part of back-propagation
int part_id = blockIdx.x * blockDim.x + threadIdx.x;
if (part_id >= state.num_particles) {
return;
}
auto x = state.get_x(part_id);
auto v = state.get_v(part_id);
auto F = state.get_F(part_id);
auto C = state.get_C(part_id);
auto P = state.get_P(part_id);
auto A = state.get_A(part_id);
auto grad_F_next = next_state.get_grad_F(part_id);
auto grad_C_next = next_state.get_grad_C(part_id);
auto grad_P_next = next_state.get_grad_P(part_id);
auto grad_v_next = next_state.get_grad_v(part_id);
auto grad_x_next = next_state.get_grad_x(part_id);
auto C_next = next_state.get_C(part_id);
// (A) v_p^n+1, accumulate
grad_v_next = grad_v_next + state.dt * grad_x_next;
// (B) C_p^n+1, accumulate
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
grad_C_next[alpha][beta] +=
state.dt * grad_F_next[alpha][gamma] * F[beta][gamma];
}
}
}
TMatrix<real, dim> grad_P, grad_F, grad_C;
TransferCommon<dim, true> tc(state, x);
{
/*
real dx = 1e-4f;
TransferCommon<true> tc2(state, x + Vector(0, 0, dx));
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
for (int k = 0; k < dim; k++) {
auto d = tc.dw(i, j, k);
printf("%f %f\n", d[2], (tc2.w(i, j, k) - tc.w(i, j, k))/ dx);
}
}
}
*/
}
TVector<real, dim> grad_v;
real grad_P_scale = state.dt * state.invD * state.V_p;
// (G) Compute grad_P
for (int i = 0; i < kernel_volume<dim>(); i++) {
real N = tc.w(i);
auto dpos = tc.dpos(i);
auto grad_p = state.get_grad_grid_velocity(tc.base_coord +
offset_from_scalar<dim>(i));
auto grad_N = tc.dw(i);
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
// (G) P_p^n
for (int gamma = 0; gamma < dim; gamma++) {
grad_P[alpha][beta] +=
-N * grad_P_scale * grad_p[alpha] * F[gamma][beta] * dpos[gamma];
}
// (I) C_p^n
if (mpm_enalbe_apic)
grad_C[alpha][beta] += N * grad_p[alpha] * state.m_p * dpos[beta];
}
}
}
// (H) term 2
if (mpm_enalbe_force) {
Times_Rotated_dP_dF_FixedCorotated(state.mu, state.lambda, F, grad_P,
grad_F);
/*
TMatrix<real, dim> grad_F2;
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
auto inc = F, dec = F;
real delta = 1e-4f;
inc[i][j] += delta;
dec[i][j] -= delta;
auto diff = (1 / (2 * delta)) * (PK1(state.mu, state.lambda, inc) -
PK1(state.mu, state.lambda, dec));
grad_F2 = grad_F2 + grad_P[i][j] * diff;
}
}
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
printf("%d %d: %f %f\n", i, j, grad_F2[i][j] * 1e8,
grad_F[i][j] * 1e8);
}
}
grad_F = grad_F2;
*/
}
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
// (H) term 1
for (int gamma = 0; gamma < dim; gamma++) {
grad_F[alpha][beta] +=
grad_F_next[gamma][beta] *
(real(gamma == alpha) + state.dt * C_next[gamma][alpha]) +
grad_P[alpha][gamma] * A[beta][gamma];
}
}
}
typename TState<dim>::Matrix grad_A;
for (int alpha = 0; alpha < dim; alpha++) {
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
grad_A[alpha][beta] +=
grad_P[gamma][beta] * F[gamma][alpha];
}
}
}
state.set_grad_A(part_id, grad_A);
// (J) term 1
auto grad_x = next_state.get_grad_x(part_id);
// printf("grad_x %f\n", grad_x[0]);
auto G = (mpm_enalbe_force) * -state.invD * state.dt * state.V_p * P *
transposed(F);
if (mpm_enalbe_apic) {
G = G + state.m_p * C;
}
for (int i = 0; i < kernel_volume<dim>(); i++) {
real N = tc.w(i);
auto dpos = tc.dpos(i);
auto grid_coord = tc.base_coord + offset_from_scalar<dim>(i);
auto grad_p = state.get_grad_grid_velocity(grid_coord);
for (int d = 0; d < dim; d++) {
// printf("grad p[%d] %.10f\n", d, grad_p[d]);
}
auto grad_N = tc.dw(i);
auto n = state.grid_node(grid_coord);
auto mi = state.get_grid_mass(grid_coord);
// printf(" m m %f %f\n", mi, n[dim]);
auto vi = state.get_grid_velocity(grid_coord);
auto grad_mi = state.grad_grid_node(grid_coord)[dim];
// printf("%.10f\n", grad_p[0]);
// printf("%.10f\n", grad_p[1]);
// printf("%.10f\n", grad_p[2]);
// printf("\n");
for (int alpha = 0; alpha < dim; alpha++) {
// (F) v_p^n
grad_v[alpha] += N * state.m_p * grad_p[alpha];
// (J) term 5
grad_x[alpha] += grad_N[alpha] * grad_mi * state.m_p;
for (int beta = 0; beta < dim; beta++) {
for (int gamma = 0; gamma < dim; gamma++) {
// (H), term 3
grad_F[alpha][beta] +=
-N * grad_p[gamma] * grad_P_scale * P[gamma][beta] * dpos[alpha];
}
// (J), term 2
grad_x[alpha] += grad_v_next[beta] * grad_N[alpha] * vi[beta];
// (J), term 3
auto tmp = -grad_C_next[beta][alpha] * N * vi[beta];
for (int gamma = 0; gamma < dim; gamma++) {
tmp +=
grad_C_next[beta][gamma] * grad_N[alpha] * vi[beta] * dpos[gamma];
}
grad_x[alpha] += state.invD * tmp;
// auto tmp = grad_N[alpha] * vi[beta] * dpos[alpha] - N * vi[beta];
// grad_x[alpha] += state.invD * grad_C_next[beta][alpha] * tmp;
// (J), term 4
grad_x[alpha] +=
grad_p[beta] *
(grad_N[alpha] * (state.m_p * v[beta] + (G * dpos)[beta]) -
N * G[beta][alpha]);
}
}
}
state.set_grad_x(part_id, grad_x);
/*
for (int i = 0; i < dim; i++) {
printf("v %d %f %f\n", i, grad_v[i], grad_x[i]);
}
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
printf("m %d %d %f %f %f %f\n", i, j, grad_F[i][j], grad_C[i][j], F[i][j],
grad_P[i][j]);
}
}
*/
state.set_grad_v(part_id, grad_v);
if (mpm_enalbe_force)
state.set_grad_F(part_id, grad_F);
state.set_grad_C(part_id, grad_C);
}
__device__ real rand_real(int i) {
real t = sinf(i) * 100.0;
return t - floor(t);
}
__global__ void check2d(int k_) {
constexpr int dim = 2;
int k = k_;
auto rand = [&]() { return rand_real(k++); };
using Vector = TVector<real, 2>;
auto grad_v_i = Vector(rand(), rand());
auto v_i = Vector(rand() * 2 - 1, rand() * 2 - 1);
// auto v_i = Vector(-0.5, 0.0);
auto angle = rand() * 2 * 3.14f;
auto normal = Vector(sinf(angle), cosf(angle));
// auto normal = Vector(1, 0);
auto coeff = rand();
// auto coeff = 0;
auto forward = [&](Vector v_i) {
auto lin = v_i.dot(normal);
auto vit = v_i - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1.0f / lit) * vit;
auto R = lit + coeff * min(lin, 0.0f);
auto litstar = max(R, 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
return vistar.dot(grad_v_i);
};
auto lin = v_i.dot(normal);
auto vit = v_i - lin * normal;
auto lit = sqrt(vit.length2() + 1e-7);
auto vithat = (1.0f / lit) * vit;
auto R = lit + coeff * min(lin, 0.0f);
auto litstar = max(R, 0.0f);
auto vistar = litstar * vithat + max(lin, 0.0f) * normal;
auto grad_v_i_star = grad_v_i;
auto grad_litstar = 0.0f;
for (int i = 0; i < dim; i++) {
grad_litstar += grad_v_i_star[i] * vithat[i];
}
Vector grad_vithat = litstar * grad_v_i_star;
auto grad_lit = grad_litstar * H(R);
for (int i = 0; i < dim; i++) {
grad_lit += -1 / (lit * lit) * vit[i] * grad_vithat[i];
}
auto grad_vit = (1 / lit) * (grad_lit * vit + grad_vithat);
auto grad_lin = grad_litstar * H(R) * coeff * H(-lin);
/*
printf("lit %f\n", lit);
for (int i = 0; i < dim; i++) {
printf("gradlitstar %f\n", grad_litstar);
}
printf("gradlin %f\n", grad_lin);
*/
for (int i = 0; i < dim; i++) {
// printf("normal [%d] %f\n", i, normal[i]);
grad_lin -= grad_vit[i] * normal[i];
grad_lin += H(lin) * normal[i] * grad_v_i_star[i];
}
auto new_grad_v_i = grad_lin * normal + grad_vit;
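// Check the analytic gradient above against a central finite difference of forward().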
real dx = 1e-4f;
for (int d = 0; d < dim; d++) {
Vector delta;
delta[d] = dx;
real f0 = forward(v_i + delta);
real f1 = forward(v_i - delta);
real grad = (f0 - f1) / (2 * dx);
// printf("f0, 1 = %f %f\n", f0, f1);
if (fabs(grad - new_grad_v_i[d]) > 1e-3f) {
printf("errr %d %f %f\n", d, grad, new_grad_v_i[d]);
} else {
// printf("pass %d %f %f\n", d, grad, new_grad_v_i[d]);
}
}
}
template <int dim>
void backward(TState<dim> &state, TState<dim> &next) {
state.clear_gradients();
int num_blocks =
(state.num_particles + particle_block_dim - 1) / particle_block_dim;
int num_blocks_grid = state.grid_size();
P2G_backward<dim><<<num_blocks, particle_block_dim>>>(state, next);
auto err = cudaThreadSynchronize();
grid_backward<dim>
<<<state.num_cells / grid_block_dim + 1, grid_block_dim>>>(state);
G2P_backward<dim><<<num_blocks, particle_block_dim>>>(state, next);
if (err) {
printf("Launch: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void MPMGradKernelLauncher(int dim,
int *res,
int num_particles,
real dx,
real dt,
real E,
real nu,
real m_p,
real V_p,
real *gravity,
const real *inx,
const real *inv,
const real *inF,
const real *inC,
const real *inA,
const real *ingrid,
const real *outx,
const real *outv,
const real *outF,
const real *outC,
const real *outP,
const real *outgrid,
const real *outgrid_star,
real *grad_inx,
real *grad_inv,
real *grad_inF,
real *grad_inC,
real *grad_inA,
real *grad_ingrid,
const real *grad_outx,
const real *grad_outv,
const real *grad_outF,
const real *grad_outC,
const real *grad_outP,
const real *grad_outgrid,
const real *grad_outgrid_star) {
if (dim == 2) {
/*
for (int i = 0; i < 10000; i++) {
check2d<<<1, 1>>>(i * 100);
}
exit(-1);
*/
constexpr int dim = 2;
auto current = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)inx, (real *)inv,
(real *)inF, (real *)inC, (real *)outP, (real *)inA, (real *)outgrid,
grad_inx, grad_inv, grad_inF, grad_inC, grad_inA, (real *)grad_outP,
(real *)grad_outgrid);
current->grid_bc = const_cast<real *>(ingrid);
current->grid_star_storage = const_cast<real *>(outgrid_star);
current->set(V_p, m_p, E, nu);
auto next = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)outx, (real *)outv,
(real *)outF, (real *)outC, nullptr, nullptr, nullptr,
(real *)grad_outx, (real *)grad_outv, (real *)grad_outF,
(real *)grad_outC, nullptr, nullptr, nullptr);
next->set(V_p, m_p, E, nu);
backward<dim>(*current, *next);
} else {
constexpr int dim = 3;
auto current = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)inx, (real *)inv,
(real *)inF, (real *)inC, (real *)outP, (real *)inA, (real *)outgrid,
grad_inx, grad_inv, grad_inF, grad_inC, grad_inA, (real *)grad_outP,
(real *)grad_outgrid);
current->grid_bc = const_cast<real *>(ingrid);
current->grid_star_storage = const_cast<real *>(outgrid_star);
current->set(V_p, m_p, E, nu);
auto next = new TState<dim>(
res, num_particles, dx, dt, gravity, (real *)outx, (real *)outv,
(real *)outF, (real *)outC, nullptr, nullptr, nullptr,
(real *)grad_outx, (real *)grad_outv, (real *)grad_outF,
(real *)grad_outC, nullptr, nullptr, nullptr);
next->set(V_p, m_p, E, nu);
backward<dim>(*current, *next);
}
}
template <int dim>
void backward_mpm_state(void *state_, void *next_state_) {
TState<dim> *state = reinterpret_cast<TState<dim> *>(state_);
TState<dim> *next_state = reinterpret_cast<TState<dim> *>(next_state_);
backward<dim>(*state, *next_state);
}
template void backward_mpm_state<2>(void *state_, void *next_state_);
template void backward_mpm_state<3>(void *state_, void *next_state_);
void set_grad_loss(void *state_) {
constexpr int dim = 3;
TState<3> *state = reinterpret_cast<TState<3> *>(state_);
state->clear_gradients();
int num_particles = state->num_particles;
std::vector<float> grad_x_host(num_particles * dim);
for (int i = 0; i < num_particles; i++) {
grad_x_host[i] = 1.0f / num_particles;
}
cudaMemcpy(state->grad_x_storage, grad_x_host.data(),
sizeof(real) * dim * num_particles, cudaMemcpyHostToDevice);
}
|
dc695964f550b15f06b42eee8a07b40813fa98d8.hip | // !!! This is a file automatically generated by hipify!!!
/*
This file is part of the MRF_CUDA package (https://github.com/chixindebaoyu/MRF_CUDA).
The MIT License (MIT)
Copyright (c) Dong Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "functions.h"
extern int blocksize;
extern int gridsize;
extern hipStream_t stream[2];
int
main(int argc, char *argv[])
{
// Setup CUDA
hipSetDevice(0);
cuTry(hipStreamCreate(&stream[0]));
cuTry(hipStreamCreate(&stream[1]));
// Setup default values
const char *in_mrf, *in_img, *in_b1map, *out_atoms, *out_maps, *out_params;
in_mrf = "../data/MRF001.csv";
in_img = NULL;
in_b1map = NULL;
out_maps = NULL;
out_atoms = NULL;
out_params = NULL;
const char *T1, *T2, *B0, *B1, *echo_type;
T1 = "20:20:3000";
T2 = "20:10:1000";
B0 = "0";
B1 = "1";
echo_type = "epg_ssfp";
int nparams = 4;
int nmaps = nparams + 1;
int nstates = 101;
size_t nreps = 1500;
size_t NATOMS = 0;
size_t natoms = 0;
// Read command line
struct option long_options[] =
{
{"b1map", 1, NULL, 'M'},
{"atoms", 1, NULL, 'a'},
{"map", 1, NULL, 'm'},
{"params", 1, NULL, 'p'},
{"T1", 1, NULL, 't'},
{"T2", 1, NULL, 's'},
{"B0", 1, NULL, 'b'},
{"B1", 1, NULL, 'r'},
{"nstates", 1, NULL, 'w'},
{"echo_type", 1, NULL, 'e'},
{"nreps", 1, NULL, 'T'},
{"gridsize", 1, NULL, 'G'},
{"blocksize", 1, NULL, 'B'},
{"help", 0, 0, 'h'}
};
extern int optind;
opterr = 0;
int option_index = 0;
int c;
while ((c =
getopt_long(argc, argv, "M:N:a:m:p:t:s:b:r:w:e:T:G:B:h",
long_options, &option_index)) != -1)
{
switch (c)
{
case 'M':
in_b1map = optarg;
break;
case 'N':
NATOMS = atoi(optarg);
break;
case 'a':
out_atoms = optarg;
break;
case 'm':
out_maps = optarg;
break;
case 'p':
out_params = optarg;
break;
case 't':
T1 = optarg;
break;
case 's':
T2 = optarg;
break;
case 'b':
B0 = optarg;
break;
case 'r':
B1 = optarg;
break;
case 'w':
nstates = atoi(optarg);
break;
case 'e':
echo_type = optarg;
break;
case 'T':
nreps = atoi(optarg);
break;
case 'G':
gridsize = atoi(optarg);
break;
case 'B':
blocksize = atoi(optarg);
break;
case 'h':
default:
print_usage();
return 1;
}
}
int csv_count = 0;
int ra_count = 0;
const char *tmp;
while (optind <= argc-1)
{
tmp = argv[optind];
if ((strstr(tmp,".ra") != NULL) && (ra_count == 0))
{
in_img = tmp;
ra_count ++;
}
if ((strstr(tmp,".csv") != NULL) && (csv_count == 0))
{
in_mrf = tmp;
csv_count ++;
}
if (csv_count+ra_count == 2)
break;
optind ++;
}
printf("\n");
printf("nstates : %d\n", nstates);
printf("Blocksize : %d\n", blocksize);
printf("Gridsize : %d\n", gridsize);
printf("Echo : %s\n", echo_type);
printf("\n");
// Parse parameters
size_t l_t1, l_t2, l_b0, l_b1;
float *h_t1, *h_t2, *h_b0, *h_b1;
if (NATOMS != 0)
{
printf("Hard coding for parameters...\n");
l_b1 = 11;
l_b0 = 21;
l_t1 = roundf(sqrtf(NATOMS / l_b0 / l_b1));
if (l_t1 == 0)
l_t1 = 1;
l_t2 = l_t1;
h_t1 = (float *) safe_malloc(l_t1 * sizeof(float));
logspace(h_t1, 100.f, 3000.f, l_t1);
h_t2 = (float *) safe_malloc(l_t2 * sizeof(float));
logspace(h_t2, 20.f, 1000.f, l_t2);
h_b0 = (float *) safe_malloc(l_b0 * sizeof(float));
logspace(h_b0, -150.f, 150.f, l_b0);
h_b1 = (float *) safe_malloc(l_b1 * sizeof(float));
logspace(h_b1, 0.5, 1.5, l_b1);
}
else
{
printf("Parsing parameters...\n");
printf("T1 : %s\n", T1);
printf("T2 : %s\n", T2);
printf("B0 : %s\n", B0);
printf("B1 : %s\n", B1);
l_t1 = parse_length(T1);
h_t1 = (float *) safe_malloc(l_t1 * sizeof(float));
parse_params(h_t1, T1, l_t1);
l_t2 = parse_length(T2);
h_t2 = (float *) safe_malloc(l_t2 * sizeof(float));
parse_params(h_t2, T2, l_t2);
l_b0 = parse_length(B0);
h_b0 = (float *) safe_malloc(l_b0 * sizeof(float));
parse_params(h_b0, B0, l_b0);
l_b1 = parse_length(B1);
h_b1 = (float *) safe_malloc(l_b1 * sizeof(float));
parse_params(h_b1, B1, l_b1);
}
// The number of atoms of the whole dictionary
NATOMS = compute_natoms(h_t1, h_t2, l_t1, l_t2, l_b0, l_b1);
// The number of atoms per b1
natoms = NATOMS/l_b1;
printf("l_t1: %lu, l_t2: %lu, l_b0: %lu, l_b1: %lu\n",
l_t1, l_t2, l_b0, l_b1);
printf("NATOMS: %lu, natoms: %lu\n", NATOMS, natoms);
printf("Removing situations when T1 <= T2\n");
printf("NATOMS might be smaller than input\n");
if (NATOMS == 0)
err(EX_SOFTWARE, "Number of atoms is 0!\n");
printf("\n");
// Transfer h_t1, h_t2, h_b0 and h_b1 into h_params;
float *h_params =
(float *) safe_malloc(NATOMS * nparams * sizeof(float));
trans_params(h_params, h_t1, h_t2, h_b0, h_b1, l_t1, l_t2, l_b0, l_b1,
NATOMS, nparams);
float *d_params;
cuTry(hipMalloc((void **) &d_params,
nparams * NATOMS * sizeof(float)));
cuTry(hipMemcpyAsync(d_params, h_params,
nparams * NATOMS * sizeof(float), hipMemcpyHostToDevice,
stream[1]));
safe_free(h_t1);
safe_free(h_t2);
safe_free(h_b0);
safe_free(h_b1);
safe_free(h_params);
// Read input mrf
printf("Input mrf: %s\n", in_mrf);
printf("Reading %s\n", in_mrf);
size_t NREPS = csv_dim(in_mrf) - 1;
if (nreps > NREPS)
nreps = NREPS;
printf("NREPS: %lu, nreps: %lu\n", NREPS, nreps);
float *h_mrf = (float *) safe_malloc(NREPS * 4 * sizeof(float));
csv_read(h_mrf, NREPS, in_mrf);
printf("\n");
// Run MRF Dictionary
float2 *d_atoms;
cuTry(hipMalloc((void **) &d_atoms,
nreps * NATOMS * sizeof(float2)));
clock_t start, stop;
start = clock();
MRF_dict(d_atoms, d_params, h_mrf, nreps, NREPS, NATOMS, natoms,
nparams, nstates, echo_type);
hipDeviceSynchronize();
stop = clock();
printf("Elapsed dictionary time: %.2f s\n",
((float) (stop - start)) / CLOCKS_PER_SEC);
printf("\n");
safe_free(h_mrf);
// Run matching if in_img is specified
if ((in_img == NULL) && (out_maps != NULL))
printf("NO data specified! Abort matching!\n");
if (in_img != NULL)
{
printf("Input img: %s\n", in_img);
ra_t ra_img;
printf("Reading %s\n", in_img);
ra_read(&ra_img, in_img);
float2 *h_img = (float2 *) ra_img.data;
size_t nt = ra_img.dims[0];
size_t nvoxels = ra_img.dims[1] * ra_img.dims[2];
printf("nt: %lu, nx: %llu, ny: %llu\n", nt, ra_img.dims[1],
ra_img.dims[2]);
printf("\n");
// Run MRF Matching
float *d_maps;
cuTry(hipMalloc((void **) &d_maps,
nmaps * nvoxels * sizeof(float)));
start = clock();
MRF_match(d_maps, h_img, d_atoms, d_params,
nreps, nt, NATOMS, natoms, nvoxels, nparams, nmaps, in_b1map);
hipDeviceSynchronize();
stop = clock();
printf("Elapsed matching time: %.2f s\n",
((float) (stop - start)) / CLOCKS_PER_SEC);
printf("\n");
// Save maps
if (out_maps == NULL)
out_maps = "../result/maps.ra";
printf("Output maps: %s\n", out_maps);
float *h_map;
h_map = (float *) safe_malloc(nmaps * nvoxels * sizeof(float));
cuTry(hipMemcpyAsync(h_map, d_maps,
nmaps * nvoxels * sizeof(float),
hipMemcpyDeviceToHost, stream[1]));
save_rafile(h_map, out_maps, nmaps, nvoxels);
safe_free(h_map);
ra_free(&ra_img);
cuTry(hipFree(d_maps));
}
// Save atoms
if (out_atoms != NULL)
{
printf("Output atoms: %s\n", out_atoms);
float2 *h_atoms;
h_atoms = (float2 *) safe_malloc(nreps * NATOMS * sizeof(float2));
cuTry(hipMemcpyAsync(h_atoms, d_atoms,
nreps * NATOMS * sizeof(float2), hipMemcpyDeviceToHost,
stream[0]));
save_rafile(h_atoms, out_atoms, nreps, NATOMS);
safe_free(h_atoms);
cuTry(hipFree(d_atoms));
}
else
cuTry(hipFree(d_atoms));
// Save parameters
if (out_params != NULL)
{
printf("Output params: %s\n", out_params);
float *h_params;
h_params = (float *) safe_malloc(nparams * NATOMS * sizeof(float));
cuTry(hipMemcpyAsync(h_params, d_params,
nparams * NATOMS * sizeof(float), hipMemcpyDeviceToHost,
stream[0]));
save_rafile(h_params, out_params, nparams, NATOMS);
safe_free(h_params);
cuTry(hipFree(d_params));
}
else
cuTry(hipFree(d_params));
// Destroy CUDA streams
cuTry(hipStreamDestroy(stream[0]));
cuTry(hipStreamDestroy(stream[1]));
return 0;
}
| dc695964f550b15f06b42eee8a07b40813fa98d8.cu | /*
This file is part of the MRF_CUDA package (https://github.com/chixindebaoyu/MRF_CUDA).
The MIT License (MIT)
Copyright (c) Dong Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "functions.h"
extern int blocksize;
extern int gridsize;
extern cudaStream_t stream[2];
int
main(int argc, char *argv[])
{
// Setup CUDA
cudaSetDevice(0);
cuTry(cudaStreamCreate(&stream[0]));
cuTry(cudaStreamCreate(&stream[1]));
// Setup default values
const char *in_mrf, *in_img, *in_b1map, *out_atoms, *out_maps, *out_params;
in_mrf = "../data/MRF001.csv";
in_img = NULL;
in_b1map = NULL;
out_maps = NULL;
out_atoms = NULL;
out_params = NULL;
const char *T1, *T2, *B0, *B1, *echo_type;
T1 = "20:20:3000";
T2 = "20:10:1000";
B0 = "0";
B1 = "1";
echo_type = "epg_ssfp";
int nparams = 4;
int nmaps = nparams + 1;
int nstates = 101;
size_t nreps = 1500;
size_t NATOMS = 0;
size_t natoms = 0;
// Read command line
struct option long_options[] =
{
{"b1map", 1, NULL, 'M'},
{"atoms", 1, NULL, 'a'},
{"map", 1, NULL, 'm'},
{"params", 1, NULL, 'p'},
{"T1", 1, NULL, 't'},
{"T2", 1, NULL, 's'},
{"B0", 1, NULL, 'b'},
{"B1", 1, NULL, 'r'},
{"nstates", 1, NULL, 'w'},
{"echo_type", 1, NULL, 'e'},
{"nreps", 1, NULL, 'T'},
{"gridsize", 1, NULL, 'G'},
{"blocksize", 1, NULL, 'B'},
{"help", 0, 0, 'h'}
};
extern int optind;
opterr = 0;
int option_index = 0;
int c;
while ((c =
getopt_long(argc, argv, "M:N:a:m:p:t:s:b:r:w:e:T:G:B:h",
long_options, &option_index)) != -1)
{
switch (c)
{
case 'M':
in_b1map = optarg;
break;
case 'N':
NATOMS = atoi(optarg);
break;
case 'a':
out_atoms = optarg;
break;
case 'm':
out_maps = optarg;
break;
case 'p':
out_params = optarg;
break;
case 't':
T1 = optarg;
break;
case 's':
T2 = optarg;
break;
case 'b':
B0 = optarg;
break;
case 'r':
B1 = optarg;
break;
case 'w':
nstates = atoi(optarg);
break;
case 'e':
echo_type = optarg;
break;
case 'T':
nreps = atoi(optarg);
break;
case 'G':
gridsize = atoi(optarg);
break;
case 'B':
blocksize = atoi(optarg);
break;
case 'h':
default:
print_usage();
return 1;
}
}
int csv_count = 0;
int ra_count = 0;
const char *tmp;
while (optind <= argc-1)
{
tmp = argv[optind];
if ((strstr(tmp,".ra") != NULL) && (ra_count == 0))
{
in_img = tmp;
ra_count ++;
}
if ((strstr(tmp,".csv") != NULL) && (csv_count == 0))
{
in_mrf = tmp;
csv_count ++;
}
if (csv_count+ra_count == 2)
break;
optind ++;
}
printf("\n");
printf("nstates : %d\n", nstates);
printf("Blocksize : %d\n", blocksize);
printf("Gridsize : %d\n", gridsize);
printf("Echo : %s\n", echo_type);
printf("\n");
// Parse parameters
size_t l_t1, l_t2, l_b0, l_b1;
float *h_t1, *h_t2, *h_b0, *h_b1;
if (NATOMS != 0)
{
printf("Hard coding for parameters...\n");
l_b1 = 11;
l_b0 = 21;
l_t1 = roundf(sqrtf(NATOMS / l_b0 / l_b1));
if (l_t1 == 0)
l_t1 = 1;
l_t2 = l_t1;
h_t1 = (float *) safe_malloc(l_t1 * sizeof(float));
logspace(h_t1, 100.f, 3000.f, l_t1);
h_t2 = (float *) safe_malloc(l_t2 * sizeof(float));
logspace(h_t2, 20.f, 1000.f, l_t2);
h_b0 = (float *) safe_malloc(l_b0 * sizeof(float));
logspace(h_b0, -150.f, 150.f, l_b0);
h_b1 = (float *) safe_malloc(l_b1 * sizeof(float));
logspace(h_b1, 0.5, 1.5, l_b1);
}
else
{
printf("Parsing parameters...\n");
printf("T1 : %s\n", T1);
printf("T2 : %s\n", T2);
printf("B0 : %s\n", B0);
printf("B1 : %s\n", B1);
l_t1 = parse_length(T1);
h_t1 = (float *) safe_malloc(l_t1 * sizeof(float));
parse_params(h_t1, T1, l_t1);
l_t2 = parse_length(T2);
h_t2 = (float *) safe_malloc(l_t2 * sizeof(float));
parse_params(h_t2, T2, l_t2);
l_b0 = parse_length(B0);
h_b0 = (float *) safe_malloc(l_b0 * sizeof(float));
parse_params(h_b0, B0, l_b0);
l_b1 = parse_length(B1);
h_b1 = (float *) safe_malloc(l_b1 * sizeof(float));
parse_params(h_b1, B1, l_b1);
}
// The number of atoms of the whole dictionary
NATOMS = compute_natoms(h_t1, h_t2, l_t1, l_t2, l_b0, l_b1);
// The number of atoms per b1
natoms = NATOMS/l_b1;
printf("l_t1: %lu, l_t2: %lu, l_b0: %lu, l_b1: %lu\n",
l_t1, l_t2, l_b0, l_b1);
printf("NATOMS: %lu, natoms: %lu\n", NATOMS, natoms);
printf("Removing situations when T1 <= T2\n");
printf("NATOMS might be smaller than input\n");
if (NATOMS == 0)
err(EX_SOFTWARE, "Number of atoms is 0!\n");
printf("\n");
// Transfer h_t1, h_t2, h_b0 and h_b1 into h_params;
float *h_params =
(float *) safe_malloc(NATOMS * nparams * sizeof(float));
trans_params(h_params, h_t1, h_t2, h_b0, h_b1, l_t1, l_t2, l_b0, l_b1,
NATOMS, nparams);
float *d_params;
cuTry(cudaMalloc((void **) &d_params,
nparams * NATOMS * sizeof(float)));
cuTry(cudaMemcpyAsync(d_params, h_params,
nparams * NATOMS * sizeof(float), cudaMemcpyHostToDevice,
stream[1]));
safe_free(h_t1);
safe_free(h_t2);
safe_free(h_b0);
safe_free(h_b1);
safe_free(h_params);
// Read input mrf
printf("Input mrf: %s\n", in_mrf);
printf("Reading %s\n", in_mrf);
size_t NREPS = csv_dim(in_mrf) - 1;
if (nreps > NREPS)
nreps = NREPS;
printf("NREPS: %lu, nreps: %lu\n", NREPS, nreps);
float *h_mrf = (float *) safe_malloc(NREPS * 4 * sizeof(float));
csv_read(h_mrf, NREPS, in_mrf);
printf("\n");
// Run MRF Dictionary
float2 *d_atoms;
cuTry(cudaMalloc((void **) &d_atoms,
nreps * NATOMS * sizeof(float2)));
clock_t start, stop;
start = clock();
MRF_dict(d_atoms, d_params, h_mrf, nreps, NREPS, NATOMS, natoms,
nparams, nstates, echo_type);
cudaDeviceSynchronize();
stop = clock();
printf("Elapsed dictionary time: %.2f s\n",
((float) (stop - start)) / CLOCKS_PER_SEC);
printf("\n");
safe_free(h_mrf);
// Run matching if in_img is specified
if ((in_img == NULL) && (out_maps != NULL))
printf("NO data specified! Abort matching!\n");
if (in_img != NULL)
{
printf("Input img: %s\n", in_img);
ra_t ra_img;
printf("Reading %s\n", in_img);
ra_read(&ra_img, in_img);
float2 *h_img = (float2 *) ra_img.data;
size_t nt = ra_img.dims[0];
size_t nvoxels = ra_img.dims[1] * ra_img.dims[2];
printf("nt: %lu, nx: %llu, ny: %llu\n", nt, ra_img.dims[1],
ra_img.dims[2]);
printf("\n");
// Run MRF Matching
float *d_maps;
cuTry(cudaMalloc((void **) &d_maps,
nmaps * nvoxels * sizeof(float)));
start = clock();
MRF_match(d_maps, h_img, d_atoms, d_params,
nreps, nt, NATOMS, natoms, nvoxels, nparams, nmaps, in_b1map);
cudaDeviceSynchronize();
stop = clock();
printf("Elapsed matching time: %.2f s\n",
((float) (stop - start)) / CLOCKS_PER_SEC);
printf("\n");
// Save maps
if (out_maps == NULL)
out_maps = "../result/maps.ra";
printf("Output maps: %s\n", out_maps);
float *h_map;
h_map = (float *) safe_malloc(nmaps * nvoxels * sizeof(float));
cuTry(cudaMemcpyAsync(h_map, d_maps,
nmaps * nvoxels * sizeof(float),
cudaMemcpyDeviceToHost, stream[1]));
save_rafile(h_map, out_maps, nmaps, nvoxels);
safe_free(h_map);
ra_free(&ra_img);
cuTry(cudaFree(d_maps));
}
// Save atoms
if (out_atoms != NULL)
{
printf("Output atoms: %s\n", out_atoms);
float2 *h_atoms;
h_atoms = (float2 *) safe_malloc(nreps * NATOMS * sizeof(float2));
cuTry(cudaMemcpyAsync(h_atoms, d_atoms,
nreps * NATOMS * sizeof(float2), cudaMemcpyDeviceToHost,
stream[0]));
save_rafile(h_atoms, out_atoms, nreps, NATOMS);
safe_free(h_atoms);
cuTry(cudaFree(d_atoms));
}
else
cuTry(cudaFree(d_atoms));
// Save parameters
if (out_params != NULL)
{
printf("Output params: %s\n", out_params);
float *h_params;
h_params = (float *) safe_malloc(nparams * NATOMS * sizeof(float));
cuTry(cudaMemcpyAsync(h_params, d_params,
nparams * NATOMS * sizeof(float), cudaMemcpyDeviceToHost,
stream[0]));
save_rafile(h_params, out_params, nparams, NATOMS);
safe_free(h_params);
cuTry(cudaFree(d_params));
}
else
cuTry(cudaFree(d_params));
// Destroy CUDA streams
cuTry(cudaStreamDestroy(stream[0]));
cuTry(cudaStreamDestroy(stream[1]));
return 0;
}
|
128b012496f5e563481dd868aa154678e43019cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THZCTensorMath.h"
#include "THZCGeneral.h"
#include "THZCGeneral.cuh"
#include "THZCBlas.h"
#include "THZCTensorCopy.h"
#include "THZCApply.cuh"
#include "THZCReduce.cuh"
#include <thrust/functional.h>
// #include <thrust/complex.h>
// typedef thrust::complex<float> ccx;
// ccx toCcx(cx val) {
// return ccx(crealf(val), cimagf(val));
// }
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<class BinaryOp>
__global__ void THZCudaTensor_kernel_scanOuterDim(ccx *tgt_, ccx *src_,
unsigned num_orows, unsigned num_irows, unsigned row_size,
ccx init, BinaryOp binary_op)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
ccx *src = src_ + orow * row_size * num_irows + irow;
ccx *tgt = tgt_ + orow * row_size * num_irows + irow;
ccx acc = init;
for (unsigned col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
template<class BinaryOp>
__host__ void THZCudaTensor_scanOuterDim(THCState *state, THZCudaTensor *tgt, THZCudaTensor *src, long dimension,
cx init, BinaryOp binary_op)
{
unsigned ndim = THZCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (unsigned dim = 0; dim < dimension; dim++) {
num_orows *= THZCudaTensor_size(state, src, dim);
}
unsigned row_size = THZCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THZCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THZCCeilDiv(num_irows, threads.x)));
hipLaunchKernelGGL(( THZCudaTensor_kernel_scanOuterDim), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
(ccx*)THZCudaTensor_data(state, tgt), (ccx*)THZCudaTensor_data(state, src), num_orows, num_irows, row_size, toCcx(init), binary_op);
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void THZCudaTensor_kernel_scanInnermostDim(ccx *tgt_, ccx *src_,
unsigned num_rows, unsigned row_size,
ccx init, BinaryFunction binary_op)
{
__shared__ ccx sbuf[num_threads_y][2 * num_threads_x];
ccx* row_buf = sbuf[threadIdx.y];
for (unsigned block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
ccx block_total = (ccx)init;
ccx *row_src = (ccx*)(src_ + row * row_size);
ccx *row_tgt = (ccx*)(tgt_ + row * row_size);
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (unsigned block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
unsigned col1 = block_col + threadIdx.x;
unsigned col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (unsigned s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
unsigned offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (unsigned s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
unsigned offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template<class BinaryFunction>
__host__ void THZCudaTensor_scanInnermostDim(THCState *state, THZCudaTensor *tgt, THZCudaTensor *src, cx init, BinaryFunction binary_op)
{
unsigned ndim = THZCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THZCudaTensor_size(state, src, dim);
}
unsigned row_size = THZCudaTensor_size(state, src, ndim - 1);
dim3 threads(16, 32);
dim3 grid(min(1024, THZCCeilDiv(num_rows, threads.y)));
hipLaunchKernelGGL(( THZCudaTensor_kernel_scanInnermostDim<16, 32>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
(ccx*)THZCudaTensor_data(state, tgt), (ccx*)THZCudaTensor_data(state, src), num_rows, row_size, toCcx(init), binary_op);
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
template<class BinaryFunction>
void THZCudaTensor_scanDim(THCState *state, THZCudaTensor *self_, THZCudaTensor *src, long dimension, cx init, BinaryFunction binary_op)
{
THZCudaTensor_resizeAs(state, self_, src);
THZCudaTensor *self = THZCudaTensor_newContiguous(state, self_);
src = THZCudaTensor_newContiguous(state, src);
if (dimension == THZCudaTensor_nDimension(state, src) - 1) {
THZCudaTensor_scanInnermostDim(state, self, src, init, binary_op);
} else {
THZCudaTensor_scanOuterDim(state, self, src, dimension, init, binary_op);
}
THZCudaTensor_free(state, src);
THZCudaTensor_freeCopyTo(state, self, self_);
}
void THZCudaTensor_cumsum(THCState *state, THZCudaTensor *self, THZCudaTensor *src, long dimension)
{
THAssert(THZCudaTensor_checkGPU(state, 2, self, src));
return THZCudaTensor_scanDim(state, self, src, dimension, 0.0f, thrust::plus<ccx>());
}
void THZCudaTensor_cumprod(THCState *state, THZCudaTensor *self, THZCudaTensor *src, long dimension)
{
THAssert(THZCudaTensor_checkGPU(state, 2, self, src));
return THZCudaTensor_scanDim(state, self, src, dimension, 1.0f, thrust::multiplies<ccx>());
}
| 128b012496f5e563481dd868aa154678e43019cb.cu | #include "THZCTensorMath.h"
#include "THZCGeneral.h"
#include "THZCGeneral.cuh"
#include "THZCBlas.h"
#include "THZCTensorCopy.h"
#include "THZCApply.cuh"
#include "THZCReduce.cuh"
#include <thrust/functional.h>
// #include <thrust/complex.h>
// typedef thrust::complex<float> ccx;
// ccx toCcx(cx val) {
// return ccx(crealf(val), cimagf(val));
// }
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
 * - row_size is the size of the dimension along which to compute the scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
 * Thread blocks with the same blockIdx.x process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<class BinaryOp>
__global__ void THZCudaTensor_kernel_scanOuterDim(ccx *tgt_, ccx *src_,
unsigned num_orows, unsigned num_irows, unsigned row_size,
ccx init, BinaryOp binary_op)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
ccx *src = src_ + orow * row_size * num_irows + irow;
ccx *tgt = tgt_ + orow * row_size * num_irows + irow;
ccx acc = init;
for (unsigned col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
template<class BinaryOp>
__host__ void THZCudaTensor_scanOuterDim(THCState *state, THZCudaTensor *tgt, THZCudaTensor *src, long dimension,
cx init, BinaryOp binary_op)
{
unsigned ndim = THZCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (unsigned dim = 0; dim < dimension; dim++) {
num_orows *= THZCudaTensor_size(state, src, dim);
}
unsigned row_size = THZCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THZCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THZCCeilDiv(num_irows, threads.x)));
THZCudaTensor_kernel_scanOuterDim<<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
(ccx*)THZCudaTensor_data(state, tgt), (ccx*)THZCudaTensor_data(state, src), num_orows, num_irows, row_size, toCcx(init), binary_op);
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void THZCudaTensor_kernel_scanInnermostDim(ccx *tgt_, ccx *src_,
unsigned num_rows, unsigned row_size,
ccx init, BinaryFunction binary_op)
{
__shared__ ccx sbuf[num_threads_y][2 * num_threads_x];
ccx* row_buf = sbuf[threadIdx.y];
for (unsigned block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
ccx block_total = (ccx)init;
ccx *row_src = (ccx*)(src_ + row * row_size);
ccx *row_tgt = (ccx*)(tgt_ + row * row_size);
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (unsigned block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
unsigned col1 = block_col + threadIdx.x;
unsigned col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (unsigned s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
unsigned offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (unsigned s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
unsigned offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template<class BinaryFunction>
__host__ void THZCudaTensor_scanInnermostDim(THCState *state, THZCudaTensor *tgt, THZCudaTensor *src, cx init, BinaryFunction binary_op)
{
unsigned ndim = THZCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THZCudaTensor_size(state, src, dim);
}
unsigned row_size = THZCudaTensor_size(state, src, ndim - 1);
dim3 threads(16, 32);
dim3 grid(min(1024, THZCCeilDiv(num_rows, threads.y)));
THZCudaTensor_kernel_scanInnermostDim<16, 32><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
(ccx*)THZCudaTensor_data(state, tgt), (ccx*)THZCudaTensor_data(state, src), num_rows, row_size, toCcx(init), binary_op);
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
template<class BinaryFunction>
void THZCudaTensor_scanDim(THCState *state, THZCudaTensor *self_, THZCudaTensor *src, long dimension, cx init, BinaryFunction binary_op)
{
THZCudaTensor_resizeAs(state, self_, src);
THZCudaTensor *self = THZCudaTensor_newContiguous(state, self_);
src = THZCudaTensor_newContiguous(state, src);
if (dimension == THZCudaTensor_nDimension(state, src) - 1) {
THZCudaTensor_scanInnermostDim(state, self, src, init, binary_op);
} else {
THZCudaTensor_scanOuterDim(state, self, src, dimension, init, binary_op);
}
THZCudaTensor_free(state, src);
THZCudaTensor_freeCopyTo(state, self, self_);
}
void THZCudaTensor_cumsum(THCState *state, THZCudaTensor *self, THZCudaTensor *src, long dimension)
{
THAssert(THZCudaTensor_checkGPU(state, 2, self, src));
return THZCudaTensor_scanDim(state, self, src, dimension, 0.0f, thrust::plus<ccx>());
}
void THZCudaTensor_cumprod(THCState *state, THZCudaTensor *self, THZCudaTensor *src, long dimension)
{
THAssert(THZCudaTensor_checkGPU(state, 2, self, src));
return THZCudaTensor_scanDim(state, self, src, dimension, 1.0f, thrust::multiplies<ccx>());
}
|
28cc49f4c703e73b0dd82fe1d0e19d5de2ea4980.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mc_kernel_call.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
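// Editor's note: for each (matrix size, block shape) pair, the driver below pads XSIZE/YSIZE up to
// a multiple of the block dimensions, performs one synchronised warm-up launch plus ten further
// launches, then times 1000 back-to-back launches with std::chrono and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. Note that there is no device synchronisation
// inside the timed loop, so the figure mostly reflects launch submission cost.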
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_s = NULL;
hipMalloc(&d_s, XSIZE*YSIZE);
float T = 1;
float K = 1;
float S0 = 1;
float sigma = 1;
float mu = 1;
float r = 1;
float dt = 1;
float *d_normals = NULL;
hipMalloc(&d_normals, XSIZE*YSIZE);
unsigned N_STEPS = 1;
unsigned N_PATHS = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((mc_kernel_call), dim3(gridBlock), dim3(threadBlock), 0, 0, d_s,T,K,S0,sigma,mu,r,dt,d_normals,N_STEPS,N_PATHS);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((mc_kernel_call), dim3(gridBlock), dim3(threadBlock), 0, 0, d_s,T,K,S0,sigma,mu,r,dt,d_normals,N_STEPS,N_PATHS);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((mc_kernel_call), dim3(gridBlock), dim3(threadBlock), 0, 0, d_s,T,K,S0,sigma,mu,r,dt,d_normals,N_STEPS,N_PATHS);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 28cc49f4c703e73b0dd82fe1d0e19d5de2ea4980.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mc_kernel_call.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_s = NULL;
cudaMalloc(&d_s, XSIZE*YSIZE);
float T = 1;
float K = 1;
float S0 = 1;
float sigma = 1;
float mu = 1;
float r = 1;
float dt = 1;
float *d_normals = NULL;
cudaMalloc(&d_normals, XSIZE*YSIZE);
unsigned N_STEPS = 1;
unsigned N_PATHS = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mc_kernel_call<<<gridBlock,threadBlock>>>(d_s,T,K,S0,sigma,mu,r,dt,d_normals,N_STEPS,N_PATHS);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mc_kernel_call<<<gridBlock,threadBlock>>>(d_s,T,K,S0,sigma,mu,r,dt,d_normals,N_STEPS,N_PATHS);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mc_kernel_call<<<gridBlock,threadBlock>>>(d_s,T,K,S0,sigma,mu,r,dt,d_normals,N_STEPS,N_PATHS);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d7d08466cf8bdc77d56a11488c48ac0e77806c47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Div.h"
#include "Div.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
division of data arrays in an element-wise manner c(i) = a(i)/b(i)
>> a - data array a
>> b - data array b
>> c - result data array
>> size - size of c
*/
__global__
void KernelDivElementWise(DTYPE * a, DTYPE * b, DTYPE * c, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
c[i] = a[i] / b[i];
}
/*
division of data arrays in an element-wise manner c(i) = a(i)/b(i) + \alpha*c(i)
>> a - data array a
>> b - data array b
>> c - result data array
>> size - size of c
>> alpha - the coefficient
*/
__global__
void KernelDivElementWiseV2(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
c[i] = a[i] / b[i] + alpha * c[i];
}
/*
division of two tensors in an element-wise manner c(i) = a(i)/b(i).
Note that a and b can be of different sizes here, i.e.,
|a_lead| <= |c_lead| and |b_lead| <= |c_lead|
where |a_lead| means the size of the leading dimension of a
>> a - tensor a
>> b - tensor b
>> c - result tensor
>> alpha - the coefficient
>> stride - the number of items we step over when moving to the next element along the leading dimension in a block
>> ldSizeA - size of the leading dimension of a
>> ldSizeB - size of the leading dimension of b
>> ldSizeC - size of the leading dimension of c
>> blockNum - number of blocks
*/
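// Editor's note (illustrative, not part of the original NiuTrans sources): suppose ldSizeA = 1
// while ldSizeB = ldSizeC = 4. For output position (i, j) the kernel below computes
// aj = j % ldSizeA = 0 and bj = j % ldSizeB = j, so every row j of c within a block is produced
// from the single row of a and row j of b; this is how a tensor whose leading dimension has
// size 1 is broadcast against a full-sized one.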
template<int nonZeroAlpha> __global__
void KernelDivElementWiseTensorDynamic(DTYPE * a, DTYPE * b, DTYPE * c, DTYPE alpha,
int stride, int ldSizeA, int ldSizeB, int ldSizeC, int blockNum)
{
__shared__ DTYPE* ap[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE* bp[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE* cp[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= blockNum * stride || j >= ldSizeC)
return;
if (threadIdx.y == 0) {
int block = i / stride;
int size = block * stride;
ap[threadIdx.x] = a + size * ldSizeA;
bp[threadIdx.x] = b + size * ldSizeB;
cp[threadIdx.x] = c + size * ldSizeC;
}
__syncthreads();
int aj = j >= ldSizeA ? j % ldSizeA : j;
int bj = j >= ldSizeB ? j % ldSizeB : j;
int offseti = i % stride;
if (nonZeroAlpha == 0)
cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] / bp[threadIdx.x][bj * ldSizeB + offseti];
else
cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] / bp[threadIdx.x][bj * ldSizeB + offseti]
+ alpha * cp[threadIdx.x][j * ldSizeC + offseti];
}
/*
element-wise division of two tensors
c(i) = a(i)/b(i) + \alpha * c(i)
where i is the item index
>> a - tensor a
>> b - tensor b
>> c - result tensor
>> alpha - the coefficient
>> leadingDim - dimension along which we perform broadcasting
*/
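// Editor's note (worked example under an assumed 2 x 3 x 4 input with leadingDim = 1; not from
// the original sources): leadingDimRDI = 3 - 1 - 1 = 1 and dimSizeRDI = {4, 3, 2}, so stride = 4,
// blockSizeA = 4 * 3 = 12 and blockNum = 24 / 12 = 2, i.e. the tensor is viewed as two blocks of
// three "rows" of four contiguous elements, with broadcasting applied along the 3-sized dimension.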
void _CudaDiv(const XTensor * a, const XTensor * b, XTensor * c, DTYPE alpha, int leadingDim)
{
int leadingDimRDI = a->order - leadingDim - 1;
CheckNTErrors((a->unitNum <= c->unitNum && b->unitNum <= c->unitNum),
"Unmatched tensors in multiplication!");
CheckNTErrors((a->order == b->order && a->order == c->order), "Unmatched tensors!");
int stride = 1;
int blockSizeA = 1;
int blockNum = 1;
int dimensionSizeA = a->dimSizeRDI[leadingDimRDI];
int dimensionSizeB = b->dimSizeRDI[leadingDimRDI];
int dimensionSizeC = c->dimSizeRDI[leadingDimRDI];
for (int i = 0; i < a->order; i++) {
if (i != leadingDimRDI) {
CheckNTErrors((a->dimSizeRDI[i] == b->dimSizeRDI[i] &&
a->dimSizeRDI[i] == c->dimSizeRDI[i]),
"Unmatched tensors!");
}
if (i < leadingDimRDI)
stride *= a->dimSizeRDI[i];
}
blockSizeA = stride * dimensionSizeA;
blockNum = a->unitNum / blockSizeA;
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (!a->isSparse && !b->isSparse) {
if (a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE) {
int cudaGridSize[3];
int cudaBlockSize[3];
if (a->unitNum == c->unitNum && b->unitNum == c->unitNum) {
GDevs.GetCudaThread(a->devID, c->unitNum, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[0]), threads(cudaBlockSize[0]);
if (alpha == 0)
KernelDivElementWise << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum);
else
KernelDivElementWiseV2 << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum, alpha);
}
else {
GDevs.GetCudaThread2D(c->devID, stride * blockNum, dimensionSizeC, MAX_INT, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]);
if (alpha == 0) {
KernelDivElementWiseTensorDynamic<0> << <blocks, threads >> >
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, 0,
stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum);
}
else {
KernelDivElementWiseTensorDynamic<1> << <blocks, threads >> >
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, alpha,
stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum);
}
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor) | d7d08466cf8bdc77d56a11488c48ac0e77806c47.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Div.h"
#include "Div.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
division of data arrays in an element-wise manner c(i) = a(i)/b(i)
>> a - data array a
>> b - data array b
>> c - result data array
>> size - size of c
*/
__global__
void KernelDivElementWise(DTYPE * a, DTYPE * b, DTYPE * c, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
c[i] = a[i] / b[i];
}
/*
division of data arrays in an element-wise manner c(i) = a(i)/b(i) + \alpha*c(i)
>> a - data array a
>> b - data array b
>> c - result data array
>> size - size of c
>> alpha - the coefficient
*/
__global__
void KernelDivElementWiseV2(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
c[i] = a[i] / b[i] + alpha * c[i];
}
/*
division of two tensors in an element-wise manner c(i) = a(i)/b(i).
Note that a and b can be of different sizes here, i.e.,
|a_lead| <= |c_lead| and |b_lead| <= |c_lead|
where |a_lead| means the size of the leading dimension of a
>> a - tensor a
>> b - tensor b
>> c - result tensor
>> alpha - the coefficient
>> stride - the number of items we step over when moving to the next element along the leading dimension in a block
>> ldSizeA - size of the leading dimension of a
>> ldSizeB - size of the leading dimension of b
>> ldSizeC - size of the leading dimension of c
>> blockNum - number of blocks
*/
template<int nonZeroAlpha> __global__
void KernelDivElementWiseTensorDynamic(DTYPE * a, DTYPE * b, DTYPE * c, DTYPE alpha,
int stride, int ldSizeA, int ldSizeB, int ldSizeC, int blockNum)
{
__shared__ DTYPE* ap[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE* bp[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE* cp[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= blockNum * stride || j >= ldSizeC)
return;
if (threadIdx.y == 0) {
int block = i / stride;
int size = block * stride;
ap[threadIdx.x] = a + size * ldSizeA;
bp[threadIdx.x] = b + size * ldSizeB;
cp[threadIdx.x] = c + size * ldSizeC;
}
__syncthreads();
int aj = j >= ldSizeA ? j % ldSizeA : j;
int bj = j >= ldSizeB ? j % ldSizeB : j;
int offseti = i % stride;
if (nonZeroAlpha == 0)
cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] / bp[threadIdx.x][bj * ldSizeB + offseti];
else
cp[threadIdx.x][j * ldSizeC + offseti] = ap[threadIdx.x][aj * ldSizeA + offseti] / bp[threadIdx.x][bj * ldSizeB + offseti]
+ alpha * cp[threadIdx.x][j * ldSizeC + offseti];
}
/*
element-wise division of two tensors
c(i) = a(i)/b(i) + \alpha * c(i)
where i is the item index
>> a - tensor a
>> b - tensor b
>> c - result tensor
>> alpha - the coefficient
>> leadingDim - dimension along which we perform broadcasting
*/
void _CudaDiv(const XTensor * a, const XTensor * b, XTensor * c, DTYPE alpha, int leadingDim)
{
int leadingDimRDI = a->order - leadingDim - 1;
CheckNTErrors((a->unitNum <= c->unitNum && b->unitNum <= c->unitNum),
"Unmatched tensors in multiplication!");
CheckNTErrors((a->order == b->order && a->order == c->order), "Unmatched tensors!");
int stride = 1;
int blockSizeA = 1;
int blockNum = 1;
int dimensionSizeA = a->dimSizeRDI[leadingDimRDI];
int dimensionSizeB = b->dimSizeRDI[leadingDimRDI];
int dimensionSizeC = c->dimSizeRDI[leadingDimRDI];
for (int i = 0; i < a->order; i++) {
if (i != leadingDimRDI) {
CheckNTErrors((a->dimSizeRDI[i] == b->dimSizeRDI[i] &&
a->dimSizeRDI[i] == c->dimSizeRDI[i]),
"Unmatched tensors!");
}
if (i < leadingDimRDI)
stride *= a->dimSizeRDI[i];
}
blockSizeA = stride * dimensionSizeA;
blockNum = a->unitNum / blockSizeA;
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (!a->isSparse && !b->isSparse) {
if (a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE) {
int cudaGridSize[3];
int cudaBlockSize[3];
if (a->unitNum == c->unitNum && b->unitNum == c->unitNum) {
GDevs.GetCudaThread(a->devID, c->unitNum, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[0]), threads(cudaBlockSize[0]);
if (alpha == 0)
KernelDivElementWise << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum);
else
KernelDivElementWiseV2 << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, c->unitNum, alpha);
}
else {
GDevs.GetCudaThread2D(c->devID, stride * blockNum, dimensionSizeC, MAX_INT, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]);
if (alpha == 0) {
KernelDivElementWiseTensorDynamic<0> << <blocks, threads >> >
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, 0,
stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum);
}
else {
KernelDivElementWiseTensorDynamic<1> << <blocks, threads >> >
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, alpha,
stride, dimensionSizeA, dimensionSizeB, dimensionSizeC, blockNum);
}
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
1fcab8837515baeca187617ed5e83c459f2c5e76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/trace_grad_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
template <typename T, typename S>
__global__ void TraceGrad(S size, const T *y_grad, const S *input_shape, T *output) {
S matrix_col = input_shape[1];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += gridDim.x * blockDim.x) {
size_t row_num = pos / matrix_col;
size_t col_num = pos % matrix_col;
if (row_num < size && row_num == col_num) {
output[pos] = *y_grad;
} else {
output[pos] = 0;
}
}
return;
}
template <typename T, typename S>
void CalTraceGrad(S size, const T *y_grad, const S *input_shape, T *output, const uint32_t &device_id,
hipStream_t cuda_stream) {
  hipLaunchKernelGGL((TraceGrad<T, S>), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
                     size, y_grad, input_shape, output);
return;
}
template CUDA_LIB_EXPORT void CalTraceGrad<uint8_t, int32_t>(int32_t size, const uint8_t *y_grad,
const int32_t *input_shape, uint8_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint16_t, int32_t>(int32_t size, const uint16_t *y_grad,
const int32_t *input_shape, uint16_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint32_t, int32_t>(int32_t size, const uint32_t *y_grad,
const int32_t *input_shape, uint32_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint64_t, int32_t>(int32_t size, const uint64_t *y_grad,
const int32_t *input_shape, uint64_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int8_t, int32_t>(int32_t size, const int8_t *y_grad,
const int32_t *input_shape, int8_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int16_t, int32_t>(int32_t size, const int16_t *y_grad,
const int32_t *input_shape, int16_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int32_t, int32_t>(int32_t size, const int32_t *y_grad,
const int32_t *input_shape, int32_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int64_t, int32_t>(int32_t size, const int64_t *y_grad,
const int32_t *input_shape, int64_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<half, int32_t>(int32_t size, const half *y_grad, const int32_t *input_shape,
half *output, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<float, int32_t>(int32_t size, const float *y_grad,
const int32_t *input_shape, float *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<double, int32_t>(int32_t size, const double *y_grad,
const int32_t *input_shape, double *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<float>, int32_t>(int32_t size, const Complex<float> *y_grad,
const int32_t *input_shape, Complex<float> *output,
const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<double>, int32_t>(int32_t size, const Complex<double> *y_grad,
const int32_t *input_shape,
Complex<double> *output, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint8_t, int64_t>(int64_t size, const uint8_t *y_grad,
const int64_t *input_shape, uint8_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint16_t, int64_t>(int64_t size, const uint16_t *y_grad,
const int64_t *input_shape, uint16_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint32_t, int64_t>(int64_t size, const uint32_t *y_grad,
const int64_t *input_shape, uint32_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint64_t, int64_t>(int64_t size, const uint64_t *y_grad,
const int64_t *input_shape, uint64_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int8_t, int64_t>(int64_t size, const int8_t *y_grad,
const int64_t *input_shape, int8_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int16_t, int64_t>(int64_t size, const int16_t *y_grad,
const int64_t *input_shape, int16_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int32_t, int64_t>(int64_t size, const int32_t *y_grad,
const int64_t *input_shape, int32_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int64_t, int64_t>(int64_t size, const int64_t *y_grad,
const int64_t *input_shape, int64_t *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<half, int64_t>(int64_t size, const half *y_grad, const int64_t *input_shape,
half *output, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<float, int64_t>(int64_t size, const float *y_grad,
const int64_t *input_shape, float *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<double, int64_t>(int64_t size, const double *y_grad,
const int64_t *input_shape, double *output,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<float>, int64_t>(int64_t size, const Complex<float> *y_grad,
const int64_t *input_shape, Complex<float> *output,
const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<double>, int64_t>(int64_t size, const Complex<double> *y_grad,
const int64_t *input_shape,
Complex<double> *output, const uint32_t &device_id,
hipStream_t cuda_stream);
| 1fcab8837515baeca187617ed5e83c459f2c5e76.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/trace_grad_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
template <typename T, typename S>
__global__ void TraceGrad(S size, const T *y_grad, const S *input_shape, T *output) {
S matrix_col = input_shape[1];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += gridDim.x * blockDim.x) {
size_t row_num = pos / matrix_col;
size_t col_num = pos % matrix_col;
if (row_num < size && row_num == col_num) {
output[pos] = *y_grad;
} else {
output[pos] = 0;
}
}
return;
}
template <typename T, typename S>
void CalTraceGrad(S size, const T *y_grad, const S *input_shape, T *output, const uint32_t &device_id,
cudaStream_t cuda_stream) {
TraceGrad<T, S>
<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, y_grad, input_shape, output);
return;
}
template CUDA_LIB_EXPORT void CalTraceGrad<uint8_t, int32_t>(int32_t size, const uint8_t *y_grad,
const int32_t *input_shape, uint8_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint16_t, int32_t>(int32_t size, const uint16_t *y_grad,
const int32_t *input_shape, uint16_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint32_t, int32_t>(int32_t size, const uint32_t *y_grad,
const int32_t *input_shape, uint32_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint64_t, int32_t>(int32_t size, const uint64_t *y_grad,
const int32_t *input_shape, uint64_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int8_t, int32_t>(int32_t size, const int8_t *y_grad,
const int32_t *input_shape, int8_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int16_t, int32_t>(int32_t size, const int16_t *y_grad,
const int32_t *input_shape, int16_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int32_t, int32_t>(int32_t size, const int32_t *y_grad,
const int32_t *input_shape, int32_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int64_t, int32_t>(int32_t size, const int64_t *y_grad,
const int32_t *input_shape, int64_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<half, int32_t>(int32_t size, const half *y_grad, const int32_t *input_shape,
half *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<float, int32_t>(int32_t size, const float *y_grad,
const int32_t *input_shape, float *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<double, int32_t>(int32_t size, const double *y_grad,
const int32_t *input_shape, double *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<float>, int32_t>(int32_t size, const Complex<float> *y_grad,
const int32_t *input_shape, Complex<float> *output,
const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<double>, int32_t>(int32_t size, const Complex<double> *y_grad,
const int32_t *input_shape,
Complex<double> *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint8_t, int64_t>(int64_t size, const uint8_t *y_grad,
const int64_t *input_shape, uint8_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint16_t, int64_t>(int64_t size, const uint16_t *y_grad,
const int64_t *input_shape, uint16_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint32_t, int64_t>(int64_t size, const uint32_t *y_grad,
const int64_t *input_shape, uint32_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<uint64_t, int64_t>(int64_t size, const uint64_t *y_grad,
const int64_t *input_shape, uint64_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int8_t, int64_t>(int64_t size, const int8_t *y_grad,
const int64_t *input_shape, int8_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int16_t, int64_t>(int64_t size, const int16_t *y_grad,
const int64_t *input_shape, int16_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int32_t, int64_t>(int64_t size, const int32_t *y_grad,
const int64_t *input_shape, int32_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<int64_t, int64_t>(int64_t size, const int64_t *y_grad,
const int64_t *input_shape, int64_t *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<half, int64_t>(int64_t size, const half *y_grad, const int64_t *input_shape,
half *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<float, int64_t>(int64_t size, const float *y_grad,
const int64_t *input_shape, float *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<double, int64_t>(int64_t size, const double *y_grad,
const int64_t *input_shape, double *output,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<float>, int64_t>(int64_t size, const Complex<float> *y_grad,
const int64_t *input_shape, Complex<float> *output,
const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalTraceGrad<Complex<double>, int64_t>(int64_t size, const Complex<double> *y_grad,
const int64_t *input_shape,
Complex<double> *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
|
1617ba0aa6d9d29fcf5a0790f07be9b3f310fd3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/concatenate.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/structs/detail/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <numeric>
#include <utility>
namespace cudf {
namespace detail {
// From benchmark data, the fused kernel optimization appears to perform better
// when there are more than a trivial number of columns, or when the null mask
// can also be computed at the same time
constexpr bool use_fused_kernel_heuristic(bool const has_nulls, size_t const num_columns)
{
return has_nulls || num_columns > 4;
}
auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_view stream)
{
// Create device views for each input view
using CDViewPtr = decltype(
column_device_view::create(std::declval<column_view>(), std::declval<rmm::cuda_stream_view>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(views.begin(), views.end(), device_view_owners.begin(), [stream](auto const& col) {
return column_device_view::create(col, stream);
});
// Assemble contiguous array of device views
auto device_views = thrust::host_vector<column_device_view>();
device_views.reserve(views.size());
std::transform(device_view_owners.cbegin(),
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
auto d_views = make_device_uvector_async(device_views, stream);
// Compute the partition offsets
auto offsets = thrust::host_vector<size_t>(views.size() + 1);
thrust::transform_inclusive_scan(
thrust::host,
device_views.cbegin(),
device_views.cend(),
std::next(offsets.begin()),
[](auto const& col) { return col.size(); },
thrust::plus{});
auto d_offsets = make_device_uvector_async(offsets, stream);
auto const output_size = offsets.back();
return std::make_tuple(
std::move(device_view_owners), std::move(d_views), std::move(d_offsets), output_size);
}
/**
* @brief Concatenates the null mask bits of all the column device views in the
* `views` array to the destination bitmask.
*
* @param views Array of column_device_view
* @param output_offsets Prefix sum of sizes of elements of `views`
* @param number_of_views Size of `views` array
* @param dest_mask The output buffer to copy null masks into
 * @param number_of_mask_bits The total number of null mask bits that are being
* copied
*/
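// Editor's note (reading aid, not part of the cudf sources): output_offsets[k] holds the first
// output row that belongs to view k (a prefix sum of the view sizes with a leading zero), so the
// upper_bound search below minus one yields the source view for a given bit index, and subtracting
// that view's starting offset yields the element index within it. Each warp then ballots its 32
// validity bits into a single bitmask word, written by lane 0.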
__global__ void concatenate_masks_kernel(column_device_view const* views,
size_t const* output_offsets,
size_type number_of_views,
bitmask_type* dest_mask,
size_type number_of_mask_bits)
{
size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x;
auto active_mask = __ballot_sync(0xFFFF'FFFF, mask_index < number_of_mask_bits);
while (mask_index < number_of_mask_bits) {
size_type const source_view_index =
thrust::upper_bound(
thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) -
output_offsets - 1;
bool bit_is_set = true;
if (source_view_index < number_of_views) {
size_type const column_element_index = mask_index - output_offsets[source_view_index];
bit_is_set = views[source_view_index].is_valid(column_element_index);
}
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
if (threadIdx.x % detail::warp_size == 0) { dest_mask[word_index(mask_index)] = new_word; }
mask_index += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits);
}
}
void concatenate_masks(device_span<column_device_view const> d_views,
device_span<size_t const> d_offsets,
bitmask_type* dest_mask,
size_type output_size,
rmm::cuda_stream_view stream)
{
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
hipLaunchKernelGGL(( concatenate_masks_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
d_views.data(),
d_offsets.data(),
static_cast<size_type>(d_views.size()),
dest_mask,
output_size);
}
void concatenate_masks(host_span<column_view const> views,
bitmask_type* dest_mask,
rmm::cuda_stream_view stream)
{
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
concatenate_masks(d_views, d_offsets, dest_mask, output_size, stream);
}
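/**
 * @brief Editor's addition (summary of the kernel below, not original cudf documentation):
 * copies the data of all input views into the output column in a single grid-stride pass.
 * Each output index is mapped back to its source view with an upper_bound search over the
 * partition offsets; when Nullable is true the same pass ballots validity bits into the output
 * null mask and accumulates the number of valid rows into out_valid_count.
 */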
template <typename T, size_type block_size, bool Nullable>
__global__ void fused_concatenate_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_type num_input_views,
mutable_column_device_view output_view,
size_type* out_valid_count)
{
auto const output_size = output_view.size();
auto* output_data = output_view.data<T>();
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
// Copy input data to output
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
auto const* input_data = input_view.data<T>();
output_data[output_index] = input_data[offset_index];
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % detail::warp_size == 0) {
output_view.null_mask()[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
if (Nullable) {
using detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
template <typename T>
std::unique_ptr<column> fused_concatenate(host_span<column_view const> views,
bool const has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using mask_policy = cudf::mask_allocation_policy;
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
CUDF_EXPECTS(output_size < static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated rows exceeds size_type range");
// Allocate output
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto out_col = detail::allocate_like(views.front(), output_size, policy, stream, mr);
out_col->set_null_count(0); // prevent null count from being materialized
auto out_view = out_col->mutable_view();
auto d_out_view = mutable_column_device_view::create(out_view, stream);
rmm::device_scalar<size_type> d_valid_count(0, stream);
// Launch kernel
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
auto const kernel = has_nulls ? fused_concatenate_kernel<T, block_size, true>
: fused_concatenate_kernel<T, block_size, false>;
hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
d_views.data(),
d_offsets.data(),
static_cast<size_type>(d_views.size()),
*d_out_view,
d_valid_count.data());
if (has_nulls) { out_col->set_null_count(output_size - d_valid_count.value(stream)); }
return out_col;
}
template <typename T>
std::unique_ptr<column> for_each_concatenate(host_span<column_view const> views,
bool const has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
using mask_policy = cudf::mask_allocation_policy;
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto col = cudf::detail::allocate_like(views.front(), total_element_count, policy, stream, mr);
col->set_null_count(0); // prevent null count from being materialized...
auto m_view = col->mutable_view(); // ...when we take a mutable view
auto count = 0;
for (auto& v : views) {
thrust::copy(rmm::exec_policy(stream), v.begin<T>(), v.end<T>(), m_view.begin<T>() + count);
count += v.size();
}
// If concatenated column is nullable, proceed to calculate it
if (has_nulls) {
cudf::detail::concatenate_masks(views, (col->mutable_view()).null_mask(), stream);
}
return col;
}
struct concatenate_dispatch {
host_span<column_view const> views;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
// fixed width
template <typename T>
std::unique_ptr<column> operator()()
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](auto const& col) { return col.has_nulls(); });
// Use a heuristic to guess when the fused kernel will be faster
if (use_fused_kernel_heuristic(has_nulls, views.size())) {
return fused_concatenate<T>(views, has_nulls, stream, mr);
} else {
return for_each_concatenate<T>(views, has_nulls, stream, mr);
}
}
};
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::dictionary32>()
{
return cudf::dictionary::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::string_view>()
{
return cudf::strings::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::list_view>()
{
return cudf::lists::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::struct_view>()
{
return cudf::structs::detail::concatenate(views, stream, mr);
}
namespace {
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream);
/**
* @brief Functor for traversing child columns and recursively verifying concatenation
* bounds and types.
*/
class traverse_children {
public:
// nothing to do for simple types.
template <typename T>
void operator()(host_span<column_view const>, rmm::cuda_stream_view)
{
}
private:
// verify length of concatenated offsets.
void check_offsets_size(host_span<column_view const> cols)
{
// offsets. we can't just add up the total sizes of all offset child columns because each one
// has an extra value, regardless of the # of parent rows. So we have to add up the total # of
// rows in the base column and add 1 at the end
size_t const total_offset_count =
std::accumulate(cols.begin(),
cols.end(),
std::size_t{},
[](size_t a, auto const& b) -> size_t { return a + b.size(); }) +
1;
// note: output text must include "exceeds size_type range" for python error handling
CUDF_EXPECTS(total_offset_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated offsets exceeds size_type range");
}
};
template <>
void traverse_children::operator()<cudf::string_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// verify offsets
check_offsets_size(cols);
// chars
size_t const total_char_count = std::accumulate(
cols.begin(), cols.end(), std::size_t{}, [stream](size_t a, auto const& b) -> size_t {
strings_column_view scv(b);
return a + (scv.is_empty() ? 0
// if the column is unsliced, skip the offset retrieval.
: scv.offset() > 0
? cudf::detail::get_value<offset_type>(
scv.offsets(), scv.offset() + scv.size(), stream) -
cudf::detail::get_value<offset_type>(scv.offsets(), scv.offset(), stream)
// if the offset() is 0, it can still be sliced to a shorter length. in this case
// we only need to read a single offset. otherwise just return the full length
// (chars_size())
: scv.size() + 1 == scv.offsets().size()
? scv.chars_size()
: cudf::detail::get_value<offset_type>(scv.offsets(), scv.size(), stream));
});
// note: output text must include "exceeds size_type range" for python error handling
CUDF_EXPECTS(total_char_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated chars exceeds size_type range");
}
template <>
void traverse_children::operator()<cudf::struct_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// march each child
auto child_iter = thrust::make_counting_iterator(0);
auto const num_children = cols.front().num_children();
std::vector<column_view> nth_children;
nth_children.reserve(cols.size());
std::for_each(child_iter, child_iter + num_children, [&](auto child_index) {
std::transform(cols.begin(),
cols.end(),
std::back_inserter(nth_children),
[child_index, stream](column_view const& col) {
structs_column_view scv(col);
return scv.get_sliced_child(child_index);
});
bounds_and_type_check(nth_children, stream);
nth_children.clear();
});
}
template <>
void traverse_children::operator()<cudf::list_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// verify offsets
check_offsets_size(cols);
// recurse into the child columns
std::vector<column_view> nth_children;
nth_children.reserve(cols.size());
std::transform(
cols.begin(), cols.end(), std::back_inserter(nth_children), [stream](column_view const& col) {
lists_column_view lcv(col);
return lcv.get_sliced_child(stream);
});
bounds_and_type_check(nth_children, stream);
}
/**
* @brief Verifies that the sum of the sizes of all the columns to be concatenated
* will not exceed the max value of size_type, and verifies all column types match
*
* @param columns_to_concat Span of columns to check
*
* @throws cudf::logic_error if the total length of the concatenated columns would
* exceed the max value of size_type
*
* @throws cudf::logic_error if all of the input column types don't match
*/
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(std::all_of(cols.begin(),
cols.end(),
[expected_type = cols.front().type()](auto const& c) {
return c.type() == expected_type;
}),
"Type mismatch in columns to concatenate.");
// total size of all concatenated rows
size_t const total_row_count =
std::accumulate(cols.begin(), cols.end(), std::size_t{}, [](size_t a, auto const& b) {
return a + static_cast<size_t>(b.size());
});
// note: output text must include "exceeds size_type range" for python error handling
CUDF_EXPECTS(total_row_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated rows exceeds size_type range");
// traverse children
cudf::type_dispatcher(cols.front().type(), traverse_children{}, cols, stream);
}
} // anonymous namespace
// Concatenates the elements from a vector of column_views
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(not columns_to_concat.empty(), "Unexpected empty list of columns to concatenate.");
// verify all types match and that we won't overflow size_type in output size
bounds_and_type_check(columns_to_concat, stream);
if (std::all_of(columns_to_concat.begin(), columns_to_concat.end(), [](column_view const& c) {
return c.is_empty();
})) {
return empty_like(columns_to_concat.front());
}
return type_dispatcher<dispatch_storage_type>(
columns_to_concat.front().type(), concatenate_dispatch{columns_to_concat, stream, mr});
}
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (tables_to_concat.empty()) { return std::make_unique<table>(); }
table_view const first_table = tables_to_concat.front();
CUDF_EXPECTS(std::all_of(tables_to_concat.begin(),
tables_to_concat.end(),
[&first_table](auto const& t) {
return t.num_columns() == first_table.num_columns();
}),
"Mismatch in table columns to concatenate.");
std::vector<std::unique_ptr<column>> concat_columns;
for (size_type i = 0; i < first_table.num_columns(); ++i) {
std::vector<column_view> cols;
std::transform(tables_to_concat.begin(),
tables_to_concat.end(),
std::back_inserter(cols),
[i](auto const& t) { return t.column(i); });
// verify all types match and that we won't overflow size_type in output size
bounds_and_type_check(cols, stream);
concat_columns.emplace_back(detail::concatenate(cols, stream, mr));
}
return std::make_unique<table>(std::move(concat_columns));
}
} // namespace detail
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
rmm::mr::device_memory_resource* mr)
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); });
if (has_nulls) {
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
rmm::device_buffer null_mask =
create_null_mask(total_element_count, mask_state::UNINITIALIZED, mr);
detail::concatenate_masks(
views, static_cast<bitmask_type*>(null_mask.data()), rmm::cuda_stream_default);
return null_mask;
}
// no nulls, so return an empty device buffer
return rmm::device_buffer{0, rmm::cuda_stream_default, mr};
}
// Concatenates the elements from a vector of column_views
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(columns_to_concat, rmm::cuda_stream_default, mr);
}
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(tables_to_concat, rmm::cuda_stream_default, mr);
}
} // namespace cudf
| 1617ba0aa6d9d29fcf5a0790f07be9b3f310fd3b.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/concatenate.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/structs/detail/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <numeric>
#include <utility>
namespace cudf {
namespace detail {
// From benchmark data, the fused kernel optimization appears to perform better
// when there are more than a trivial number of columns, or when the null mask
// can also be computed at the same time
constexpr bool use_fused_kernel_heuristic(bool const has_nulls, size_t const num_columns)
{
return has_nulls || num_columns > 4;
}
auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_view stream)
{
// Create device views for each input view
using CDViewPtr = decltype(
column_device_view::create(std::declval<column_view>(), std::declval<rmm::cuda_stream_view>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(views.begin(), views.end(), device_view_owners.begin(), [stream](auto const& col) {
return column_device_view::create(col, stream);
});
// Assemble contiguous array of device views
auto device_views = thrust::host_vector<column_device_view>();
device_views.reserve(views.size());
std::transform(device_view_owners.cbegin(),
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
auto d_views = make_device_uvector_async(device_views, stream);
// Compute the partition offsets
auto offsets = thrust::host_vector<size_t>(views.size() + 1);
thrust::transform_inclusive_scan(
thrust::host,
device_views.cbegin(),
device_views.cend(),
std::next(offsets.begin()),
[](auto const& col) { return col.size(); },
thrust::plus{});
auto d_offsets = make_device_uvector_async(offsets, stream);
auto const output_size = offsets.back();
return std::make_tuple(
std::move(device_view_owners), std::move(d_views), std::move(d_offsets), output_size);
}
/**
* @brief Concatenates the null mask bits of all the column device views in the
* `views` array to the destination bitmask.
*
* @param views Array of column_device_view
* @param output_offsets Prefix sum of sizes of elements of `views`
* @param number_of_views Size of `views` array
* @param dest_mask The output buffer to copy null masks into
* @param number_of_mask_bits The total number of null masks bits that are being
* copied
*/
__global__ void concatenate_masks_kernel(column_device_view const* views,
size_t const* output_offsets,
size_type number_of_views,
bitmask_type* dest_mask,
size_type number_of_mask_bits)
{
size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x;
auto active_mask = __ballot_sync(0xFFFF'FFFF, mask_index < number_of_mask_bits);
while (mask_index < number_of_mask_bits) {
size_type const source_view_index =
thrust::upper_bound(
thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) -
output_offsets - 1;
bool bit_is_set = true;
if (source_view_index < number_of_views) {
size_type const column_element_index = mask_index - output_offsets[source_view_index];
bit_is_set = views[source_view_index].is_valid(column_element_index);
}
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
if (threadIdx.x % detail::warp_size == 0) { dest_mask[word_index(mask_index)] = new_word; }
mask_index += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits);
}
}
void concatenate_masks(device_span<column_device_view const> d_views,
device_span<size_t const> d_offsets,
bitmask_type* dest_mask,
size_type output_size,
rmm::cuda_stream_view stream)
{
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
concatenate_masks_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
d_views.data(),
d_offsets.data(),
static_cast<size_type>(d_views.size()),
dest_mask,
output_size);
}
void concatenate_masks(host_span<column_view const> views,
bitmask_type* dest_mask,
rmm::cuda_stream_view stream)
{
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
concatenate_masks(d_views, d_offsets, dest_mask, output_size, stream);
}
template <typename T, size_type block_size, bool Nullable>
__global__ void fused_concatenate_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_type num_input_views,
mutable_column_device_view output_view,
size_type* out_valid_count)
{
auto const output_size = output_view.size();
auto* output_data = output_view.data<T>();
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
// Copy input data to output
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
auto const* input_data = input_view.data<T>();
output_data[output_index] = input_data[offset_index];
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % detail::warp_size == 0) {
output_view.null_mask()[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
if (Nullable) {
using detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
template <typename T>
std::unique_ptr<column> fused_concatenate(host_span<column_view const> views,
bool const has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using mask_policy = cudf::mask_allocation_policy;
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
CUDF_EXPECTS(output_size < static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated rows exceeds size_type range");
// Allocate output
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto out_col = detail::allocate_like(views.front(), output_size, policy, stream, mr);
out_col->set_null_count(0); // prevent null count from being materialized
auto out_view = out_col->mutable_view();
auto d_out_view = mutable_column_device_view::create(out_view, stream);
rmm::device_scalar<size_type> d_valid_count(0, stream);
// Launch kernel
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
auto const kernel = has_nulls ? fused_concatenate_kernel<T, block_size, true>
: fused_concatenate_kernel<T, block_size, false>;
kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
d_views.data(),
d_offsets.data(),
static_cast<size_type>(d_views.size()),
*d_out_view,
d_valid_count.data());
if (has_nulls) { out_col->set_null_count(output_size - d_valid_count.value(stream)); }
return out_col;
}
template <typename T>
std::unique_ptr<column> for_each_concatenate(host_span<column_view const> views,
bool const has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
using mask_policy = cudf::mask_allocation_policy;
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto col = cudf::detail::allocate_like(views.front(), total_element_count, policy, stream, mr);
col->set_null_count(0); // prevent null count from being materialized...
auto m_view = col->mutable_view(); // ...when we take a mutable view
auto count = 0;
for (auto& v : views) {
thrust::copy(rmm::exec_policy(stream), v.begin<T>(), v.end<T>(), m_view.begin<T>() + count);
count += v.size();
}
// If concatenated column is nullable, proceed to calculate it
if (has_nulls) {
cudf::detail::concatenate_masks(views, (col->mutable_view()).null_mask(), stream);
}
return col;
}
struct concatenate_dispatch {
host_span<column_view const> views;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
// fixed width
template <typename T>
std::unique_ptr<column> operator()()
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](auto const& col) { return col.has_nulls(); });
// Use a heuristic to guess when the fused kernel will be faster
if (use_fused_kernel_heuristic(has_nulls, views.size())) {
return fused_concatenate<T>(views, has_nulls, stream, mr);
} else {
return for_each_concatenate<T>(views, has_nulls, stream, mr);
}
}
};
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::dictionary32>()
{
return cudf::dictionary::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::string_view>()
{
return cudf::strings::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::list_view>()
{
return cudf::lists::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::struct_view>()
{
return cudf::structs::detail::concatenate(views, stream, mr);
}
namespace {
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream);
/**
* @brief Functor for traversing child columns and recursively verifying concatenation
* bounds and types.
*/
class traverse_children {
public:
// nothing to do for simple types.
template <typename T>
void operator()(host_span<column_view const>, rmm::cuda_stream_view)
{
}
private:
// verify length of concatenated offsets.
void check_offsets_size(host_span<column_view const> cols)
{
// offsets. we can't just add up the total sizes of all offset child columns because each one
// has an extra value, regardless of the # of parent rows. So we have to add up the total # of
// rows in the base column and add 1 at the end
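    // e.g. concatenating columns with 3 and 4 rows: their offsets children hold
    // 4 + 5 = 9 values, but the concatenated result only needs 3 + 4 + 1 = 8 offsets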
size_t const total_offset_count =
std::accumulate(cols.begin(),
cols.end(),
std::size_t{},
[](size_t a, auto const& b) -> size_t { return a + b.size(); }) +
1;
// note: output text must include "exceeds size_type range" for python error handling
CUDF_EXPECTS(total_offset_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated offsets exceeds size_type range");
}
};
template <>
void traverse_children::operator()<cudf::string_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// verify offsets
check_offsets_size(cols);
// chars
size_t const total_char_count = std::accumulate(
cols.begin(), cols.end(), std::size_t{}, [stream](size_t a, auto const& b) -> size_t {
strings_column_view scv(b);
return a + (scv.is_empty() ? 0
// if the column is unsliced, skip the offset retrieval.
: scv.offset() > 0
? cudf::detail::get_value<offset_type>(
scv.offsets(), scv.offset() + scv.size(), stream) -
cudf::detail::get_value<offset_type>(scv.offsets(), scv.offset(), stream)
// if the offset() is 0, it can still be sliced to a shorter length. in this case
// we only need to read a single offset. otherwise just return the full length
// (chars_size())
: scv.size() + 1 == scv.offsets().size()
? scv.chars_size()
: cudf::detail::get_value<offset_type>(scv.offsets(), scv.size(), stream));
});
// note: output text must include "exceeds size_type range" for python error handling
CUDF_EXPECTS(total_char_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated chars exceeds size_type range");
}
template <>
void traverse_children::operator()<cudf::struct_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// march each child
auto child_iter = thrust::make_counting_iterator(0);
auto const num_children = cols.front().num_children();
std::vector<column_view> nth_children;
nth_children.reserve(cols.size());
std::for_each(child_iter, child_iter + num_children, [&](auto child_index) {
std::transform(cols.begin(),
cols.end(),
std::back_inserter(nth_children),
[child_index, stream](column_view const& col) {
structs_column_view scv(col);
return scv.get_sliced_child(child_index);
});
bounds_and_type_check(nth_children, stream);
nth_children.clear();
});
}
template <>
void traverse_children::operator()<cudf::list_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// verify offsets
check_offsets_size(cols);
// recurse into the child columns
std::vector<column_view> nth_children;
nth_children.reserve(cols.size());
std::transform(
cols.begin(), cols.end(), std::back_inserter(nth_children), [stream](column_view const& col) {
lists_column_view lcv(col);
return lcv.get_sliced_child(stream);
});
bounds_and_type_check(nth_children, stream);
}
/**
* @brief Verifies that the sum of the sizes of all the columns to be concatenated
* will not exceed the max value of size_type, and verifies all column types match
*
* @param columns_to_concat Span of columns to check
*
* @throws cudf::logic_error if the total length of the concatenated columns would
* exceed the max value of size_type
*
 * @throws cudf::logic_error if the input column types do not all match
*/
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(std::all_of(cols.begin(),
cols.end(),
[expected_type = cols.front().type()](auto const& c) {
return c.type() == expected_type;
}),
"Type mismatch in columns to concatenate.");
// total size of all concatenated rows
size_t const total_row_count =
std::accumulate(cols.begin(), cols.end(), std::size_t{}, [](size_t a, auto const& b) {
return a + static_cast<size_t>(b.size());
});
// note: output text must include "exceeds size_type range" for python error handling
CUDF_EXPECTS(total_row_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated rows exceeds size_type range");
// traverse children
cudf::type_dispatcher(cols.front().type(), traverse_children{}, cols, stream);
}
} // anonymous namespace
// Concatenates the elements from a vector of column_views
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(not columns_to_concat.empty(), "Unexpected empty list of columns to concatenate.");
// verify all types match and that we won't overflow size_type in output size
bounds_and_type_check(columns_to_concat, stream);
if (std::all_of(columns_to_concat.begin(), columns_to_concat.end(), [](column_view const& c) {
return c.is_empty();
})) {
return empty_like(columns_to_concat.front());
}
return type_dispatcher<dispatch_storage_type>(
columns_to_concat.front().type(), concatenate_dispatch{columns_to_concat, stream, mr});
}
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (tables_to_concat.empty()) { return std::make_unique<table>(); }
table_view const first_table = tables_to_concat.front();
CUDF_EXPECTS(std::all_of(tables_to_concat.begin(),
tables_to_concat.end(),
[&first_table](auto const& t) {
return t.num_columns() == first_table.num_columns();
}),
"Mismatch in table columns to concatenate.");
std::vector<std::unique_ptr<column>> concat_columns;
for (size_type i = 0; i < first_table.num_columns(); ++i) {
std::vector<column_view> cols;
std::transform(tables_to_concat.begin(),
tables_to_concat.end(),
std::back_inserter(cols),
[i](auto const& t) { return t.column(i); });
// verify all types match and that we won't overflow size_type in output size
bounds_and_type_check(cols, stream);
concat_columns.emplace_back(detail::concatenate(cols, stream, mr));
}
return std::make_unique<table>(std::move(concat_columns));
}
} // namespace detail
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
rmm::mr::device_memory_resource* mr)
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); });
if (has_nulls) {
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
rmm::device_buffer null_mask =
create_null_mask(total_element_count, mask_state::UNINITIALIZED, mr);
detail::concatenate_masks(
views, static_cast<bitmask_type*>(null_mask.data()), rmm::cuda_stream_default);
return null_mask;
}
// no nulls, so return an empty device buffer
return rmm::device_buffer{0, rmm::cuda_stream_default, mr};
}
// Concatenates the elements from a vector of column_views
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(columns_to_concat, rmm::cuda_stream_default, mr);
}
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(tables_to_concat, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
c339a4b0b013775862704c325a46c7522928522c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/fvm_types.hpp>
#include "gpu_api.hpp"
#include "gpu_common.hpp"
#include "stimulus.hpp"
namespace arb {
namespace gpu {
namespace kernel {
__global__
void stimulus_current_impl(int n, stimulus_pp pp) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if (i<n) {
auto t = pp.vec_t_[pp.vec_ci_[i]];
if (t>=pp.delay[i] && t<pp.delay[i]+pp.duration[i]) {
// use subtraction because the electrode currents are specified
// in terms of current into the compartment
gpu_atomic_add(pp.vec_i_+pp.node_index_[i], -pp.weight_[i]*pp.amplitude[i]);
}
}
}
} // namespace kernel
void stimulus_current_impl(int n, const stimulus_pp& pp) {
constexpr unsigned block_dim = 128;
const unsigned grid_dim = impl::block_count(n, block_dim);
hipLaunchKernelGGL(( kernel::stimulus_current_impl), dim3(grid_dim), dim3(block_dim), 0, 0, n, pp);
}
} // namespace gpu
} // namespace arb
| c339a4b0b013775862704c325a46c7522928522c.cu | #include <arbor/fvm_types.hpp>
#include "gpu_api.hpp"
#include "gpu_common.hpp"
#include "stimulus.hpp"
namespace arb {
namespace gpu {
namespace kernel {
__global__
void stimulus_current_impl(int n, stimulus_pp pp) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
if (i<n) {
auto t = pp.vec_t_[pp.vec_ci_[i]];
if (t>=pp.delay[i] && t<pp.delay[i]+pp.duration[i]) {
// use subtraction because the electrode currents are specified
// in terms of current into the compartment
gpu_atomic_add(pp.vec_i_+pp.node_index_[i], -pp.weight_[i]*pp.amplitude[i]);
}
}
}
} // namespace kernel
void stimulus_current_impl(int n, const stimulus_pp& pp) {
constexpr unsigned block_dim = 128;
const unsigned grid_dim = impl::block_count(n, block_dim);
kernel::stimulus_current_impl<<<grid_dim, block_dim>>>(n, pp);
}
} // namespace gpu
} // namespace arb
|
ad7c55d3aff4f16a016da917c88c30fb8329f359.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setValueSomestupidlylongnamefoobarfoobarfoobar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
int idx = 1;
float value = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((setValueSomestupidlylongnamefoobarfoobarfoobar), dim3(gridBlock), dim3(threadBlock), 0, 0, data, idx, value);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((setValueSomestupidlylongnamefoobarfoobarfoobar), dim3(gridBlock), dim3(threadBlock), 0, 0, data, idx, value);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((setValueSomestupidlylongnamefoobarfoobarfoobar), dim3(gridBlock), dim3(threadBlock), 0, 0, data, idx, value);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ad7c55d3aff4f16a016da917c88c30fb8329f359.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setValueSomestupidlylongnamefoobarfoobarfoobar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
int idx = 1;
float value = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setValueSomestupidlylongnamefoobarfoobarfoobar<<<gridBlock,threadBlock>>>(data,idx,value);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setValueSomestupidlylongnamefoobarfoobarfoobar<<<gridBlock,threadBlock>>>(data,idx,value);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setValueSomestupidlylongnamefoobarfoobarfoobar<<<gridBlock,threadBlock>>>(data,idx,value);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
603ed172a535b34694a7bb7076934210488ae2a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> s d c
@author Theo Mary
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl2_full(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl2_lower(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl2_upper(int m, int n, const double *D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
ZLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in]
dD DOUBLE PRECISION vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
\param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( zlascl2_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( zlascl2_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( zlascl2_full) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda);
}
}
/**
@see magmablas_zlascl2_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD,
magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *info )
{
magmablas_zlascl2_q( type, m, n, dD, dA, ldda, magma_stream, info );
}
| 603ed172a535b34694a7bb7076934210488ae2a4.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> s d c
@author Theo Mary
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl2_full(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl2_lower(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl2_upper(int m, int n, const double *D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for(int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
ZLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in]
dD DOUBLE PRECISION vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
\param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2_q(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
zlascl2_lower <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
zlascl2_upper <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
zlascl2_full <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda);
}
}
/**
@see magmablas_zlascl2_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD,
magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *info )
{
magmablas_zlascl2_q( type, m, n, dD, dA, ldda, magma_stream, info );
}
|
bfeecf686456103613165c514cb97040a7ab20ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <malloc.h>
#include <hip/hip_runtime.h>
#define SIZE 65535
__global__ void VectorAdd(int *a, int *b, int *c)
{
int i = blockIdx.x;
if(i<SIZE)
c[i]=a[i]+b[i];
}
int main()
{
clock_t start = clock();
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
hipMalloc(&d_a, SIZE*sizeof(int));
hipMalloc(&d_b, SIZE*sizeof(int));
hipMalloc(&d_c, SIZE*sizeof(int));
for(int i=0;i<SIZE;i++)
{
a[i]=i;
b[i]=i;
c[i]=0;
}
hipMemcpy(d_a, a, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, SIZE*sizeof(int), hipMemcpyHostToDevice);
dim3 dimGrid(SIZE,1,1);  // one block per element: the kernel indexes with blockIdx.x
dim3 dimBlock(1,1,1);
hipLaunchKernelGGL(( VectorAdd), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_c);
hipMemcpy(c, d_c, SIZE*sizeof(int), hipMemcpyDeviceToHost);
for(int i=0;i<10; i++)
printf("%d ",c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
printf("Tiempo transcurrido: %f \n ",((double)clock() - start) / CLOCKS_PER_SEC);
return 0;
}
| bfeecf686456103613165c514cb97040a7ab20ae.cu | #include <stdio.h>
#include <malloc.h>
#include <cuda.h>
#define SIZE 65535
__global__ void VectorAdd(int *a, int *b, int *c)
{
int i = blockIdx.x;
if(i<SIZE)
c[i]=a[i]+b[i];
}
int main()
{
clock_t start = clock();
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
cudaMalloc(&d_a, SIZE*sizeof(int));
cudaMalloc(&d_b, SIZE*sizeof(int));
cudaMalloc(&d_c, SIZE*sizeof(int));
for(int i=0;i<SIZE;i++)
{
a[i]=i;
b[i]=i;
c[i]=0;
}
cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice);
dim3 dimGrid(SIZE,1,1);  // one block per element: the kernel indexes with blockIdx.x
dim3 dimBlock(1,1,1);
VectorAdd<<<dimGrid,dimBlock>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0;i<10; i++)
printf("%d ",c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("Tiempo transcurrido: %f \n ",((double)clock() - start) / CLOCKS_PER_SEC);
return 0;
}
|
73cc2d49175e7e8c431dfc83f67669cdc59d9cf5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#define blockSize 512
#define real double
__global__ void redukcja (int N, real* v, real* out)
{
size_t s = threadIdx.x + blockIdx.x * blockDim.x;
int sID = threadIdx.x;
size_t i;
__shared__ real pom[blockSize];
pom[sID] = 0;
if (s<N)
pom[sID] = v[s];
__syncthreads();
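    // tree reduction in shared memory: at step i every thread whose index is a
    // multiple of 2*i adds in the partial sum i slots to the right; after
    // log2(blockSize) steps pom[0] holds the block total written to out[blockIdx.x]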
for (i=1; i<blockSize; i*=2){
if (sID%(2*i)==0){
pom[sID] += pom[sID + i];
}
__syncthreads();
}
if (sID==0) out[blockIdx.x] = pom[0];
}
__global__ void wypelnij (int N, real* v)
{
size_t s = threadIdx.x + blockIdx.x * blockDim.x;
if (s<N) {
v[s] = sin(s * 2. * M_PI / 10.);
}
}
int main ()
{
size_t N = blockSize * blockSize * blockSize;
int blocks = (N + blockSize-1) / blockSize;
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
real* v;
hipMalloc( (void**) &v, N * sizeof(real) );
real* outV;
hipMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) );
real* outVV;
hipMalloc( (void**) &outVV, blockSize * sizeof(real) );
real out;
int i;
int M = 10;
hipLaunchKernelGGL(( wypelnij) , dim3(blocks), dim3(blockSize), 0, 0, N, v);
hipEventRecord(event1, 0);
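  // each timed iteration reduces the N inputs in three passes:
  // N -> blockSize^2 partial sums (outV) -> blockSize (outVV) -> a single value in v[0]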
for (i=0; i<M; i++){
hipLaunchKernelGGL(( redukcja), dim3(blocks), dim3(blockSize), 0, 0, N, v, outV);
hipLaunchKernelGGL(( redukcja), dim3(blockSize), dim3(blockSize), 0, 0, blockSize*blockSize, outV, outVV);
hipLaunchKernelGGL(( redukcja), dim3(1), dim3(blockSize), 0, 0, blockSize, outVV, v);
}
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
hipMemcpy (&out, v, 1 * sizeof(real), hipMemcpyDeviceToHost);
printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out);
return 0;
}
| 73cc2d49175e7e8c431dfc83f67669cdc59d9cf5.cu | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#define blockSize 512
#define real double
__global__ void redukcja (int N, real* v, real* out)
{
size_t s = threadIdx.x + blockIdx.x * blockDim.x;
int sID = threadIdx.x;
size_t i;
__shared__ real pom[blockSize];
pom[sID] = 0;
if (s<N)
pom[sID] = v[s];
__syncthreads();
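    // tree reduction in shared memory: at step i every thread whose index is a
    // multiple of 2*i adds in the partial sum i slots to the right; after
    // log2(blockSize) steps pom[0] holds the block total written to out[blockIdx.x]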
for (i=1; i<blockSize; i*=2){
if (sID%(2*i)==0){
pom[sID] += pom[sID + i];
}
__syncthreads();
}
if (sID==0) out[blockIdx.x] = pom[0];
}
__global__ void wypelnij (int N, real* v)
{
size_t s = threadIdx.x + blockIdx.x * blockDim.x;
if (s<N) {
v[s] = sin(s * 2. * M_PI / 10.);
}
}
int main ()
{
size_t N = blockSize * blockSize * blockSize;
int blocks = (N + blockSize-1) / blockSize;
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
real* v;
cudaMalloc( (void**) &v, N * sizeof(real) );
real* outV;
cudaMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) );
real* outVV;
cudaMalloc( (void**) &outVV, blockSize * sizeof(real) );
real out;
int i;
int M = 10;
wypelnij <<<blocks, blockSize>>> (N, v);
cudaEventRecord(event1, 0);
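  // each timed iteration reduces the N inputs in three passes:
  // N -> blockSize^2 partial sums (outV) -> blockSize (outVV) -> a single value in v[0]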
for (i=0; i<M; i++){
redukcja<<<blocks, blockSize>>> (N, v, outV);
redukcja<<<blockSize, blockSize>>> (blockSize*blockSize, outV, outVV);
redukcja<<<1, blockSize>>> (blockSize, outVV, v);
}
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
cudaMemcpy (&out, v, 1 * sizeof(real), cudaMemcpyDeviceToHost);
printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out);
return 0;
}
|
8e56cb529d7c070020e5eb370d9379d92d947e2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "consts.h"
__device__ void d_dec2bin(int decimal, char *bin, int size)
{
int remain;
do
{
remain = decimal%2;
decimal = decimal/2;
bin[size--] = (remain==0?'0':'1');
}while(decimal>0);
}
__device__ int d_bin2dec(char *bin, int size)
{
int i,n,sum=0;
for(i=0;i<size;i++)
{
n = (bin[i]-'0') * powf(2,size-(i+1));
sum+=n;
}
return sum;
}
__device__ void d_hex2bin(char *hex, char *bin, int h_size, int b_size)
{
int i,k;
k=0;
for(i=0;i<h_size;i++)
{
if(hex[i]=='0') d_dec2bin(0,&bin[k],3);
else if(hex[i]=='1') d_dec2bin(1,&bin[k],3);
else if(hex[i]=='2') d_dec2bin(2,&bin[k],3);
else if(hex[i]=='3') d_dec2bin(3,&bin[k],3);
else if(hex[i]=='4') d_dec2bin(4,&bin[k],3);
else if(hex[i]=='5') d_dec2bin(5,&bin[k],3);
else if(hex[i]=='6') d_dec2bin(6,&bin[k],3);
else if(hex[i]=='7') d_dec2bin(7,&bin[k],3);
else if(hex[i]=='8') d_dec2bin(8,&bin[k],3);
else if(hex[i]=='9') d_dec2bin(9,&bin[k],3);
else if(hex[i]=='A' || hex[i]=='a') d_dec2bin(10,&bin[k],3);
else if(hex[i]=='B' || hex[i]=='b') d_dec2bin(11,&bin[k],3);
else if(hex[i]=='C' || hex[i]=='c') d_dec2bin(12,&bin[k],3);
else if(hex[i]=='D' || hex[i]=='d') d_dec2bin(13,&bin[k],3);
else if(hex[i]=='E' || hex[i]=='e') d_dec2bin(14,&bin[k],3);
else if(hex[i]=='F' || hex[i]=='f') d_dec2bin(15,&bin[k],3);
k+=4;
}
}
__device__ void d_bin2hex(char *hex, char *bin, int h_size, int b_size)
{
int i;
int dec = 0;
int pos = 0;
for(i=0;i<h_size;i++)
{
dec = 0;
dec=d_bin2dec(&bin[pos],4);
switch(dec)
{
case 0: hex[i]='0';break;
case 1: hex[i]='1';break;
case 2: hex[i]='2';break;
case 3: hex[i]='3';break;
case 4: hex[i]='4';break;
case 5: hex[i]='5';break;
case 6: hex[i]='6';break;
case 7: hex[i]='7';break;
case 8: hex[i]='8';break;
case 9: hex[i]='9';break;
case 10: hex[i]='a';break;
case 11: hex[i]='b';break;
case 12: hex[i]='c';break;
case 13: hex[i]='d';break;
case 14: hex[i]='e';break;
case 15: hex[i]='f';break;
}
pos+=4;
}
}
__global__ void executeCA(Lattice *lat, char *rule)
{
int t_idx = blockDim.x*blockIdx.x + threadIdx.x;
if(t_idx < MAX_LATS)
{
int dif = 0;
int pos = 0;
char res[LAT_SIZE];
char bin[RADIUS*2+1];
int idx = 0;
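        // for every cell, gather the (2*RADIUS+1)-cell neighborhood with periodic
        // wrap-around, interpret it as a binary number and use it to index the rule table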
for(int i=0;i<CA_RUNS;i++)
{
memset(res,'0',LAT_SIZE);
pos=0;
dif=0;
for(int j = 0; j < LAT_SIZE;j++)
{
dif = j - RADIUS;
if(dif < 0) pos = LAT_SIZE+dif;
else pos = dif;
for(int k = 0; k < RADIUS*2+1;k++)
{
bin[k] = lat[t_idx].cells[pos];
pos++;
if(pos==LAT_SIZE) pos = 0;
}
idx = d_bin2dec(bin,RADIUS*2+1);
if(idx >=0 && idx <RULE_SIZE)
res[j] = rule[idx];
memset(bin,'0',RADIUS*2+1);
}
memcpy(lat[t_idx].cells,res,LAT_SIZE);
}
}
}
| 8e56cb529d7c070020e5eb370d9379d92d947e2f.cu | #include "kernel.h"
#include "consts.h"
__device__ void d_dec2bin(int decimal, char *bin, int size)
{
int remain;
do
{
remain = decimal%2;
decimal = decimal/2;
bin[size--] = (remain==0?'0':'1');
}while(decimal>0);
}
__device__ int d_bin2dec(char *bin, int size)
{
int i,n,sum=0;
for(i=0;i<size;i++)
{
n = (bin[i]-'0') * powf(2,size-(i+1));
sum+=n;
}
return sum;
}
__device__ void d_hex2bin(char *hex, char *bin, int h_size, int b_size)
{
int i,k;
k=0;
for(i=0;i<h_size;i++)
{
if(hex[i]=='0') d_dec2bin(0,&bin[k],3);
else if(hex[i]=='1') d_dec2bin(1,&bin[k],3);
else if(hex[i]=='2') d_dec2bin(2,&bin[k],3);
else if(hex[i]=='3') d_dec2bin(3,&bin[k],3);
else if(hex[i]=='4') d_dec2bin(4,&bin[k],3);
else if(hex[i]=='5') d_dec2bin(5,&bin[k],3);
else if(hex[i]=='6') d_dec2bin(6,&bin[k],3);
else if(hex[i]=='7') d_dec2bin(7,&bin[k],3);
else if(hex[i]=='8') d_dec2bin(8,&bin[k],3);
else if(hex[i]=='9') d_dec2bin(9,&bin[k],3);
else if(hex[i]=='A' || hex[i]=='a') d_dec2bin(10,&bin[k],3);
else if(hex[i]=='B' || hex[i]=='b') d_dec2bin(11,&bin[k],3);
else if(hex[i]=='C' || hex[i]=='c') d_dec2bin(12,&bin[k],3);
else if(hex[i]=='D' || hex[i]=='d') d_dec2bin(13,&bin[k],3);
else if(hex[i]=='E' || hex[i]=='e') d_dec2bin(14,&bin[k],3);
else if(hex[i]=='F' || hex[i]=='f') d_dec2bin(15,&bin[k],3);
k+=4;
}
}
__device__ void d_bin2hex(char *hex, char *bin, int h_size, int b_size)
{
int i;
int dec = 0;
int pos = 0;
for(i=0;i<h_size;i++)
{
dec = 0;
dec=d_bin2dec(&bin[pos],4);
switch(dec)
{
case 0: hex[i]='0';break;
case 1: hex[i]='1';break;
case 2: hex[i]='2';break;
case 3: hex[i]='3';break;
case 4: hex[i]='4';break;
case 5: hex[i]='5';break;
case 6: hex[i]='6';break;
case 7: hex[i]='7';break;
case 8: hex[i]='8';break;
case 9: hex[i]='9';break;
case 10: hex[i]='a';break;
case 11: hex[i]='b';break;
case 12: hex[i]='c';break;
case 13: hex[i]='d';break;
case 14: hex[i]='e';break;
case 15: hex[i]='f';break;
}
pos+=4;
}
}
__global__ void executeCA(Lattice *lat, char *rule)
{
int t_idx = blockDim.x*blockIdx.x + threadIdx.x;
if(t_idx < MAX_LATS)
{
int dif = 0;
int pos = 0;
char res[LAT_SIZE];
char bin[RADIUS*2+1];
int idx = 0;
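        // for every cell, gather the (2*RADIUS+1)-cell neighborhood with periodic
        // wrap-around, interpret it as a binary number and use it to index the rule table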
for(int i=0;i<CA_RUNS;i++)
{
memset(res,'0',LAT_SIZE);
pos=0;
dif=0;
for(int j = 0; j < LAT_SIZE;j++)
{
dif = j - RADIUS;
if(dif < 0) pos = LAT_SIZE+dif;
else pos = dif;
for(int k = 0; k < RADIUS*2+1;k++)
{
bin[k] = lat[t_idx].cells[pos];
pos++;
if(pos==LAT_SIZE) pos = 0;
}
idx = d_bin2dec(bin,RADIUS*2+1);
if(idx >=0 && idx <RULE_SIZE)
res[j] = rule[idx];
memset(bin,'0',RADIUS*2+1);
}
memcpy(lat[t_idx].cells,res,LAT_SIZE);
}
}
}
|
9453862aefeed4ad4e8b05a49db6c7e8a2e0cb64.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "makeError.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *err = NULL;
hipMalloc(&err, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
unsigned int Y = 1;
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((makeError), dim3(gridBlock), dim3(threadBlock), 0, 0, err, output, Y, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((makeError), dim3(gridBlock), dim3(threadBlock), 0, 0, err, output, Y, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((makeError), dim3(gridBlock), dim3(threadBlock), 0, 0, err, output, Y, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9453862aefeed4ad4e8b05a49db6c7e8a2e0cb64.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "makeError.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *err = NULL;
cudaMalloc(&err, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
unsigned int Y = 1;
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
makeError<<<gridBlock,threadBlock>>>(err,output,Y,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
makeError<<<gridBlock,threadBlock>>>(err,output,Y,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
makeError<<<gridBlock,threadBlock>>>(err,output,Y,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
21d139c5ba7294e56acc542e857ba77e992e0075.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/AccumulateType.h"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
const int WARP_SIZE = 32;
const int MODE_SUM = 0;
const int MODE_MEAN = 1;
const int MODE_MAX = 2;
namespace at {
namespace native {
namespace {
template <typename scalar_t>
__global__ void EmbeddingBag_updateOutputKernel(
int64_t *input, int64_t *offsets, scalar_t *weight, scalar_t *output,
int64_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t stride,
int mode, int64_t *bag_size, int64_t *max_indices) {
// the strategy here is that each bag x feature is handled by a single thread
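  // a "chunk" is one bag crossed with a blockDim.x-wide slice of the embedding
  // width (stride); each block walks blockDim.y chunks at a time and threadIdx.x
  // selects the feature within the slice, so every (bag, feature) pair is visited once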
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim;
int64_t begin = offsets[bag];
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
assert(end >= begin);
accscalar_t weightFeatSum = 0;
scalar_t weightFeatMax;
int64_t bag_size_ = 0;
int64_t maxWord = -1;
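        // walk this bag's slice of the flattened indices; the featureDim == 0 thread
        // also records each index's bag in offset2bag so the backward kernels can map
        // gradOutput rows back to their bag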
for (int64_t emb = begin; emb < end; emb++) {
const int weightRow = ((int)input[emb]) * stride;
scalar_t weightValue = weightFeat[weightRow];
if (mode == MODE_MAX) {
if (emb == begin || weightValue > weightFeatMax) {
weightFeatMax = weightValue;
maxWord = input[emb];
}
} else {
weightFeatSum += static_cast<accscalar_t>(weightValue);
}
bag_size_++;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
if (mode == MODE_MEAN) {
weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_);
bag_size[bag] = bag_size_;
}
if (mode == MODE_MEAN || mode == MODE_SUM) {
output[bag * stride + featureDim] = static_cast<scalar_t>(weightFeatSum);
}
else if (mode == MODE_MAX) {
max_indices[bag * stride + featureDim] = maxWord;
output[bag * stride + featureDim] = weightFeatMax;
}
}
}
}
// FIXME: removed the accGradParametersKernelByFeature case present in
// LookupTable. That kernel is faster at small sizes (<768 indices), which
// does not need EmbeddingBag (LookupTable + Sum works fine), but would
// still be nice to not be slow in that case.
template <typename scalar_t>
__global__ void EmbeddingBag_accGradParametersKernel_sum_avg(
int64_t *input, int64_t *indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t *offset2bag, int64_t *count, ptrdiff_t numel,
int64_t stride, int mode, int64_t *bag_size) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same value as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values proceessed by each thread (grain size)
const int SZ = 4;
if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) {
do {
const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weightRow = ((int)input[idx]) * stride;
// Note: only this line changes from LookupTable_accgradParametersKernel
const int origRow = ((int)indices[idx]);
const int seq_number = offset2bag[origRow];
const int gradOutputRow = ((int)seq_number) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride) {
gradient[ii] =
static_cast<accscalar_t>(gradOutput[gradOutputRow + featureDim]);
if (mode == MODE_MEAN) {
gradient[ii] /= bag_size[seq_number];
}
weight[ii] =
static_cast<accscalar_t>(gradWeight[weightRow + featureDim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride) {
gradWeight[weightRow + featureDim] =
static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
Tensor embedding_bag_backward_cuda_sum_avg(
const Tensor &grad,
const Tensor &indices,
const Tensor &offset2bag,
const Tensor &bag_size_,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode) {
Tensor &bag_size = const_cast<Tensor &>(bag_size_);
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.type());
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
ptrdiff_t numel = indices.numel();
int64_t stride = grad_weight.stride(0);
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + numel, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel,
thrust::make_constant_iterator(1),
count_data);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy, thrust::make_reverse_iterator(sorted_data + numel),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + numel),
thrust::make_reverse_iterator(count_data + numel),
thrust::equal_to<int64_t>(), thrust::maximum<int64_t>());
}
dim3 grid(THCCeilDiv(numel, (ptrdiff_t)4), THCCeilDiv(stride, (int64_t)128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.type(), "embedding_bag_backward_cuda_sum_avg_kernel", [&] {
hipLaunchKernelGGL(( EmbeddingBag_accGradParametersKernel_sum_avg<
scalar_t>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data<int64_t>(), orig_indices.data<int64_t>(),
grad.data<scalar_t>(), grad_weight.data<scalar_t>(),
offset2bag.data<int64_t>(),
count.defined() ? count.data<int64_t>() : nullptr, numel, stride,
mode, bag_size.data<int64_t>());
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
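// Backward for MODE_MAX: each (bag, featureDim) cell adds its output gradient
// to the weight row recorded in max_indices for that cell; atomicAdd is used
// because several bags may have selected the same weight row.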
template <typename scalar_t>
__global__ void EmbeddingBag_accGradParametersKernel_max(
int64_t *max_indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t stride, int64_t numBags) {
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
int64_t word_idx = max_indices[bag * stride + featureDim];
atomicAdd(&(gradWeight[word_idx * stride + featureDim]), gradOutput[bag * stride + featureDim]);
}
}
}
Tensor embedding_bag_backward_cuda_max(const Tensor &grad,
const Tensor &max_indices,
int64_t num_weights) {
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.type());
int64_t stride = grad_weight.stride(0);
int64_t numBags = grad.size(0);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
dim3 block = dim3(32, 8);
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.type(), "embedding_bag_backward_cuda_max", [&] {
hipLaunchKernelGGL(( EmbeddingBag_accGradParametersKernel_max<
scalar_t>), dim3(grid), dim3(block), 0, stream,
max_indices.data<int64_t>(), grad.data<scalar_t>(),
grad_weight.data<scalar_t>(), stride, numBags);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
}
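// Forward entry point: reduces the weight rows selected by indices into one
// output row per bag (sum / mean / max according to mode) and also fills
// offset2bag plus, depending on mode, bag_size (mean) and max_indices (max),
// which embedding_bag_backward_cuda consumes.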
std::tuple<Tensor, Tensor, Tensor, Tensor>
embedding_bag_cuda(const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const bool scale_grad_by_freq,
const int64_t mode, bool sparse) {
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_bag_cuda", indices_arg, kLong);
checkContiguous("embedding_bag_cuda", indices_arg);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarType("embedding_bag_cuda", offsets_arg, kLong);
checkContiguous("embedding_bag_cuda", offsets_arg);
auto weight_arg = TensorArg(weight, "weight", 1);
checkContiguous("embedding_bag_cuda", weight_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg);
int64_t numIndices = indices.size(0);
int64_t numBags = offsets.size(0);
int64_t stride = weight.size(1);
auto bag_size = at::zeros(offsets.sizes(), indices.type());
auto offset2bag =
at::zeros({indices.size(0)}, indices.type()); // offset2bag = [0 0 0 0 0]
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
auto output = at::zeros({offsets.size(0), weight.size(1)}, weight.type());
Tensor max_indices;
if (mode == MODE_MAX) {
max_indices = at::zeros({offsets.size(0), weight.size(1)}, indices.type());
} else {
// No need to allocate if we aren't doing a backwards pass
max_indices = at::zeros({0}, indices.type());
}
dim3 block = dim3(32, 8);
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(weight.type(), "embedding_bag_cuda", [&] {
hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
indices.data<int64_t>(), offsets.data<int64_t>(),
weight.data<scalar_t>(), output.data<scalar_t>(),
offset2bag.data<int64_t>(), numIndices, numBags, stride, mode,
bag_size.data<int64_t>(), mode == MODE_MAX ? max_indices.data<int64_t>() : NULL);
});
THCudaCheck(hipGetLastError());
return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices);
}
Tensor embedding_bag_backward_cuda(const Tensor &grad_, const Tensor &indices,
const Tensor &offsets,
const Tensor &offset2bag,
const Tensor &bag_size_,
const Tensor &max_indices,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode) {
Tensor grad = grad_.contiguous();
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_bag_cuda", indices_arg, kLong);
checkContiguous("embedding_bag_cuda", indices_arg);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarType("embedding_bag_cuda", offsets_arg, kLong);
checkContiguous("embedding_bag_cuda", offsets_arg);
auto grad_arg = TensorArg(grad, "grad", 1);
checkContiguous("embedding_bag_cuda", grad_arg);
checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg);
checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg);
switch (mode) {
case MODE_SUM:
case MODE_MEAN:
return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, bag_size_, num_weights, scale_grad_by_freq, mode);
case MODE_MAX:
return embedding_bag_backward_cuda_max(grad, max_indices, num_weights);
default:
AT_ERROR(
"Unknown mode for embedding_bag_backward_cuda %d", mode);
}
}
}
}
| 21d139c5ba7294e56acc542e857ba77e992e0075.cu | #include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/AccumulateType.h"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
const int WARP_SIZE = 32;
const int MODE_SUM = 0;
const int MODE_MEAN = 1;
const int MODE_MAX = 2;
namespace at {
namespace native {
namespace {
template <typename scalar_t>
__global__ void EmbeddingBag_updateOutputKernel(
int64_t *input, int64_t *offsets, scalar_t *weight, scalar_t *output,
int64_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t stride,
int mode, int64_t *bag_size, int64_t *max_indices) {
// the strategy here is that each bag x feature is handled by a single thread
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim;
int64_t begin = offsets[bag];
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
assert(end >= begin);
accscalar_t weightFeatSum = 0;
scalar_t weightFeatMax;
int64_t bag_size_ = 0;
int64_t maxWord = -1;
for (int64_t emb = begin; emb < end; emb++) {
const int weightRow = ((int)input[emb]) * stride;
scalar_t weightValue = weightFeat[weightRow];
if (mode == MODE_MAX) {
if (emb == begin || weightValue > weightFeatMax) {
weightFeatMax = weightValue;
maxWord = input[emb];
}
} else {
weightFeatSum += static_cast<accscalar_t>(weightValue);
}
bag_size_++;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
if (mode == MODE_MEAN) {
weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_);
bag_size[bag] = bag_size_;
}
if (mode == MODE_MEAN || mode == MODE_SUM) {
output[bag * stride + featureDim] = static_cast<scalar_t>(weightFeatSum);
}
else if (mode == MODE_MAX) {
max_indices[bag * stride + featureDim] = maxWord;
output[bag * stride + featureDim] = weightFeatMax;
}
}
}
}
// FIXME: removed the accGradParametersKernelByFeature case present in
// LookupTable. That kernel is faster at small sizes (<768 indices), which
// does not need EmbeddingBag (LookupTable + Sum works fine), but would
// still be nice to not be slow in that case.
template <typename scalar_t>
__global__ void EmbeddingBag_accGradParametersKernel_sum_avg(
int64_t *input, int64_t *indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t *offset2bag, int64_t *count, ptrdiff_t numel,
int64_t stride, int mode, int64_t *bag_size) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) {
do {
const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weightRow = ((int)input[idx]) * stride;
// Note: only this line changes from LookupTable_accgradParametersKernel
const int origRow = ((int)indices[idx]);
const int seq_number = offset2bag[origRow];
const int gradOutputRow = ((int)seq_number) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride) {
gradient[ii] =
static_cast<accscalar_t>(gradOutput[gradOutputRow + featureDim]);
if (mode == MODE_MEAN) {
gradient[ii] /= bag_size[seq_number];
}
weight[ii] =
static_cast<accscalar_t>(gradWeight[weightRow + featureDim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride) {
gradWeight[weightRow + featureDim] =
static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
Tensor embedding_bag_backward_cuda_sum_avg(
const Tensor &grad,
const Tensor &indices,
const Tensor &offset2bag,
const Tensor &bag_size_,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode) {
Tensor &bag_size = const_cast<Tensor &>(bag_size_);
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.type());
cudaStream_t stream = globalContext().getCurrentCUDAStream();
ptrdiff_t numel = indices.numel();
int64_t stride = grad_weight.stride(0);
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + numel, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel,
thrust::make_constant_iterator(1),
count_data);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy, thrust::make_reverse_iterator(sorted_data + numel),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + numel),
thrust::make_reverse_iterator(count_data + numel),
thrust::equal_to<int64_t>(), thrust::maximum<int64_t>());
}
dim3 grid(THCCeilDiv(numel, (ptrdiff_t)4), THCCeilDiv(stride, (int64_t)128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.type(), "embedding_bag_backward_cuda_sum_avg_kernel", [&] {
EmbeddingBag_accGradParametersKernel_sum_avg<
scalar_t><<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(), orig_indices.data<int64_t>(),
grad.data<scalar_t>(), grad_weight.data<scalar_t>(),
offset2bag.data<int64_t>(),
count.defined() ? count.data<int64_t>() : nullptr, numel, stride,
mode, bag_size.data<int64_t>());
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
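// Backward for MODE_MAX: each (bag, featureDim) cell adds its output gradient
// to the weight row recorded in max_indices for that cell; atomicAdd is used
// because several bags may have selected the same weight row.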
template <typename scalar_t>
__global__ void EmbeddingBag_accGradParametersKernel_max(
int64_t *max_indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t stride, int64_t numBags) {
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
int64_t word_idx = max_indices[bag * stride + featureDim];
atomicAdd(&(gradWeight[word_idx * stride + featureDim]), gradOutput[bag * stride + featureDim]);
}
}
}
Tensor embedding_bag_backward_cuda_max(const Tensor &grad,
const Tensor &max_indices,
int64_t num_weights) {
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.type());
int64_t stride = grad_weight.stride(0);
int64_t numBags = grad.size(0);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
dim3 block = dim3(32, 8);
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.type(), "embedding_bag_backward_cuda_max", [&] {
EmbeddingBag_accGradParametersKernel_max<
scalar_t><<<grid, block, 0, stream>>>(
max_indices.data<int64_t>(), grad.data<scalar_t>(),
grad_weight.data<scalar_t>(), stride, numBags);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
}
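// Forward entry point: reduces the weight rows selected by indices into one
// output row per bag (sum / mean / max according to mode) and also fills
// offset2bag plus, depending on mode, bag_size (mean) and max_indices (max),
// which embedding_bag_backward_cuda consumes.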
std::tuple<Tensor, Tensor, Tensor, Tensor>
embedding_bag_cuda(const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const bool scale_grad_by_freq,
const int64_t mode, bool sparse) {
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_bag_cuda", indices_arg, kLong);
checkContiguous("embedding_bag_cuda", indices_arg);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarType("embedding_bag_cuda", offsets_arg, kLong);
checkContiguous("embedding_bag_cuda", offsets_arg);
auto weight_arg = TensorArg(weight, "weight", 1);
checkContiguous("embedding_bag_cuda", weight_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg);
int64_t numIndices = indices.size(0);
int64_t numBags = offsets.size(0);
int64_t stride = weight.size(1);
auto bag_size = at::zeros(offsets.sizes(), indices.type());
auto offset2bag =
at::zeros({indices.size(0)}, indices.type()); // offset2bag = [0 0 0 0 0]
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto output = at::zeros({offsets.size(0), weight.size(1)}, weight.type());
Tensor max_indices;
if (mode == MODE_MAX) {
max_indices = at::zeros({offsets.size(0), weight.size(1)}, indices.type());
} else {
// No need to allocate if we aren't doing a backwards pass
max_indices = at::zeros({0}, indices.type());
}
dim3 block = dim3(32, 8);
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(weight.type(), "embedding_bag_cuda", [&] {
EmbeddingBag_updateOutputKernel<scalar_t><<<grid, block, 0, stream>>>(
indices.data<int64_t>(), offsets.data<int64_t>(),
weight.data<scalar_t>(), output.data<scalar_t>(),
offset2bag.data<int64_t>(), numIndices, numBags, stride, mode,
bag_size.data<int64_t>(), mode == MODE_MAX ? max_indices.data<int64_t>() : NULL);
});
THCudaCheck(cudaGetLastError());
return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices);
}
Tensor embedding_bag_backward_cuda(const Tensor &grad_, const Tensor &indices,
const Tensor &offsets,
const Tensor &offset2bag,
const Tensor &bag_size_,
const Tensor &max_indices,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode) {
Tensor grad = grad_.contiguous();
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_bag_cuda", indices_arg, kLong);
checkContiguous("embedding_bag_cuda", indices_arg);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarType("embedding_bag_cuda", offsets_arg, kLong);
checkContiguous("embedding_bag_cuda", offsets_arg);
auto grad_arg = TensorArg(grad, "grad", 1);
checkContiguous("embedding_bag_cuda", grad_arg);
checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg);
checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg);
switch (mode) {
case MODE_SUM:
case MODE_MEAN:
return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, bag_size_, num_weights, scale_grad_by_freq, mode);
case MODE_MAX:
return embedding_bag_backward_cuda_max(grad, max_indices, num_weights);
default:
AT_ERROR(
"Unknown mode for embedding_bag_backward_cuda %d", mode);
}
}
}
}
|
113cd0ba553a523575fad891f9776e0160138d4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "F5.cuh"
#include "IO.h"
#include "constants.cuh"
#include <iostream>
#include <vector>
#include <iterator>
#include <fstream>
/*
* Shifted and Rotated Rosenbrock's Function
*
* as defined in "Problem Definitions and Evaluation Criteria for the
* CEC 2013 Special Session and Competition on Real-Parameter Optimization",
* by Liang, J.J., Qu, B.-Y., Suganthan, P.N., Hernandez-Diaz, A.G.,
* Computational Intelligence Laboratory, Zhengzhou University, Zhengzhou,
* China and Nanyang Technological University, Singapore, Technical Report,
* v. 2012, p. 3-18, 2013.
*/
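/*
 * The kernels below evaluate the classic Rosenbrock sum on the transformed
 * point y = M * (0.02048 * (x - o)) + 1, where o is the shift vector and M
 * the rotation matrix loaded in the constructor:
 *
 *   f(x) = sum_{i=1}^{D-1} [ 100 * (y_{i+1} - y_i^2)^2 + (y_i - 1)^2 ]
 *
 * computeK_F5 assigns one thread per individual; computeK2_F5 assigns one
 * block of 128 threads per individual and reduces in shared memory.
 */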
F5::F5(uint _dim, uint _ps):Benchmarks()
{
n_dim = _dim;
ps = _ps;
min = -100.0;
max = +100.0;
ID = 5;
n_threads = 32;
n_blocks = (ps%n_threads)? (ps/n_threads)+1 : (ps/n_threads);
/* ---------------------------------------------- */
/* Load a shift vector to test the bench function */
std::string file_name = "data-files/shift_rosenbrock.mat";
std::string vec_name = "Shift - Rosenbrock";
IO * io = new IO();
std::ifstream file(file_name);
if( not file.is_open() ){
std::cout << "\"data-files/shift_rosenbrock.mat\" could not be opened\n";
exit(-1);
}
auto loaded_vec = io->load_vector<float>( vec_name, file ) ;
file.close();
/* ---------------------------------------------- */
checkCudaErrors(hipMemcpyToSymbol(shift, (void *) loaded_vec.data(), n_dim * sizeof(float)));
/* ---------------------------------------------- */
/* Load a rotate matrix */
if(!(n_dim==2 or n_dim==5 or n_dim==10 or n_dim==20 or n_dim==30 or n_dim==50 or n_dim==100)){
printf("\nError: Rotation matrices are only defined for D = 2,5,10,20,30,50,100.\n");
exit(-1);
}
file_name = "data-files/rot/M_D" + std::to_string(n_dim) + ".txt";
vec_name = "M_D" + std::to_string(n_dim);
file.open(file_name, std::ifstream::in);
if( not file.is_open() ){
std::cout << "Error opening rotation matrix file\n";
exit(-1);
}
loaded_vec = io->load_vector<float>( vec_name, file ) ;
file.close();
/* ---------------------------------------------- */
checkCudaErrors(hipMemcpyToSymbol(m_rotation, (void *) loaded_vec.data(), n_dim * n_dim * sizeof(float)));
}
F5::~F5()
{
/*empty*/
}
__global__ void computeK2_F5(float * x, float * f){
uint id_p, id_d, ps, ndim, i, stride;
id_p = blockIdx.x;
id_d = threadIdx.x;
ps = params.ps;
ndim = params.n_dim;
stride = id_p * ndim;
float a, b, t1, t2;
__shared__ float r[128];
__shared__ float z[100];
__shared__ float R[10000];
__shared__ float z_rot[100];
r[id_d] = 0.0f;
if( id_d < ndim ){
z[id_d] = (x[stride+id_d] - shift[id_d]) * 0.02048;
// each dimension loads its rotation column from the rotation matrix
for( i = 0; i < ndim; i++ ){
R[(id_d*ndim)+i] = m_rotation[(id_d*ndim)+i];
}
}
__syncthreads();
if( id_d < ndim ){
z_rot[id_d] = 0.0;
for( i = 0; i < ndim; i++ ){
z_rot[id_d] += z[i] * R[(id_d*ndim)+i];
}
z_rot[id_d] += 1.0;
}
__syncthreads();
if( id_d < (ndim-1) ){
a = z_rot[id_d];
b = z_rot[id_d+1];
t1 = b - (a * a);
t2 = a - 1.0;
t1 *= t1;
t2 *= t2;
r[id_d] = (100.0 * t1) + t2;
__syncthreads();
/* Simple reduce sum */
if( id_d < 64 && ndim == 100)
r[id_d] += r[id_d + 64];
__syncthreads();
if( id_d < 32 )
r[id_d] += r[id_d + 32];
__syncthreads();
if( id_d < 16 )
r[id_d] += r[id_d + 16];
__syncthreads();
if( id_d < 8 )
r[id_d] += r[id_d + 8];
__syncthreads();
if( id_d < 4 )
r[id_d] += r[id_d + 4];
__syncthreads();
if( id_d < 2 )
r[id_d] += r[id_d + 2];
__syncthreads();
if( id_d == 0 )
r[id_d] += r[id_d + 1];
__syncthreads();
if( id_d == 0 )
f[id_p] = r[0];
}
}
__global__ void computeK_F5(float * x, float * f){
uint id_p = threadIdx.x + (blockIdx.x * blockDim.x);
uint ps = params.ps;
if( id_p < ps ){
uint ndim = params.n_dim;
uint id_d = id_p * ndim;
uint i, j;
// The constant 2.048/100 is needed because after the rotation
// the value of a dimension can exceed the bounds;
float z[100];
//shift
for( i = 0; i < ndim; i++ )
z[i] = (x[id_d + i] - shift[i]) * 2.048/100;
float z_rot[100];
//rotation
for( i = 0; i < ndim; i++ ){
z_rot[i] = 0.0;
for( j = 0; j < ndim; j++ )
z_rot[i] += z[j] * m_rotation[i * ndim + j];
z_rot[i] += 1.0;
}
float s = 0.0, t1, t2;
for(uint i = 0; i < (ndim - 1); i++){
t1 = z_rot[i+1] - (z_rot[i] * z_rot[i]);
t2 = z_rot[i] - 1.0;
t1 *= t1;
t2 *= t2;
s += (100.0 * t1) + t2;
}
if( s <= 10e-08 )
s = 0.0;
f[id_p] = s;
}
}
void F5::compute(float * x, float * f){
//computeK_F5<<< n_blocks, n_threads >>>(x, f);
hipLaunchKernelGGL(( computeK2_F5), dim3(ps), dim3(128) , 0, 0, x, f);
checkCudaErrors(hipGetLastError());
}
| 113cd0ba553a523575fad891f9776e0160138d4c.cu | #include "F5.cuh"
#include "IO.h"
#include "constants.cuh"
#include <iostream>
#include <vector>
#include <iterator>
#include <fstream>
/*
* Shifted and Rotated Rosenbrock's Function
*
* as defined in "Problem Definitions and Evaluation Criteria for the
* CEC 2013 Special Session and Competition on Real-Parameter Optimization",
* by Liang, J.J., Qu, B.-Y., Suganthan, P.N., Hernandez-Diaz, A.G.,
* Computational Intelligence Laboratory, Zhengzhou University, Zhengzhou,
* China and Nanyang Technological University, Singapore, Technical Report,
* v. 2012, p. 3-18, 2013.
*/
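/*
 * The kernels below evaluate the classic Rosenbrock sum on the transformed
 * point y = M * (0.02048 * (x - o)) + 1, where o is the shift vector and M
 * the rotation matrix loaded in the constructor:
 *
 *   f(x) = sum_{i=1}^{D-1} [ 100 * (y_{i+1} - y_i^2)^2 + (y_i - 1)^2 ]
 *
 * computeK_F5 assigns one thread per individual; computeK2_F5 assigns one
 * block of 128 threads per individual and reduces in shared memory.
 */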
F5::F5(uint _dim, uint _ps):Benchmarks()
{
n_dim = _dim;
ps = _ps;
min = -100.0;
max = +100.0;
ID = 5;
n_threads = 32;
n_blocks = (ps%n_threads)? (ps/n_threads)+1 : (ps/n_threads);
/* ---------------------------------------------- */
/* Load a shift vector to test the bench function */
std::string file_name = "data-files/shift_rosenbrock.mat";
std::string vec_name = "Shift - Rosenbrock";
IO * io = new IO();
std::ifstream file(file_name);
if( not file.is_open() ){
std::cout << "\"data-files/shift_rosenbrock.mat\" could not be opened\n";
exit(-1);
}
auto loaded_vec = io->load_vector<float>( vec_name, file ) ;
file.close();
/* ---------------------------------------------- */
checkCudaErrors(cudaMemcpyToSymbol(shift, (void *) loaded_vec.data(), n_dim * sizeof(float)));
/* ---------------------------------------------- */
/* Load a rotate matrix */
if(!(n_dim==2 or n_dim==5 or n_dim==10 or n_dim==20 or n_dim==30 or n_dim==50 or n_dim==100)){
printf("\nError: Rotation matrices are only defined for D = 2,5,10,20,30,50,100.\n");
exit(-1);
}
file_name = "data-files/rot/M_D" + std::to_string(n_dim) + ".txt";
vec_name = "M_D" + std::to_string(n_dim);
file.open(file_name, std::ifstream::in);
if( not file.is_open() ){
std::cout << "Error opening rotation matrix file\n";
exit(-1);
}
loaded_vec = io->load_vector<float>( vec_name, file ) ;
file.close();
/* ---------------------------------------------- */
checkCudaErrors(cudaMemcpyToSymbol(m_rotation, (void *) loaded_vec.data(), n_dim * n_dim * sizeof(float)));
}
F5::~F5()
{
/*empty*/
}
__global__ void computeK2_F5(float * x, float * f){
uint id_p, id_d, ps, ndim, i, stride;
id_p = blockIdx.x;
id_d = threadIdx.x;
ps = params.ps;
ndim = params.n_dim;
stride = id_p * ndim;
float a, b, t1, t2;
__shared__ float r[128];
__shared__ float z[100];
__shared__ float R[10000];
__shared__ float z_rot[100];
r[id_d] = 0.0f;
if( id_d < ndim ){
z[id_d] = (x[stride+id_d] - shift[id_d]) * 0.02048;
//each dimension load your rotation column from rotation matrix
for( i = 0; i < ndim; i++ ){
R[(id_d*ndim)+i] = m_rotation[(id_d*ndim)+i];
}
}
__syncthreads();
if( id_d < ndim ){
z_rot[id_d] = 0.0;
for( i = 0; i < ndim; i++ ){
z_rot[id_d] += z[i] * R[(id_d*ndim)+i];
}
z_rot[id_d] += 1.0;
}
__syncthreads();
if( id_d < (ndim-1) ){
a = z_rot[id_d];
b = z_rot[id_d+1];
t1 = b - (a * a);
t2 = a - 1.0;
t1 *= t1;
t2 *= t2;
r[id_d] = (100.0 * t1) + t2;
__syncthreads();
/* Simple reduce sum */
if( id_d < 64 && ndim == 100)
r[id_d] += r[id_d + 64];
__syncthreads();
if( id_d < 32 )
r[id_d] += r[id_d + 32];
__syncthreads();
if( id_d < 16 )
r[id_d] += r[id_d + 16];
__syncthreads();
if( id_d < 8 )
r[id_d] += r[id_d + 8];
__syncthreads();
if( id_d < 4 )
r[id_d] += r[id_d + 4];
__syncthreads();
if( id_d < 2 )
r[id_d] += r[id_d + 2];
__syncthreads();
if( id_d == 0 )
r[id_d] += r[id_d + 1];
__syncthreads();
if( id_d == 0 )
f[id_p] = r[0];
}
}
__global__ void computeK_F5(float * x, float * f){
uint id_p = threadIdx.x + (blockIdx.x * blockDim.x);
uint ps = params.ps;
if( id_p < ps ){
uint ndim = params.n_dim;
uint id_d = id_p * ndim;
uint i, j;
// The constant 2.048/100 is needed because after the rotation
// the value of a dimension can exceed the bounds;
float z[100];
//shift
for( i = 0; i < ndim; i++ )
z[i] = (x[id_d + i] - shift[i]) * 2.048/100;
float z_rot[100];
//rotation
for( i = 0; i < ndim; i++ ){
z_rot[i] = 0.0;
for( j = 0; j < ndim; j++ )
z_rot[i] += z[j] * m_rotation[i * ndim + j];
z_rot[i] += 1.0;
}
float s = 0.0, t1, t2;
for(uint i = 0; i < (ndim - 1); i++){
t1 = z_rot[i+1] - (z_rot[i] * z_rot[i]);
t2 = z_rot[i] - 1.0;
t1 *= t1;
t2 *= t2;
s += (100.0 * t1) + t2;
}
if( s <= 10e-08 )
s = 0.0;
f[id_p] = s;
}
}
void F5::compute(float * x, float * f){
//computeK_F5<<< n_blocks, n_threads >>>(x, f);
computeK2_F5<<< ps, 128 >>>(x, f);
checkCudaErrors(cudaGetLastError());
}
|
a468ff5e268042deb0940bf85377e32b2b080a05.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reg_addArrays_kernel_float4.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *array1_d = NULL;
hipMalloc(&array1_d, XSIZE*YSIZE);
float4 *array2_d = NULL;
hipMalloc(&array2_d, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
reg_addArrays_kernel_float4), dim3(gridBlock),dim3(threadBlock), 0, 0, array1_d,array2_d);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
reg_addArrays_kernel_float4), dim3(gridBlock),dim3(threadBlock), 0, 0, array1_d,array2_d);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
reg_addArrays_kernel_float4), dim3(gridBlock),dim3(threadBlock), 0, 0, array1_d,array2_d);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a468ff5e268042deb0940bf85377e32b2b080a05.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reg_addArrays_kernel_float4.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *array1_d = NULL;
cudaMalloc(&array1_d, XSIZE*YSIZE);
float4 *array2_d = NULL;
cudaMalloc(&array2_d, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reg_addArrays_kernel_float4<<<gridBlock,threadBlock>>>(array1_d,array2_d);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reg_addArrays_kernel_float4<<<gridBlock,threadBlock>>>(array1_d,array2_d);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reg_addArrays_kernel_float4<<<gridBlock,threadBlock>>>(array1_d,array2_d);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
551b54aac99b5db7f7f009e61822d6e89a893fc3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <fstream>
#include <cstdlib>
using namespace std;
//#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
//#include <sdkHelper.h>
#define TIMES 1
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
void RandomInit(float* data, int n)
{
for (int i=0; i<n; i++)
{
data[i] = rand() / (float)RAND_MAX;
}
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////__NAIVE_MATRIX_MULTIPLICATION_///////////////////////////////////////////////
// Device code
// Compute C = A * B
#define TILEWIDTH_X 16
#define TILEWIDTH_Y 16
#define TILE_WIDTH 16
__global__ void matrixMultiply(float* d_M, float* d_N, float* d_P,
int Width) {
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
ds_M[ty][tx] = d_M[Row*Width + m*TILE_WIDTH+tx];
ds_N[ty][tx] = d_N[(m*TILE_WIDTH+ty)*Width+Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
__syncthreads();
}
d_P[Row*Width+Col] = Pvalue;
}
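// Note: this tiled kernel assumes square Width x Width operands with Width a
// multiple of TILE_WIDTH; the tile loop truncates Width/TILE_WIDTH and there
// is no bounds check on Row/Col, so other shapes would read out of range.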
void MatrixMulOnHost(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
for (int i = 0; i < numARows; ++i)
for (int j = 0; j < numBColumns; ++j) {
float sum = 0;
for (int k = 0; k < numAColumns; ++k) {
float a = A[i * numAColumns + k];
float b = B[k * numBColumns + j];
sum += a * b;
}
C[i * numCColumns + j] = sum;
}
}
int MatrixMulti(int numARows, int numAColumns, int numBRows, int numBColumns, int blockx, int blocky, bool optimzed, bool define=false)
{
if(!optimzed)
printf("NAIVE MATRIX MULTIPLICATION\n");
else if(define)
printf("Optimized MATRIX MULTIPLICATION with static shared memory allocation\n");
else
printf("Optimized MATRIX MULTIPLICATION with dynamic shared memory allocation\n");
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numCRows = numARows;; // number of rows in the matrix C (you have to set this)
int numCColumns = numBColumns;; // number of columns in the matrix C (you have to set this)
double total_time=0;
//StopWatchInterface* timer;
int sizeA = numARows*numAColumns*sizeof(float);
int sizeB = numBRows*numBColumns*sizeof(float);
int sizeC = numCRows*numCColumns*sizeof(float);
if(numAColumns != numBRows)
{
cout<<"Error in input dimensions! A columns != B rows"<<endl;
exit(-1);
}
// Allocate input vectors h_A and h_B in host memory
hostA = (float*)malloc(sizeA);
hostB = (float*)malloc(sizeB);
hostC = (float*)malloc(sizeC);
// Initialize input vectors
RandomInit(hostA, numARows*numAColumns);
RandomInit(hostB, numBRows*numBColumns);
RandomInit(hostC, numCRows*numCColumns);
cout<<"The dimensions of A are "<<numARows<<" x "<<numAColumns<<endl;
cout<<"The dimensions of B are "<<numBRows<<" x "<<numBColumns<<endl;
//Allocate GPU memory here
// checkCudaErrors(hipMalloc(&deviceA, sizeA));
// checkCudaErrors(hipMalloc(&deviceB, sizeB));
// checkCudaErrors(hipMalloc(&deviceC, sizeC));
//@@ Copy memory to the GPU here
//checkCudaErrors(hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice));
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
dim3 dimBlock, dimGrid;
dimBlock = dim3(blockx, blocky);
dimGrid = dim3((numCColumns+blockx-1)/blockx, (numCRows+blocky-1)/blocky);
//matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,numAColumns);
hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, hostA, hostB, hostC, numAColumns);
getLastCudaError("kernel launch failure");
checkCudaErrors(hipDeviceSynchronize());
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double dSeconds = total_time/((double)TIMES * 1000);
double dNumOps = 2.0 * (double)numARows * (double)numAColumns * (double)numBColumns;
double gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
//@@ Copy the GPU memory back to the CPU here
//checkCudaErrors(hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost));
// Verify result
//float* hostcpu = (float*)malloc(sizeC);
/*MatrixMulOnHost(hostA,hostB,hostcpu,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
int i;
int j;
for (i = 0; i < numCRows; ++i)
for(j=0; j<numCColumns; j++)
{
if (fabs(hostC[i*numCColumns + j] - hostcpu[i*numCColumns + j]) > 1e-3)
{
break;
}
}*/
//@@ Free the GPU memory here
///checkCudaErrors(hipFree(deviceA));
// checkCudaErrors(hipFree(deviceB));
//checkCudaErrors(hipFree(deviceC));
// hipDeviceReset();
free(hostA);
free(hostB);
free(hostC);
//free(hostcpu);
/*if(i == numCRows && j == numCColumns)
cout<<"SUCCESS"<<endl;
else
cout<<"FAILED"<<endl; */
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char *argv[])
{
if(argc < 6)
printf("Insufficient number of arguments!\n");
else
{
MatrixMulti(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), false);
}
}
| 551b54aac99b5db7f7f009e61822d6e89a893fc3.cu | #include <iostream>
#include <cstdio>
#include <fstream>
#include <cstdlib>
using namespace std;
//#include <cuda_runtime.h>
#include "cuda.h"
//#include <sdkHelper.h>
#define TIMES 1
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
void RandomInit(float* data, int n)
{
for (int i=0; i<n; i++)
{
data[i] = rand() / (float)RAND_MAX;
}
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////__NAIVE_MATRIX_MULTIPLICATION_///////////////////////////////////////////////
// Device code
// Compute C = A * B
#define TILEWIDTH_X 16
#define TILEWIDTH_Y 16
#define TILE_WIDTH 16
__global__ void matrixMultiply(float* d_M, float* d_N, float* d_P,
int Width) {
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
ds_M[ty][tx] = d_M[Row*Width + m*TILE_WIDTH+tx];
ds_N[ty][tx] = d_N[(m*TILE_WIDTH+ty)*Width+Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
__syncthreads();
}
d_P[Row*Width+Col] = Pvalue;
}
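// Note: this tiled kernel assumes square Width x Width operands with Width a
// multiple of TILE_WIDTH; the tile loop truncates Width/TILE_WIDTH and there
// is no bounds check on Row/Col, so other shapes would read out of range.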
void MatrixMulOnHost(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
for (int i = 0; i < numARows; ++i)
for (int j = 0; j < numBColumns; ++j) {
float sum = 0;
for (int k = 0; k < numAColumns; ++k) {
float a = A[i * numAColumns + k];
float b = B[k * numBColumns + j];
sum += a * b;
}
C[i * numCColumns + j] = sum;
}
}
int MatrixMulti(int numARows, int numAColumns, int numBRows, int numBColumns, int blockx, int blocky, bool optimzed, bool define=false)
{
if(!optimzed)
printf("NAIVE MATRIX MULTIPLICATION\n");
else if(define)
printf("Optimized MATRIX MULTIPLICATION with static shared memory allocation\n");
else
printf("Optimized MATRIX MULTIPLICATION with dynamic shared memory allocation\n");
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numCRows = numARows;; // number of rows in the matrix C (you have to set this)
int numCColumns = numBColumns;; // number of columns in the matrix C (you have to set this)
double total_time=0;
//StopWatchInterface* timer;
int sizeA = numARows*numAColumns*sizeof(float);
int sizeB = numBRows*numBColumns*sizeof(float);
int sizeC = numCRows*numCColumns*sizeof(float);
if(numAColumns != numBRows)
{
cout<<"Error in input dimensions! A columns != B rows"<<endl;
exit(-1);
}
// Allocate input vectors h_A and h_B in host memory
hostA = (float*)malloc(sizeA);
hostB = (float*)malloc(sizeB);
hostC = (float*)malloc(sizeC);
// Initialize input vectors
RandomInit(hostA, numARows*numAColumns);
RandomInit(hostB, numBRows*numBColumns);
RandomInit(hostC, numCRows*numCColumns);
cout<<"The dimensions of A are "<<numARows<<" x "<<numAColumns<<endl;
cout<<"The dimensions of B are "<<numBRows<<" x "<<numBColumns<<endl;
//Allocate GPU memory here
// checkCudaErrors(cudaMalloc(&deviceA, sizeA));
// checkCudaErrors(cudaMalloc(&deviceB, sizeB));
// checkCudaErrors(cudaMalloc(&deviceC, sizeC));
//@@ Copy memory to the GPU here
//checkCudaErrors(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice));
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
dim3 dimBlock, dimGrid;
dimBlock = dim3(blockx, blocky);
dimGrid = dim3((numCColumns+blockx-1)/blockx, (numCRows+blocky-1)/blocky);
//matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,numAColumns);
matrixMultiply<<<dimGrid, dimBlock>>>(hostA, hostB, hostC, numAColumns);
getLastCudaError("kernel launch failure");
checkCudaErrors(cudaThreadSynchronize());
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double dSeconds = total_time/((double)TIMES * 1000);
double dNumOps = 2.0 * (double)numARows * (double)numAColumns * (double)numBColumns;
double gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
//@@ Copy the GPU memory back to the CPU here
//checkCudaErrors(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost));
// Verify result
//float* hostcpu = (float*)malloc(sizeC);
/*MatrixMulOnHost(hostA,hostB,hostcpu,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
int i;
int j;
for (i = 0; i < numCRows; ++i)
for(j=0; j<numCColumns; j++)
{
if (fabs(hostC[i*numCColumns + j] - hostcpu[i*numCColumns + j]) > 1e-3)
{
break;
}
}*/
//@@ Free the GPU memory here
///checkCudaErrors(cudaFree(deviceA));
// checkCudaErrors(cudaFree(deviceB));
//checkCudaErrors(cudaFree(deviceC));
// cudaDeviceReset();
free(hostA);
free(hostB);
free(hostC);
//free(hostcpu);
/*if(i == numCRows && j == numCColumns)
cout<<"SUCCESS"<<endl;
else
cout<<"FAILED"<<endl; */
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char *argv[])
{
if(argc < 6)
printf("Insufficient number of arguments!\n");
else
{
MatrixMulti(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), false);
}
}
|
programming3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <sys/time.h>
#include <ctime>
#include <fstream>
#include <cmath>
#include <cstdlib>
using namespace std;
// Eratosthenes' sieve on odds
__global__ static void sieve(char *primes, int n, int root)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 3;
if (i < root && primes[i] == 0)
{
for (long j = i * i; j <= n; j += i)
{
primes[j] = 1;
}
}
}
// Eratosthenes' sieve on evens
__global__ static void Evens(char* P, int n)
{
long i = blockIdx.x * blockDim.x + threadIdx.x + threadIdx.x + 4;
if (i < n) {
P[i] = 1;
}
}
__global__ static void Init(char* P)
{
P[0] = 1;
P[1] = 1;
}
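// isPrime marks composites in P (P[n] == 0 afterwards means n is prime):
// Init flags 0 and 1, Evens flags the even numbers >= 4, and sieve then
// crosses off the multiples j = i*i, i*i + i, ... of every i in [3, sqrt(max))
// that is still unmarked.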
__host__ void isPrime(char* P, int max)
{
int blockSize = 32;
long root = sqrt(max);
char* d_Primes = NULL;
long sizePrimes = sizeof(char) * max;
hipMalloc(&d_Primes, sizePrimes);
hipMemset(d_Primes, 0, sizePrimes);
dim3 dimBlock(blockSize);
dim3 dimGrid((root + dimBlock.x) / dimBlock.x);
dim3 dimGridEven((max + dimBlock.x) / dimBlock.x);
hipLaunchKernelGGL(( Init), dim3(1),dim3(1), 0, 0, d_Primes);
hipLaunchKernelGGL(( Evens), dim3(dimGridEven), dim3(dimBlock), 0, 0, d_Primes, max);
hipLaunchKernelGGL(( sieve), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Primes, max, root);
hipMemcpy(P, d_Primes, sizePrimes, hipMemcpyDeviceToHost);
hipFree(d_Primes);
}
int main(){
struct timeval start, end;
long mtime, seconds, useconds;
char *primes;
long long sum;
long long num;
cout << "enter number to sum primes to: " << endl;
cin >> num;
primes = (char*)malloc(num);
memset(primes, 0, num);
if (num < 2) {
cout << "no primes to sum!" << endl;
return 0;
}
else{
sum = 2;
}
gettimeofday(&start, NULL);
isPrime(primes, num);
for (long n = 3; n <= num - 1; n += 2) {
if (primes[n] == 0){ // Indicates primality
//cout << n << " is prime." << endl;
sum += n;
if(num >= 1 + n*n && num < (n+1)*(n + 1)) {
sum -= n*n;
}
}
}
free(primes);
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtime = ((seconds) * 1000 + useconds/1000.0);
cout << "sum under " << num << " is " << sum << endl;
cout << "time: " << mtime << " milliseconds\n" << endl;
return 0;
}
| programming3.cu | #include <iostream>
#include <sys/time.h>
#include <ctime>
#include <fstream>
#include <cmath>
#include <cstdlib>
using namespace std;
// Eratosthenes' sieve on odds
__global__ static void sieve(char *primes, int n, int root)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 3;
if (i < root && primes[i] == 0)
{
for (long j = i * i; j <= n; j += i)
{
primes[j] = 1;
}
}
}
// Eratosthenes' sieve on evens
__global__ static void Evens(char* P, int n)
{
long i = blockIdx.x * blockDim.x + threadIdx.x + threadIdx.x + 4;
if (i < n) {
P[i] = 1;
}
}
__global__ static void Init(char* P)
{
P[0] = 1;
P[1] = 1;
}
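// isPrime marks composites in P (P[n] == 0 afterwards means n is prime):
// Init flags 0 and 1, Evens flags the even numbers >= 4, and sieve then
// crosses off the multiples j = i*i, i*i + i, ... of every i in [3, sqrt(max))
// that is still unmarked.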
__host__ void isPrime(char* P, int max)
{
int blockSize = 32;
long root = sqrt(max);
char* d_Primes = NULL;
long sizePrimes = sizeof(char) * max;
cudaMalloc(&d_Primes, sizePrimes);
cudaMemset(d_Primes, 0, sizePrimes);
dim3 dimBlock(blockSize);
dim3 dimGrid((root + dimBlock.x) / dimBlock.x);
dim3 dimGridEven((max + dimBlock.x) / dimBlock.x);
Init<<<1,1>>>(d_Primes);
Evens<<<dimGridEven, dimBlock>>>(d_Primes, max);
sieve<<<dimGrid, dimBlock>>>(d_Primes, max, root);
cudaMemcpy(P, d_Primes, sizePrimes, cudaMemcpyDeviceToHost);
cudaFree(d_Primes);
}
int main(){
struct timeval start, end;
long mtime, seconds, useconds;
char *primes;
long long sum;
long long num;
cout << "enter number to sum primes to: " << endl;
cin >> num;
primes = (char*)malloc(num);
memset(primes, 0, num);
if (num < 2) {
cout << "no primes to sum!" << endl;
return 0;
}
else{
sum = 2;
}
gettimeofday(&start, NULL);
isPrime(primes, num);
for (long n = 3; n <= num - 1; n += 2) {
if (primes[n] == 0){ // Indicates primality
//cout << n << " is prime." << endl;
sum += n;
if(num >= 1 + n*n && num < (n+1)*(n + 1)) {
sum -= n*n;
}
}
}
free(primes);
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtime = ((seconds) * 1000 + useconds/1000.0);
cout << "sum under " << num << " is " << sum << endl;
cout << "time: " << mtime << " milliseconds\n" << endl;
return 0;
}
|
5be6a3799c37e3bede98f6b5267f3c24500c853e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vanGenuchtenIntial.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *theta = NULL;
hipMalloc(&theta, XSIZE*YSIZE);
double *K = NULL;
hipMalloc(&K, XSIZE*YSIZE);
double *Ksat = NULL;
hipMalloc(&Ksat, XSIZE*YSIZE);
double *h = NULL;
hipMalloc(&h, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vanGenuchtenIntial), dim3(gridBlock),dim3(threadBlock), 0, 0, theta,K,Ksat,h,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vanGenuchtenIntial), dim3(gridBlock),dim3(threadBlock), 0, 0, theta,K,Ksat,h,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vanGenuchtenIntial), dim3(gridBlock),dim3(threadBlock), 0, 0, theta,K,Ksat,h,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5be6a3799c37e3bede98f6b5267f3c24500c853e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vanGenuchtenIntial.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *theta = NULL;
cudaMalloc(&theta, XSIZE*YSIZE);
double *K = NULL;
cudaMalloc(&K, XSIZE*YSIZE);
double *Ksat = NULL;
cudaMalloc(&Ksat, XSIZE*YSIZE);
double *h = NULL;
cudaMalloc(&h, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vanGenuchtenIntial<<<gridBlock,threadBlock>>>(theta,K,Ksat,h,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vanGenuchtenIntial<<<gridBlock,threadBlock>>>(theta,K,Ksat,h,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vanGenuchtenIntial<<<gridBlock,threadBlock>>>(theta,K,Ksat,h,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9c7491f197bb96a59224c4a0bfa2a650ea9f0b4b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <raft/random/rng.cuh>
#include <sparse/linalg/degree.cuh>
#include <iostream>
namespace raft {
namespace sparse {
template <typename T>
struct SparseDegreeInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
class SparseDegreeTests
: public ::testing::TestWithParam<SparseDegreeInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseDegreeInputs<T> params;
};
const std::vector<SparseDegreeInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef SparseDegreeTests<float> COODegree;
TEST_P(COODegree, Result) {
int *in_rows, *verify, *results;
int in_rows_h[5] = {0, 0, 1, 2, 2};
int verify_h[5] = {2, 1, 2, 0, 0};
raft::allocate(in_rows, 5);
raft::allocate(verify, 5, true);
raft::allocate(results, 5, true);
raft::update_device(in_rows, *&in_rows_h, 5, 0);
raft::update_device(verify, *&verify_h, 5, 0);
linalg::coo_degree<32>(in_rows, 5, results, 0);
hipDeviceSynchronize();
ASSERT_TRUE(raft::devArrMatch<int>(verify, results, 5, raft::Compare<int>()));
CUDA_CHECK(hipFree(in_rows));
CUDA_CHECK(hipFree(verify));
}
typedef SparseDegreeTests<float> COODegreeNonzero;
TEST_P(COODegreeNonzero, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
int *in_rows, *verify, *results;
float *in_vals;
int in_rows_h[5] = {0, 0, 1, 2, 2};
float in_vals_h[5] = {0.0, 5.0, 0.0, 1.0, 1.0};
int verify_h[5] = {1, 0, 2, 0, 0};
raft::allocate(in_rows, 5);
raft::allocate(verify, 5, true);
raft::allocate(results, 5, true);
raft::allocate(in_vals, 5, true);
raft::update_device(in_rows, *&in_rows_h, 5, 0);
raft::update_device(verify, *&verify_h, 5, 0);
raft::update_device(in_vals, *&in_vals_h, 5, 0);
linalg::coo_degree_nz<32, float>(in_rows, in_vals, 5, results, stream);
hipDeviceSynchronize();
ASSERT_TRUE(raft::devArrMatch<int>(verify, results, 5, raft::Compare<int>()));
CUDA_CHECK(hipFree(in_rows));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipStreamDestroy(stream));
}
INSTANTIATE_TEST_CASE_P(SparseDegreeTests, COODegree,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(SparseDegreeTests, COODegreeNonzero,
::testing::ValuesIn(inputsf));
} // namespace sparse
} // namespace raft
| 9c7491f197bb96a59224c4a0bfa2a650ea9f0b4b.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <raft/random/rng.cuh>
#include <sparse/linalg/degree.cuh>
#include <iostream>
namespace raft {
namespace sparse {
template <typename T>
struct SparseDegreeInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
class SparseDegreeTests
: public ::testing::TestWithParam<SparseDegreeInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseDegreeInputs<T> params;
};
const std::vector<SparseDegreeInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};
typedef SparseDegreeTests<float> COODegree;
TEST_P(COODegree, Result) {
int *in_rows, *verify, *results;
int in_rows_h[5] = {0, 0, 1, 2, 2};
int verify_h[5] = {2, 1, 2, 0, 0};
raft::allocate(in_rows, 5);
raft::allocate(verify, 5, true);
raft::allocate(results, 5, true);
raft::update_device(in_rows, *&in_rows_h, 5, 0);
raft::update_device(verify, *&verify_h, 5, 0);
linalg::coo_degree<32>(in_rows, 5, results, 0);
cudaDeviceSynchronize();
ASSERT_TRUE(raft::devArrMatch<int>(verify, results, 5, raft::Compare<int>()));
CUDA_CHECK(cudaFree(in_rows));
CUDA_CHECK(cudaFree(verify));
}
typedef SparseDegreeTests<float> COODegreeNonzero;
TEST_P(COODegreeNonzero, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
int *in_rows, *verify, *results;
float *in_vals;
int in_rows_h[5] = {0, 0, 1, 2, 2};
float in_vals_h[5] = {0.0, 5.0, 0.0, 1.0, 1.0};
int verify_h[5] = {1, 0, 2, 0, 0};
raft::allocate(in_rows, 5);
raft::allocate(verify, 5, true);
raft::allocate(results, 5, true);
raft::allocate(in_vals, 5, true);
raft::update_device(in_rows, *&in_rows_h, 5, 0);
raft::update_device(verify, *&verify_h, 5, 0);
raft::update_device(in_vals, *&in_vals_h, 5, 0);
linalg::coo_degree_nz<32, float>(in_rows, in_vals, 5, results, stream);
cudaDeviceSynchronize();
ASSERT_TRUE(raft::devArrMatch<int>(verify, results, 5, raft::Compare<int>()));
CUDA_CHECK(cudaFree(in_rows));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaStreamDestroy(stream));
}
INSTANTIATE_TEST_CASE_P(SparseDegreeTests, COODegree,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(SparseDegreeTests, COODegreeNonzero,
::testing::ValuesIn(inputsf));
} // namespace sparse
} // namespace raft
|
fbcbd5868ebb3a68c0007efd7e3c9289d0a01ca5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf);
__device__ float cuda_sqrf(const float x) {
return x * x;
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf);
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_SINCOS_CUDA_FUNCTION(T) \
__global__ void _Kernel_##T##_##SinCos( \
const int N, const T* x, T* ys, T* yc) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
sincos(x[i], ys + i, yc + i); \
} \
} \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##SinCos), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, ys, yc); \
}
DELEGATE_SINCOS_CUDA_FUNCTION(float)
DELEGATE_SINCOS_CUDA_FUNCTION(double)
#undef DELEGATE_SINCOS_CUDA_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = \
func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf);
#undef DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
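// Why B and A are swapped above: cuBLAS/hipBLAS assume column-major storage
// while Caffe2 tensors are row-major. A row-major M x N matrix C has the same
// memory layout as a column-major N x M matrix C^T, so the row-major product
//   C = alpha * op(A) * op(B) + beta * C          (C is M x N)
// is obtained by asking the BLAS for
//   C^T = alpha * op(B)^T * op(A)^T + beta * C^T  (C^T is N x M),
// which is why the call passes (cuTransB, cuTransA, N, M, K, ..., B, ..., A, ...).
// Hypothetical usage sketch (sizes made up for illustration; add the trailing
// math_type argument if the header does not default it):
//   // C(2x4) = 1.f * A(2x3) * B(3x4) + 0.f * C, all row-major device buffers
//   caffe2::math::Gemm<float, CUDAContext>(CblasNoTrans, CblasNoTrans,
//                                          2, 4, 3, 1.f, A, B, 0.f, C, &context);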
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call hipblasHgemm
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
b_stride,
A,
lda,
a_stride,
&beta,
C,
N,
c_stride,
batch_size));
#endif
}
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}; // namespace
template <>
void GemmBatched<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// 3 options:
// 1) scratch != null = cast to fp32, SgemmStridedBatched, cast result to fp16
// 2) math_type == FLOAT, scratch == nullptr = looped SgemmEx
// 3) math_type == FLOAT16, scratch == nullptr = batched Hgemm
if (scratch != nullptr) {
const int A_size = a_stride * batch_size;
const int B_size = b_stride * batch_size;
// cast, hipblasSgemmStridedBatched, cast
size_t in_elems = A_size + B_size;
size_t out_elems = c_stride * batch_size;
scratch->Resize(in_elems + out_elems);
float* scratch_ptr = scratch->mutable_data<float>();
float* A_fp32 = scratch_ptr;
float* B_fp32 = scratch_ptr + A_size;
float* C_fp32 = scratch_ptr + A_size + B_size;
// cast A, B into fp32
hipLaunchKernelGGL(( HalfToFloatKernel),
dim3(CAFFE_GET_BLOCKS(A_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), A_size, (half*)A, A_fp32);
hipLaunchKernelGGL(( HalfToFloatKernel),
dim3(CAFFE_GET_BLOCKS(B_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), B_size, (half*)B, B_fp32);
// run fp32 batched Gemm
GemmBatched<float, CUDAContext>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A_fp32,
B_fp32,
beta,
C_fp32,
context);
// cast result back to fp16
hipLaunchKernelGGL(( FloatToHalfKernel),
dim3(CAFFE_GET_BLOCKS(batch_size * M * N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), batch_size * M * N, C_fp32, (half*)C);
} else {
if (math_type == TensorProto_DataType_FLOAT) {
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
b_stride,
(const __half*)A,
lda,
a_stride,
&beta_fp16,
(__half*)C,
N,
c_stride,
batch_size));
}
}
#endif
}
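// Dispatch summary for the fp16 batched GEMM above (the __CUDACC_VER_MAJOR__ >= 8 path):
//   scratch != nullptr          -> convert A/B to fp32, run a single
//                                  hipblasSgemmStridedBatched, convert C back to fp16;
//   scratch == nullptr, FLOAT   -> loop over the batch calling the fp16 Gemm
//                                  (fp32 compute via cublasSgemmEx);
//   scratch == nullptr, FLOAT16 -> one hipblasHgemmStridedBatched with
//                                  half-precision accumulation.
// The scratch path trades extra memory (fp32 copies of A, B and C) for a single
// strided-batched call and fp32 accumulation.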
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
TransA, TransB, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(hipblasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
void GemmBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
template <>
void GemmBatched<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float16, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / hipblasHgemm
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
int LDC = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
LDA,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
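// The fp16 Gemv above is expressed as a GEMM with one output column (n = 1):
// y = op(A) * x becomes an (m x k) * (k x 1) product, and the transpose flag is
// flipped because the row-major A is handed to a column-major BLAS.
// Worked example with M = 3, N = 4:
//   TransA == CblasNoTrans: cuTransA = OP_T, m = M = 3, k = N = 4, LDA = 4,
//     i.e. A is read as a column-major 4x3 matrix (= A^T) and transposed back;
//   TransA == CblasTrans:   cuTransA = OP_N, m = N = 4, k = M = 3, LDA = 4,
//     i.e. the column-major view of A is already the A^T the caller asked for.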
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
hipLaunchKernelGGL(( SetKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
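// UniformIntFit maps raw 32-bit random words onto the closed interval
// [min, max]: range = max - min + 1 and x_int[i] = min + x[i] % range.
// Example: min = -3, max = 3 gives range = 7, so a raw value of 10 maps to
// -3 + 10 % 7 = 0. The modulo introduces a small bias whenever range does not
// divide 2^32 evenly, which this kernel does not correct for.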
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
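// Worked example of the odd-length handling above: for n = 5 the last element
// r[4] is filled with one host-generated normal sample via math::Set, and
// hiprandGenerateNormal is then asked for the remaining even_n = 4 values,
// since the generator only accepts an even count.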
template <>
void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
&result,
HIP_R_16F,
HIP_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
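// Reduction tree used by SumKernel (a single block of 128 threads):
//   N   -> 128: each thread accumulates elements idx, idx+128, idx+256, ...;
//   128 -> 32 : threads 0..31 fold in lanes idx+32, idx+64 and idx+96;
//   32  -> 1  : thread 0 sums the 32 partial results and writes *Y.
// Accumulation happens in float even for float16 inputs, which is why the
// shared buffer is declared as float.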
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
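// SumGenericIter follows the standard two-phase hipcub::DeviceReduce pattern:
// the first call with a null workspace only reports the required temporary
// storage size, the scratch tensor is resized to hold it (plus one extra
// element when the caller did not supply an output slot, so `dest` can live at
// the tail of the scratch buffer), and the second call performs the actual
// reduction on the given stream.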
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
template <>
void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float16>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float16>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float* alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float16>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_16F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float16>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void
AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n,
const float a,
const float* x,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
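// Worked example for the w_col_start / w_col_end computation above: an output
// column w_col reads padded input columns [w_col * stride_w,
// w_col * stride_w + dpatch_w). With stride_w = 2, patch_w = 3, dilation_w = 1
// (dpatch_w = 3) and a padded input column w = 4, the contributing outputs are
//   w_col_start = (4 - 3) / 2 + 1 = 1,  w_col_end = min(4 / 2 + 1, output_w),
// i.e. (for output_w >= 3) w_col in {1, 2}: column 1 covers padded columns
// 2..4 and column 2 covers 4..6, while column 0 (covering 0..2) is excluded.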
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= d_img < 0 || d_img >= img_shape.data[d_i + 1];
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
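// Layout used by the N-d kernel above: the column buffer is viewed as
// outer_size x inner_size, where outer_size (= col_shape[0], i.e. C times the
// kernel volume in this NCHW layout) indexes a (channel, kernel-offset) pair
// and inner_size indexes an output location. Each block owns one outer index
// and decodes the kernel offsets d_offset[]; each thread decodes its output
// coordinates d_iter[] from the inner index, and the matching input element is
// at d_img = d_iter * stride - pad + d_offset * dilation per dimension. In
// im2col mode (kCol2Im = false) the value is gathered into the column buffer;
// in col2im mode it is scattered back with atomicAdd because several column
// entries can map to the same image element.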
template <typename T, int N>
void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
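// Output size arithmetic used above: the dilated kernel spans
// dkernel_h = dilation_h * (kernel_h - 1) + 1 rows, so
// output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1.
// Example: height = 7, kernel_h = 3, dilation_h = 2 (dkernel_h = 5),
// pad_t = pad_b = 2, stride_h = 2 gives output_h = (7 + 4 - 5) / 2 + 1 = 4,
// i.e. windows start at padded rows 0, 2, 4 and 6.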
template <>
void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
hipMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
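// CopyMatrix maps directly onto hipMemcpy2DAsync: it copies M rows of
// N * itemsize bytes each, with consecutive rows lda * itemsize bytes apart in
// the source and ldb * itemsize bytes apart in the destination.
// Example: extracting a 3 x 5 float sub-matrix out of a row-major 3 x 8 matrix
// uses itemsize = 4, lda = 8, ldb = 5, i.e. spitch = 32, dpitch = 20 and a row
// width of 20 bytes.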
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
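// Both reduction kernels above follow the same hipcub::BlockReduce pattern:
// one block is assigned per output row (or column), every thread strides over
// that row accumulating a private value, BlockReduce combines the per-thread
// values across the block, and only thread 0 writes the result. The
// __syncthreads() at the end of each iteration keeps the shared temp_storage
// safe for reuse in the next grid-stride iteration.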
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( RowwiseReduceKernel), \
::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( ColwiseReduceKernel), \
::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
namespace {
std::vector<int> MakeTransposeAxes(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes) {
std::vector<int> transpose_axes(num_dims);
const int d = num_dims - num_axes;
std::copy_n(axes, num_axes, transpose_axes.begin() + d);
std::sort(transpose_axes.begin() + d, transpose_axes.end());
int p = 0;
int q = d;
for (int i = 0; i < num_dims; ++i) {
if (q < num_dims && i == transpose_axes[q]) {
++q;
} else {
transpose_axes[p++] = i;
}
}
return transpose_axes;
}
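// Worked example for MakeTransposeAxes: with num_dims = 4 and reduction axes
// {1, 3}, the reduced axes are placed (sorted) at the back and the remaining
// axes keep their order at the front, giving transpose_axes = {0, 2, 1, 3}.
// ReduceTensorCUDA then treats dims[0] * dims[2] as the outer size and
// dims[1] * dims[3] as the inner (reduced) size.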
template <int D>
void ComputeTransposedStrides(
const int* X_dims,
const int* axes,
int* X_strides) {
int buff[D];
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= X_dims[i];
}
for (int i = 0; i < D; ++i) {
X_strides[i] = buff[axes[i]];
}
}
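// ComputeTransposedStrides rebases the contiguous strides of X onto the
// transposed axis order: for X_dims = {2, 3, 4, 5} the contiguous strides are
// {60, 20, 5, 1}, and with axes = {0, 2, 1, 3} the kernel receives
// X_strides = {60, 5, 20, 1}, so a transposed index can be mapped back to a
// flat offset in X without materializing the transpose.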
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, reducer, init, X, Y);
}
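// Dispatcher: if the reduced axes are exactly the trailing dimensions, the
// permutation from MakeTransposeAxes is the identity and the data is already
// laid out as (outer_size x inner_size), so the cheaper contiguous
// RowwiseReduceKernel is used; otherwise the strided kernel above is
// instantiated for the actual rank via the dispatch macro.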
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
hipLaunchKernelGGL(( RowwiseReduceKernel<T>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, reducer, init, X, Y);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
X,
Y,
context);
}
template <typename T>
void ReduceMeanCUDAImpl(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
ReduceTensorCUDA(
num_dims, dims, num_axes, axes, hipcub::Sum(), T(0), X, Y, context);
const int X_size =
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>());
int scale = 1;
for (int i = 0; i < num_axes; ++i) {
scale *= dims[axes[i]];
}
const int Y_size = X_size / scale;
Scale<T, CUDAContext>(
Y_size, 1.0f / static_cast<float>(scale), Y, Y, context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Min(), \
std::numeric_limits<T>::max(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Max(), \
std::numeric_limits<T>::lowest(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, dims, num_axes, axes, hipcub::Sum(), T(0), X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceMeanCUDAImpl<T>(num_dims, dims, num_axes, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
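// Broadcasting is implemented by giving every broadcast (size-1 or missing)
// dimension of X a stride of 0, so advancing along that dimension of Y does
// not advance the read offset in X; each output element then simply copies
// the input element selected by this strided index computation.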
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += X_strides.data[i] == 0
? 0
: (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<int, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
std::copy_n(Y_dims, D, Y_dims_array.data);
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(Y_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), Y_size, X_strides_array, Y_dims_array, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, BroadcastCUDAImpl, T, X_ndim, X_dims, Y_dims, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
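// Mean and variance are computed in one pass by accumulating the sum and the
// sum of squares per reduced slice and using Var[x] = E[x^2] - E[x]^2. The
// rowwise kernel handles the contiguous case; the generic kernel below uses
// the same strided index mapping as the reductions above.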
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, hipcub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(cols);
variance[i] = v_val / static_cast<T>(cols) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, hipcub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(inner_size);
variance[i] = v_val / static_cast<T>(inner_size) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), outer_size, inner_size, X, mean, variance);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
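// Transpose: each thread computes one output element, decomposes its linear
// index into coordinates of the transposed shape (Y_dims), and converts them
// back to the source offset with the permuted strides (X_strides), so the
// copy is a single gather per element.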
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
size *= dims[i];
}
hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
} // namespace math
} // namespace caffe2
| fbcbd5868ebb3a68c0007efd7e3c9289d0a01ca5.cu | // Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf);
__device__ float cuda_sqrf(const float x) {
return x * x;
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf);
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_SINCOS_CUDA_FUNCTION(T) \
__global__ void _Kernel_##T##_##SinCos( \
const int N, const T* x, T* ys, T* yc) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
sincos(x[i], ys + i, yc + i); \
} \
} \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
_Kernel_##T##_##SinCos<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, ys, yc); \
}
DELEGATE_SINCOS_CUDA_FUNCTION(float)
DELEGATE_SINCOS_CUDA_FUNCTION(double)
#undef DELEGATE_SINCOS_CUDA_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = \
func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf);
#undef DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
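  // Since a row-major matrix is the transpose of the column-major matrix that
  // shares its storage, C = alpha * op(A) * op(B) + beta * C (row-major) is
  // computed as C^T = alpha * op(B)^T * op(A)^T + beta * C^T in column-major
  // terms, i.e. by swapping the A/B operands and the M/N dimensions below.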
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call cublasHgemm
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
b_stride,
A,
lda,
a_stride,
&beta,
C,
N,
c_stride,
batch_size));
#endif
}
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}; // namespace
template <>
void GemmBatched<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// 3 options:
// 1) scratch != null = cast to fp32, SgemmStridedBatched, cast result to fp16
// 2) math_type == FLOAT, scratch == nullptr = looped SgemmEx
// 3) math_type == FLOAT16, scratch == nullptr = batched Hgemm
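  // Option 1 round-trips through fp32: it casts A and B into a scratch buffer
  // of (A_size + B_size + C_size) floats, runs the float batched GEMM, and
  // casts the result back to fp16. Options 2 and 3 need no scratch: option 2
  // loops over the batch with fp32 math (cublasSgemmEx), while option 3
  // issues a single cublasHgemmStridedBatched with fp16 alpha/beta.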
if (scratch != nullptr) {
const int A_size = a_stride * batch_size;
const int B_size = b_stride * batch_size;
// cast, cublasSgemmStridedBatched, cast
size_t in_elems = A_size + B_size;
size_t out_elems = c_stride * batch_size;
scratch->Resize(in_elems + out_elems);
float* scratch_ptr = scratch->mutable_data<float>();
float* A_fp32 = scratch_ptr;
float* B_fp32 = scratch_ptr + A_size;
float* C_fp32 = scratch_ptr + A_size + B_size;
// cast A, B into fp32
HalfToFloatKernel<<<
CAFFE_GET_BLOCKS(A_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(A_size, (half*)A, A_fp32);
HalfToFloatKernel<<<
CAFFE_GET_BLOCKS(B_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(B_size, (half*)B, B_fp32);
// run fp32 batched Gemm
GemmBatched<float, CUDAContext>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A_fp32,
B_fp32,
beta,
C_fp32,
context);
// cast result back to fp16
FloatToHalfKernel<<<
CAFFE_GET_BLOCKS(batch_size * M * N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(batch_size * M * N, C_fp32, (half*)C);
} else {
if (math_type == TensorProto_DataType_FLOAT) {
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_ENFORCE(cublasHgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
b_stride,
(const __half*)A,
lda,
a_stride,
&beta_fp16,
(__half*)C,
N,
c_stride,
batch_size));
}
}
#endif
}
#if CUDA_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
TransA, TransB, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(cublasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
void GemmBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
template <>
void GemmBatched<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float16, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / cublasHgemm
int m = (cuTransA == CUBLAS_OP_N) ? N : M;
int k = (cuTransA == CUBLAS_OP_N) ? M : N;
int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
int LDC = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
LDA,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
SetKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
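// curandGenerateNormal[Double] requires an even number of outputs. When n is
// odd, the last element is drawn on the host with std::normal_distribution
// (from a default-constructed engine) and copied to the device, and the even
// remainder n - 1 is returned for curand to fill.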
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
&result,
CUDA_R_16F,
CUDA_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
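// SumKernel is launched with a single block of SUM_KERNEL_NTHREADS threads:
// every thread accumulates a strided partial sum (optionally of squares) into
// shared memory, the 128 partials are folded down to 32 in one step, and
// thread 0 adds the remaining 32 values serially before writing the scalar.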
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
template <>
void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
SelectKernel<float>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
SelectKernel<float16>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
PowKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
ScaleKernel<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernel<float16>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float* alpha,
const float* x,
float* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float16>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_16F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
AxpyKernel<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
AxpyKernel<float16>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void
AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n,
const float a,
const float* x,
const float b,
float* y,
CUDAContext* context) {
AxpbyKernel<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, a, x, b, y);
}
namespace {
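// Im2col (NCHW): each thread owns one (channel, output_y, output_x) triple
// and copies its kernel_h x kernel_w receptive field into the column buffer,
// writing the taps output_h * output_w elements apart and substituting 0 for
// locations that fall into the padding region.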
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
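// Col2im (NCHW) is the scatter-add inverse of im2col, written as a gather so
// no atomics are needed: each thread owns one image pixel, walks every output
// position whose (dilated) receptive field covers that pixel, checks that the
// offset is divisible by the dilation, and sums the matching column entries.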
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
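// N-dimensional im2col/col2im share one kernel, selected by the kCol2Im flag:
// blocks iterate over (channel x kernel-offset) combinations, threads over
// output positions. The forward path writes column entries (0 for padding);
// the col2im path scatters back into the image with atomicAdd because several
// column entries can map to the same image element.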
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= d_img < 0 || d_img >= img_shape.data[d_i + 1];
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Im2ColNdNCHWCUDAKernel<T, N, false>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
Im2ColNdNCHWCUDAKernel<T, N, true>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
Im2ColNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
template <>
void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
Im2ColNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
Col2ImNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
Col2ImNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
cudaMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
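// Reduction strategy shared by the two kernels below: each thread block owns one
// row (or column) at a time via a grid-stride loop, every thread accumulates a
// strided subset of that row's elements, and cub::BlockReduce combines the
// per-thread partials; only thread 0 of the block writes the reduced value.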
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
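// Builds the permutation used to view a reduction as "outer x inner": the axes
// that are NOT reduced keep their relative order at the front, and the reduced
// axes (sorted) are moved to the back. For example, num_dims = 4 with
// axes = {3, 1} yields {0, 2, 1, 3}.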
std::vector<int> MakeTransposeAxes(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes) {
std::vector<int> transpose_axes(num_dims);
const int d = num_dims - num_axes;
std::copy_n(axes, num_axes, transpose_axes.begin() + d);
std::sort(transpose_axes.begin() + d, transpose_axes.end());
int p = 0;
int q = d;
for (int i = 0; i < num_dims; ++i) {
if (q < num_dims && i == transpose_axes[q]) {
++q;
} else {
transpose_axes[p++] = i;
}
}
return transpose_axes;
}
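// For each position i of the transposed layout, X_strides[i] becomes the
// row-major stride of the original tensor along dimension axes[i], so a
// transposed index can be mapped back to an offset into X.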
template <int D>
void ComputeTransposedStrides(
const int* X_dims,
const int* axes,
int* X_strides) {
int buff[D];
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= X_dims[i];
}
for (int i = 0; i < D; ++i) {
X_strides[i] = buff[axes[i]];
}
}
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
ReduceTensorCUDAKernel<T, Reducer, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, reducer, init, X, Y);
}
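// Generic entry point: after MakeTransposeAxes, the kept dimensions form the
// "outer" extent and the reduced dimensions the "inner" extent. If the reduced
// axes are already the innermost ones, each output element reads contiguous
// memory and the cheaper RowwiseReduceKernel is used; otherwise we dispatch on
// the rank to the strided kernel above. Example: dims = {2, 3, 4} reduced over
// axes {1, 2} gives outer_size = 2, inner_size = 12 and takes the rowwise path.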
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
RowwiseReduceKernel<T>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, reducer, init, X, Y);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
X,
Y,
context);
}
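// Mean is computed as a sum reduction followed by a scale with
// 1 / (product of the reduced dimensions).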
template <typename T>
void ReduceMeanCUDAImpl(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
ReduceTensorCUDA(
num_dims, dims, num_axes, axes, cub::Sum(), T(0), X, Y, context);
const int X_size =
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>());
int scale = 1;
for (int i = 0; i < num_axes; ++i) {
scale *= dims[axes[i]];
}
const int Y_size = X_size / scale;
Scale<T, CUDAContext>(
Y_size, 1.0f / static_cast<float>(scale), Y, Y, context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Min(), \
std::numeric_limits<T>::max(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Max(), \
std::numeric_limits<T>::lowest(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, dims, num_axes, axes, cub::Sum(), T(0), X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceMeanCUDAImpl<T>(num_dims, dims, num_axes, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
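// Broadcasting kernel: dimensions of X that are missing or have size 1 get a
// stride of 0, so every output index along such a dimension reads the same X
// element; all other coordinates are decoded from the output index via Y_dims.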
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += X_strides.data[i] == 0
? 0
: (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<int, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
std::copy_n(Y_dims, D, Y_dims_array.data);
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(Y_size, X_strides_array, Y_dims_array, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, BroadcastCUDAImpl, T, X_ndim, X_dims, Y_dims, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
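// Single-pass moments: accumulate sum(X) and sum(X^2) per row and derive the
// variance as E[X^2] - E[X]^2. (As with any one-pass formula, this can lose
// precision when the mean is large relative to the spread.)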
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, cub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(cols);
variance[i] = v_val / static_cast<T>(cols) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, cub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(inner_size);
variance[i] = v_val / static_cast<T>(inner_size) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
MomentsCUDAKernel<T, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
RowwiseMomentsCUDAKernel<T>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(outer_size, inner_size, X, mean, variance);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
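// Gather-style transpose: for every output linear index, decode the transposed
// coordinates with Y_dims and accumulate the corresponding input offset through
// X_strides, then copy one element.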
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
size *= dims[i];
}
TransposeCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
} // namespace math
} // namespace caffe2
|
d3545d077b0e98b51be1cc76c5c5387538b2a2df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../Utils/bmpimage.h"
#include "../Utils/vector3.h"
#include <stdio.h>
#include <iostream>
// The number of samples/rays we shoot for each pixel for distributed ray tracing
#define SAMPLES 1024
// Do we want to enable Anti Aliasing via jittering?
#define AA_ENABLED 1
//Useful macro to check cuda error code returned from cuda functions
#define CHECK_CUDA_ERRORS(val) Check( (val), #val, __FILE__, __LINE__ )
static void Check(hipError_t result, char const *const func, const char *const file, int const line)
{
if (result)
{
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n";
hipDeviceReset();
exit(99);
}
}
//Ray tracing data structures
//Simple struct used to collect post hit data (i.e. hit position, normal and t)
struct HitData
{
/** Ctor */
__device__ HitData() : mHitPos(0.f, 0.f, 0.f), mNormal(0.f, 1.f, 0.f) { }
Vector3 mHitPos;
Vector3 mNormal;
i32 mObjId = -1;
float t = 0.0f;
};
class Camera
{
public:
__device__ Camera(const Vector3& InEye = Vector3(0.f, 0.f, 0.f)
, const Vector3& InLookAt = Vector3(0.f, 0.f, 50.f)
, const Vector3& InUp = Vector3(0.f, 1.f, 0.f)
, float InFov = 60.f
, float InAspectRatio = 1.f
, float InTime0 = 0.0f
, float InTime1 = 1.0f) : mEye(InEye), mLookAt(InLookAt),mTime0(InTime0),mTime1(InTime1)
{
const Vector3& Fwd = InLookAt - InEye;
mW = Fwd.norm();
mU = InUp.cross(mW);
mV = mW.cross(mU);
mScaleY = tanf(DegToRad(InFov)*0.5f);
mScaleX = mScaleY * InAspectRatio;
}
~Camera() = default;
//We calculate the world space ray given the position of the pixel in image space and
//the image plane width and height.
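	//Alpha and Beta map the pixel to [-1, 1] on the image plane (Beta flips y so
	//row 0 ends up at the top), scaled by tan(fov/2) and the aspect ratio. The
	//returned direction points at that spot on a plane one unit along the forward
	//axis and is not normalized; the sphere test copes with that by using a = d.d.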
__device__ Vector3 GetWorldSpaceRayDir(float InPx, float InPy, float InWidth, float InHeight)
{
float Alpha = ((InPx / InWidth)*2.0f - 1.0f)*mScaleX;
float Beta = ((1.0f - (InPy / InHeight))*2.0f - 1.0f)*mScaleY;
Vector3 WSRayDir = mU * Alpha + mV * Beta + mW;
return WSRayDir;
}
__device__ Vector3 GetCameraEye() const { return mEye; }
__device__ float GetFocalLength() const { return mFocalLength; }
__device__ float GetApertureSize() const { return mApertureSize; }
private:
//Convenient member variables used to cache the scale along the x and y axis of the
//camera space
float mScaleY = 1.0f;
float mScaleX = 1.0f;
/**The camera position */
Vector3 mEye;
/**The camera forward vector */
Vector3 mW;
/**The camera side vector*/
Vector3 mU;
/**The camera up vector */
Vector3 mV;
/**The camera look at */
Vector3 mLookAt;
/**Focal length */
float mFocalLength = 5.25f;
/**Aperture Size */
//float mApertureSize = 0.7f;
float mApertureSize = 0.0f;
// Motion blur variables
// Time at which the shutter was open
float mTime0;
// Time at which the shutter is closed
float mTime1;
};
//Simple ray class
class Ray
{
public:
/** Ctor */
__device__ Ray(const Vector3& InOrigin = Vector3(0, 0, 0), const Vector3& InDirection = Vector3(0, 0, 1),float InTime = 0.0f) : mOrigin(InOrigin), mDirection(InDirection),mTime(InTime) {}
/** Copy Ctor */
__device__ Ray(const Ray& InRay) : mOrigin(InRay.mOrigin), mDirection(InRay.mDirection) { }
//Method used to compute position at parameter t
__device__ Vector3 PositionAtT(float t) const
{
return mOrigin + mDirection * t;
}
// This ray origin
Vector3 mOrigin;
// This ray direction
Vector3 mDirection;
// Min t
float mTmin;
// Max t
float mTmax;
// Added for motion blur
float mTime = 0.0f;
};
//Simple sphere class
struct Sphere
{
	// We also allow the sphere to move, to account for motion blur
/** The center of the sphere at time 0*/
Vector3 mCenter0;
/** The center of the sphere at time 1*/
Vector3 mCenter1;
/** Let's give this sphere a color */
Vector3 mColor;
/** The radius of the sphere */
float mRadius;
/** Time at which the sphere started moving (coincides with camera shutter open) */
float mTime0 = 0.0f;
	/** Time at which the sphere stops moving (coincides with camera shutter close) */
float mTime1 = 1.0f;
__device__ Vector3 GetCenterAtTime(float Time) const noexcept
{
return mCenter0 + (mCenter1 - mCenter0)*((Time - mTime0) / (mTime1 - mTime0));
}
/** Ctor */
__device__ Sphere(const Vector3& InCenter0 = Vector3(0, 0, 0), const Vector3& InCenter1 = Vector3(0, 0, 0),const Vector3& InColor = Vector3(1,1,1),float InRadius = 1) : mCenter0(InCenter0), mCenter1(InCenter1), mColor(InColor) ,mRadius(InRadius) { }
/** Copy Ctor */
__device__ Sphere(const Sphere& InSphere) : mCenter0(InSphere.mCenter0), mCenter1(InSphere.mCenter1), mColor(InSphere.mColor),mRadius(InSphere.mRadius) { }
/** Get the color of this sphere */
__device__ Vector3 GetColor() const { return mColor; }
	//Compute the ray-sphere intersection using the analytic solution
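	//With b defined as the half coefficient (b = oc.d), the quadratic
	//a*t^2 + 2*b*t + c = 0 has discriminant b*b - a*c and roots
	//(-b - sqrt(disc)) / a and (-b + sqrt(disc)) / a; the nearer root is tested
	//first, the farther one only if the first falls outside [InTMin, InTMax].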
__device__ bool Intersect(const Ray& InRay, float InTMin, float InTMax,float& t)
{
const Vector3& oc = (InRay.mOrigin - GetCenterAtTime(InRay.mTime));
float a = InRay.mDirection.dot(InRay.mDirection);
float b = oc.dot(InRay.mDirection);
float c = oc.dot(oc) - mRadius * mRadius;
float Disc = b * b - a * c;
float temp = 0.0f;
if (Disc > 0.0001f)
{
float SqrtDisc = sqrt(Disc);
temp = (-b - SqrtDisc) / a;
if (temp < InTMax && temp > InTMin)
{
t = temp;
return true;
}
temp = (-b + SqrtDisc) / a;
if (temp < InTMax && temp > InTMin)
{
t = temp;
return true;
}
}
return false;
}
};
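// Thin-lens depth of field: every primary ray is re-aimed at its focal point P,
// which lies FocalLength units along the ray, after jittering the ray origin
// inside the aperture disk. Points on the focal plane stay sharp, everything
// else blurs in proportion to its distance from that plane. (For a uniform disk
// distribution one would typically use sqrtf(u2) as the radius; here u2 is used
// directly, which biases samples toward the center of the aperture.)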
__device__ static Ray GetDOFRay(const Ray& ray, float ApertureSize, float FocalLength, u32* Seed0, u32* Seed1)
{
	// This is the focal point for a given primary camera ray (exact only if Dir has unit length)
auto P = ray.mOrigin + ray.mDirection * FocalLength;
	// Get two random numbers in the 0-1 range to sample the aperture disk
float u1 = GetRandom01(Seed0, Seed1);
float u2 = GetRandom01(Seed0, Seed1);
float r1 = 2.0f * M_PI * u1;
float r2 = u2;
auto RandVec = Vector3(cosf(r1)*r2, sinf(r1)*r2, 0.0f) * ApertureSize;
// This is the new ray origin
auto NewRayOrigin = ray.mOrigin + RandVec;
// New ray direction
auto NewRayDir = (P - NewRayOrigin).norm();
return Ray(NewRayOrigin, NewRayDir);
}
__device__ static bool GetClosestHit(const Ray& InRay, float InTMin, float InTMax,HitData& OutHitData,Sphere* InSphereList,const u32 InNumSpheres)
{
float Inf = FLT_MAX;
float tMin = Inf;
float t = 0.f;
for (i32 i=0;i<InNumSpheres;++i)
{
if (InSphereList[i].Intersect(InRay, 0.001f, FLT_MAX,t) && t < tMin )
{
tMin = t;
OutHitData.t = t;
OutHitData.mHitPos = InRay.PositionAtT(t);
OutHitData.mObjId = i;
}
}
return (tMin < Inf);
}
//the keyword __global__ instructs the CUDA compiler that this function is the entry point of our kernel
__global__ void RenderScene(const u32 ScreenWidth,const u32 ScreenHeight, float* ColorBuffer)
{
u32 x = blockIdx.x*blockDim.x + threadIdx.x;
	u32 y = blockIdx.y*blockDim.y + threadIdx.y;
	//Guard against the extra threads created by the rounded-up grid in main();
	//without this check they would write outside ColorBuffer.
	if (x >= ScreenWidth || y >= ScreenHeight)
	{
		return;
	}
	//Create a simple sphere list made of five spheres
const u32 kNumSpheres = 5;
Sphere SphereList[kNumSpheres] = {
Sphere(Vector3(0.0f,0.0f,1.0f),Vector3(0.0f,0.5f,1.0f),Vector3(0.0f,1.0f,0.0f)),
Sphere(Vector3(0.75f,0.0f,3.5f),Vector3(0.75f,0.0f,3.5f),Vector3(1.0f,0.0f,0.0f)),
Sphere(Vector3(1.5f,0.0f,6.0f),Vector3(1.5f,0.35f,6.0f),Vector3(1.0f,1.0f,0.0f)),
Sphere(Vector3(-0.5f,0.0f,0.0f),Vector3(-0.5f,0.0f,0.0f),Vector3(0.0f,0.0f,1.0f)),
Sphere(Vector3(-1.0f,0.0f,-1.0f),Vector3(-1.0f,0.0f,-1.0f),Vector3(1.0f,0.0f,1.0f))
};
	//Prepare a fallback color
	Vector3 Green(0.0f, 1.0f, 0.0f); //Green color we could return on a primitive hit (unused below: each sphere provides its own color)
//Create a camera
Camera camera(Vector3(0.0f, 2.0f, -5.0f));
//Cast a ray in world space from the camera
u32 Seed0 = x;
u32 Seed1 = y;
// This is the resultant color
Vector3 ColorResult;
// Inv of the number of samples
const float InvSamples = 1.0f / ((float)SAMPLES);
// Get parameters needed for DOF computation
float FocalLength = camera.GetFocalLength();
float ApertureSize = camera.GetApertureSize();
// We use this to create a gradient for the background color. It will be a bit prettier to look at and it will give a better contrast with objects in the foreground
float GradientInterp = (static_cast<float>(y) / static_cast<float>(ScreenHeight));
Vector3 BkgColor = Lerp(Vector3(0.671f,0.875f,0.973f), Vector3(0.992f,0.941f,0.918f),GradientInterp);
// For each sample of a given pixel
// We implement distributed ray casting
// What we obtain at the end of this process is antialiasing
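	// Each iteration jitters the sub-pixel position (rx, ry) and picks a random
	// shutter time, so averaging the SAMPLES results gives both anti-aliasing and
	// motion blur: aliasing and ghosting are traded for noise that falls off as
	// 1/sqrt(SAMPLES).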
for (u32 i = 0; i < SAMPLES; ++i)
{
// Pick a random sample in 0-1 range
#if AA_ENABLED
float rx = GetRandom01(&Seed0, &Seed1);
float ry = GetRandom01(&Seed0, &Seed1);
//Compute the world space ray direction for each sample and then average the results
auto WSDir = camera.GetWorldSpaceRayDir(((float)x) + rx, ((float)y) + ry , ScreenWidth, ScreenHeight);
#else
auto WSDir = camera.GetWorldSpaceRayDir(((float)x), ((float)y), ScreenWidth, ScreenHeight);
#endif
//Construct a ray in world space that originates from the camera
Ray WSRay(camera.GetCameraEye(), WSDir);
// Get Random time interval between 0-1
WSRay.mTime = GetRandom01(&Seed0, &Seed1);
//Ray DOFRay = GetDOFRay(WSRay, ApertureSize, FocalLength, &Seed0, &Seed1);
//Compute intersection and set a color
HitData OutHitData;
// Get the closest hit
//bool Hit = GetClosestHit(DOFRay, 0.001f, FLT_MAX, OutHitData,SphereList,kNumSpheres);
bool Hit = GetClosestHit(WSRay, 0.001f, FLT_MAX, OutHitData, SphereList, kNumSpheres);
// Return the color for a given sample and accumulate the result
ColorResult += (Hit ? SphereList[OutHitData.mObjId].mColor : BkgColor);
}
// Average the results
ColorResult *= InvSamples;
//We access the linear ColorBuffer storing each color component separately (we could have a float3 color buffer for a more compact/cleaner solution)
int offset = (x + (ScreenHeight - y - 1) * ScreenWidth) * 3;
//Store the results of your computations
ColorBuffer[offset] = ColorResult.X();
ColorBuffer[offset + 1] = ColorResult.Y();
ColorBuffer[offset + 2] = ColorResult.Z();
}
int main()
{
//Color Buffer resolution
int ScreenWidth = 512;
int ScreenHeight = 512;
float* ColorBuffer = nullptr;
//Here we prepare our computation domain (i.e. thread blocks and threads in a block)
	//Number of threads in a block (experiment with these sizes!).
	//Suggestion: make them a multiple of the warp size (a warp is 32 threads on NVIDIA; AMD's wavefront is 64 threads)
int ThreadBlockSizeX = 8;
int ThreadBlockSizeY = 8;
//Number of thread blocks
int NumOfBlockX = ScreenWidth / ThreadBlockSizeX + 1;
int NumOfBlockY = ScreenHeight / ThreadBlockSizeY + 1;
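	//Note: the "+ 1" rounds the grid up but also over-allocates blocks when the
	//resolution is an exact multiple of the block size; the usual ceil-division is
	//  (ScreenWidth + ThreadBlockSizeX - 1) / ThreadBlockSizeX
	//Either way the kernel guards against the extra threads before writing.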
	//Let's define the dimensions of the compute domain
dim3 ThreadBlocks(NumOfBlockX, NumOfBlockY);
dim3 ThreadsInABlock(ThreadBlockSizeX, ThreadBlockSizeY);
//Color buffer size in bytes
const size_t kColorBufferSize = sizeof(float) * 3 * ScreenWidth*ScreenHeight;
//We allocate our color buffer in Unified Memory such that it'll be easy for us to access it on the host as well as on the device
CHECK_CUDA_ERRORS(hipMallocManaged(&ColorBuffer, kColorBufferSize));
//Launch the kernel that will render the scene
RenderScene << <ThreadBlocks, ThreadsInABlock >> > (ScreenWidth, ScreenHeight, ColorBuffer);
//Wait for the GPU to finish before to access results on the host
CHECK_CUDA_ERRORS(hipGetLastError());
CHECK_CUDA_ERRORS(hipDeviceSynchronize());
	//Save results stored in ColorBuffer to file (could be a *.ppm or a *.bmp)
//We are ready to use the results produced on the GPU
//Dump Results on a file
const int dpi = 72;
Ray_BMP_Manager::Save("Chapter5_CudaResult.bmp", ScreenWidth, ScreenHeight, dpi, (float*)ColorBuffer);
//Done! Free up cuda allocated memory
CHECK_CUDA_ERRORS(hipFree(ColorBuffer));
return 0;
}
| d3545d077b0e98b51be1cc76c5c5387538b2a2df.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../Utils/bmpimage.h"
#include "../Utils/vector3.h"
#include <stdio.h>
#include <iostream>
// The number of samples/rays we shoot for each pixel for distributed ray tracing
#define SAMPLES 1024
// Do we want to enable Anti Aliasing via jittering?
#define AA_ENABLED 1
//Useful macro to check cuda error code returned from cuda functions
#define CHECK_CUDA_ERRORS(val) Check( (val), #val, __FILE__, __LINE__ )
static void Check(cudaError_t result, char const *const func, const char *const file, int const line)
{
if (result)
{
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n";
cudaDeviceReset();
exit(99);
}
}
//Ray tracing data structures
//Simple struct used to collect post hit data (i.e. hit position, normal and t)
struct HitData
{
/** Ctor */
__device__ HitData() : mHitPos(0.f, 0.f, 0.f), mNormal(0.f, 1.f, 0.f) { }
Vector3 mHitPos;
Vector3 mNormal;
i32 mObjId = -1;
float t = 0.0f;
};
class Camera
{
public:
__device__ Camera(const Vector3& InEye = Vector3(0.f, 0.f, 0.f)
, const Vector3& InLookAt = Vector3(0.f, 0.f, 50.f)
, const Vector3& InUp = Vector3(0.f, 1.f, 0.f)
, float InFov = 60.f
, float InAspectRatio = 1.f
, float InTime0 = 0.0f
, float InTime1 = 1.0f) : mEye(InEye), mLookAt(InLookAt),mTime0(InTime0),mTime1(InTime1)
{
const Vector3& Fwd = InLookAt - InEye;
mW = Fwd.norm();
mU = InUp.cross(mW);
mV = mW.cross(mU);
mScaleY = tanf(DegToRad(InFov)*0.5f);
mScaleX = mScaleY * InAspectRatio;
}
~Camera() = default;
//We calculate the world space ray given the position of the pixel in image space and
//the image plane width and height.
__device__ Vector3 GetWorldSpaceRayDir(float InPx, float InPy, float InWidth, float InHeight)
{
float Alpha = ((InPx / InWidth)*2.0f - 1.0f)*mScaleX;
float Beta = ((1.0f - (InPy / InHeight))*2.0f - 1.0f)*mScaleY;
Vector3 WSRayDir = mU * Alpha + mV * Beta + mW;
return WSRayDir;
}
__device__ Vector3 GetCameraEye() const { return mEye; }
__device__ float GetFocalLength() const { return mFocalLength; }
__device__ float GetApertureSize() const { return mApertureSize; }
private:
//Convenient member variables used to cache the scale along the x and y axis of the
//camera space
float mScaleY = 1.0f;
float mScaleX = 1.0f;
/**The camera position */
Vector3 mEye;
/**The camera forward vector */
Vector3 mW;
/**The camera side vector*/
Vector3 mU;
/**The camera up vector */
Vector3 mV;
/**The camera look at */
Vector3 mLookAt;
/**Focal length */
float mFocalLength = 5.25f;
/**Aperture Size */
//float mApertureSize = 0.7f;
float mApertureSize = 0.0f;
// Motion blur variables
// Time at which the shutter was open
float mTime0;
// Time at which the shutter is closed
float mTime1;
};
//Simple ray class
class Ray
{
public:
/** Ctor */
__device__ Ray(const Vector3& InOrigin = Vector3(0, 0, 0), const Vector3& InDirection = Vector3(0, 0, 1),float InTime = 0.0f) : mOrigin(InOrigin), mDirection(InDirection),mTime(InTime) {}
/** Copy Ctor */
__device__ Ray(const Ray& InRay) : mOrigin(InRay.mOrigin), mDirection(InRay.mDirection) { }
//Method used to compute position at parameter t
__device__ Vector3 PositionAtT(float t) const
{
return mOrigin + mDirection * t;
}
// This ray origin
Vector3 mOrigin;
// This ray direction
Vector3 mDirection;
// Min t
float mTmin;
// Max t
float mTmax;
// Added for motion blur
float mTime = 0.0f;
};
//Simple sphere class
struct Sphere
{
	// We also allow the sphere to move, to account for motion blur
/** The center of the sphere at time 0*/
Vector3 mCenter0;
/** The center of the sphere at time 1*/
Vector3 mCenter1;
/** Let's give this sphere a color */
Vector3 mColor;
/** The radius of the sphere */
float mRadius;
/** Time at which the sphere started moving (coincides with camera shutter open) */
float mTime0 = 0.0f;
	/** Time at which the sphere stops moving (coincides with camera shutter close) */
float mTime1 = 1.0f;
__device__ Vector3 GetCenterAtTime(float Time) const noexcept
{
return mCenter0 + (mCenter1 - mCenter0)*((Time - mTime0) / (mTime1 - mTime0));
}
/** Ctor */
__device__ Sphere(const Vector3& InCenter0 = Vector3(0, 0, 0), const Vector3& InCenter1 = Vector3(0, 0, 0),const Vector3& InColor = Vector3(1,1,1),float InRadius = 1) : mCenter0(InCenter0), mCenter1(InCenter1), mColor(InColor) ,mRadius(InRadius) { }
/** Copy Ctor */
__device__ Sphere(const Sphere& InSphere) : mCenter0(InSphere.mCenter0), mCenter1(InSphere.mCenter1), mColor(InSphere.mColor),mRadius(InSphere.mRadius) { }
/** Get the color of this sphere */
__device__ Vector3 GetColor() const { return mColor; }
	//Compute the ray-sphere intersection using the analytic solution
__device__ bool Intersect(const Ray& InRay, float InTMin, float InTMax,float& t)
{
const Vector3& oc = (InRay.mOrigin - GetCenterAtTime(InRay.mTime));
float a = InRay.mDirection.dot(InRay.mDirection);
float b = oc.dot(InRay.mDirection);
float c = oc.dot(oc) - mRadius * mRadius;
float Disc = b * b - a * c;
float temp = 0.0f;
if (Disc > 0.0001f)
{
float SqrtDisc = sqrt(Disc);
temp = (-b - SqrtDisc) / a;
if (temp < InTMax && temp > InTMin)
{
t = temp;
return true;
}
temp = (-b + SqrtDisc) / a;
if (temp < InTMax && temp > InTMin)
{
t = temp;
return true;
}
}
return false;
}
};
__device__ static Ray GetDOFRay(const Ray& ray, float ApertureSize, float FocalLength, u32* Seed0, u32* Seed1)
{
	// This is the focal point for a given primary camera ray (exact only if Dir has unit length)
auto P = ray.mOrigin + ray.mDirection * FocalLength;
	// Get two random numbers in the 0-1 range to sample the aperture disk
float u1 = GetRandom01(Seed0, Seed1);
float u2 = GetRandom01(Seed0, Seed1);
float r1 = 2.0f * M_PI * u1;
float r2 = u2;
auto RandVec = Vector3(cosf(r1)*r2, sinf(r1)*r2, 0.0f) * ApertureSize;
// This is the new ray origin
auto NewRayOrigin = ray.mOrigin + RandVec;
// New ray direction
auto NewRayDir = (P - NewRayOrigin).norm();
return Ray(NewRayOrigin, NewRayDir);
}
__device__ static bool GetClosestHit(const Ray& InRay, float InTMin, float InTMax,HitData& OutHitData,Sphere* InSphereList,const u32 InNumSpheres)
{
float Inf = FLT_MAX;
float tMin = Inf;
float t = 0.f;
for (i32 i=0;i<InNumSpheres;++i)
{
if (InSphereList[i].Intersect(InRay, 0.001f, FLT_MAX,t) && t < tMin )
{
tMin = t;
OutHitData.t = t;
OutHitData.mHitPos = InRay.PositionAtT(t);
OutHitData.mObjId = i;
}
}
return (tMin < Inf);
}
//the keyword __global__ instructs the CUDA compiler that this function is the entry point of our kernel
__global__ void RenderScene(const u32 ScreenWidth,const u32 ScreenHeight, float* ColorBuffer)
{
u32 x = blockIdx.x*blockDim.x + threadIdx.x;
	u32 y = blockIdx.y*blockDim.y + threadIdx.y;
	//Guard against the extra threads created by the rounded-up grid in main();
	//without this check they would write outside ColorBuffer.
	if (x >= ScreenWidth || y >= ScreenHeight)
	{
		return;
	}
	//Create a simple sphere list made of five spheres
const u32 kNumSpheres = 5;
Sphere SphereList[kNumSpheres] = {
Sphere(Vector3(0.0f,0.0f,1.0f),Vector3(0.0f,0.5f,1.0f),Vector3(0.0f,1.0f,0.0f)),
Sphere(Vector3(0.75f,0.0f,3.5f),Vector3(0.75f,0.0f,3.5f),Vector3(1.0f,0.0f,0.0f)),
Sphere(Vector3(1.5f,0.0f,6.0f),Vector3(1.5f,0.35f,6.0f),Vector3(1.0f,1.0f,0.0f)),
Sphere(Vector3(-0.5f,0.0f,0.0f),Vector3(-0.5f,0.0f,0.0f),Vector3(0.0f,0.0f,1.0f)),
Sphere(Vector3(-1.0f,0.0f,-1.0f),Vector3(-1.0f,0.0f,-1.0f),Vector3(1.0f,0.0f,1.0f))
};
	//Prepare a fallback color
	Vector3 Green(0.0f, 1.0f, 0.0f); //Green color we could return on a primitive hit (unused below: each sphere provides its own color)
//Create a camera
Camera camera(Vector3(0.0f, 2.0f, -5.0f));
//Cast a ray in world space from the camera
u32 Seed0 = x;
u32 Seed1 = y;
// This is the resultant color
Vector3 ColorResult;
// Inv of the number of samples
const float InvSamples = 1.0f / ((float)SAMPLES);
// Get parameters needed for DOF computation
float FocalLength = camera.GetFocalLength();
float ApertureSize = camera.GetApertureSize();
// We use this to create a gradient for the background color. It will be a bit prettier to look at and it will give a better contrast with objects in the foreground
float GradientInterp = (static_cast<float>(y) / static_cast<float>(ScreenHeight));
Vector3 BkgColor = Lerp(Vector3(0.671f,0.875f,0.973f), Vector3(0.992f,0.941f,0.918f),GradientInterp);
// For each sample of a given pixel
// We implement distributed ray casting
// What we obtain at the end of this process is antialiasing
for (u32 i = 0; i < SAMPLES; ++i)
{
// Pick a random sample in 0-1 range
#if AA_ENABLED
float rx = GetRandom01(&Seed0, &Seed1);
float ry = GetRandom01(&Seed0, &Seed1);
//Compute the world space ray direction for each sample and then average the results
auto WSDir = camera.GetWorldSpaceRayDir(((float)x) + rx, ((float)y) + ry , ScreenWidth, ScreenHeight);
#else
auto WSDir = camera.GetWorldSpaceRayDir(((float)x), ((float)y), ScreenWidth, ScreenHeight);
#endif
//Construct a ray in world space that originates from the camera
Ray WSRay(camera.GetCameraEye(), WSDir);
// Get Random time interval between 0-1
WSRay.mTime = GetRandom01(&Seed0, &Seed1);
//Ray DOFRay = GetDOFRay(WSRay, ApertureSize, FocalLength, &Seed0, &Seed1);
//Compute intersection and set a color
HitData OutHitData;
// Get the closest hit
//bool Hit = GetClosestHit(DOFRay, 0.001f, FLT_MAX, OutHitData,SphereList,kNumSpheres);
bool Hit = GetClosestHit(WSRay, 0.001f, FLT_MAX, OutHitData, SphereList, kNumSpheres);
// Return the color for a given sample and accumulate the result
ColorResult += (Hit ? SphereList[OutHitData.mObjId].mColor : BkgColor);
}
// Average the results
ColorResult *= InvSamples;
//We access the linear ColorBuffer storing each color component separately (we could have a float3 color buffer for a more compact/cleaner solution)
int offset = (x + (ScreenHeight - y - 1) * ScreenWidth) * 3;
//Store the results of your computations
ColorBuffer[offset] = ColorResult.X();
ColorBuffer[offset + 1] = ColorResult.Y();
ColorBuffer[offset + 2] = ColorResult.Z();
}
int main()
{
//Color Buffer resolution
int ScreenWidth = 512;
int ScreenHeight = 512;
float* ColorBuffer = nullptr;
//Here we prepare our computation domain (i.e. thread blocks and threads in a block)
	//Number of threads in a block (experiment with these sizes!).
	//Suggestion: make them a multiple of the warp size (a warp is 32 threads on NVIDIA; AMD's wavefront is 64 threads)
int ThreadBlockSizeX = 8;
int ThreadBlockSizeY = 8;
//Number of thread blocks
int NumOfBlockX = ScreenWidth / ThreadBlockSizeX + 1;
int NumOfBlockY = ScreenHeight / ThreadBlockSizeY + 1;
	//Let's define the dimensions of the compute domain
dim3 ThreadBlocks(NumOfBlockX, NumOfBlockY);
dim3 ThreadsInABlock(ThreadBlockSizeX, ThreadBlockSizeY);
//Color buffer size in bytes
const size_t kColorBufferSize = sizeof(float) * 3 * ScreenWidth*ScreenHeight;
//We allocate our color buffer in Unified Memory such that it'll be easy for us to access it on the host as well as on the device
CHECK_CUDA_ERRORS(cudaMallocManaged(&ColorBuffer, kColorBufferSize));
//Launch the kernel that will render the scene
RenderScene << <ThreadBlocks, ThreadsInABlock >> > (ScreenWidth, ScreenHeight, ColorBuffer);
//Wait for the GPU to finish before to access results on the host
CHECK_CUDA_ERRORS(cudaGetLastError());
CHECK_CUDA_ERRORS(cudaDeviceSynchronize());
	//Save results stored in ColorBuffer to file (could be a *.ppm or a *.bmp)
//We are ready to use the results produced on the GPU
//Dump Results on a file
const int dpi = 72;
Ray_BMP_Manager::Save("Chapter5_CudaResult.bmp", ScreenWidth, ScreenHeight, dpi, (float*)ColorBuffer);
//Done! Free up cuda allocated memory
CHECK_CUDA_ERRORS(cudaFree(ColorBuffer));
return 0;
}
|
4cd9ef9a5ad5e9cc4dd393c971086e25f1c41aeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MRG2.cu
*
* Created on: 24-Mar-2009
* Author: Owner
*/
#include "matrix.h"
#include "rng_shared.ch"
int nb_MRG;
int nt_MRG;
#define a_MRG 1403580UL
#define b_MRG 810728UL
#define c_MRG 527612UL
#define d_MRG 1370589UL
#define m1_MRG 4294967087UL
#define m2_MRG 4294944443UL
__device__ unsigned long* d_seeds_MRG;
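// Seeding with skip-ahead: E and F are the 3x3 companion matrices of the two
// recurrences (mod m1 and m2). Squaring them k times yields E^(2^k) and F^(2^k),
// and applying those powers to the previous thread's seed spaces consecutive
// threads 2^k ~ 2^(190 - log2(threads)) steps apart, so per-thread subsequences
// do not overlap within the generator's ~2^191 period.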
void seed_MRG(int nba, int nta, unsigned long* seeds) {
nb_MRG = nba;
nt_MRG = nta;
int tt = nb_MRG * nt_MRG;
int logtt = (int) log2f((float) tt);
int k = 190 - logtt; // period is ~ 2^191
unsigned long mbmodm1 = m1_MRG - b_MRG;
unsigned long mdmodm2 = m2_MRG - d_MRG;
unsigned long E[9] = { 0, a_MRG, mbmodm1, 1, 0, 0, 0, 1, 0 };
unsigned long F[9] = { c_MRG, 0, mdmodm2, 1, 0, 0, 0, 1, 0 };
unsigned long temp[9];
unsigned long E_k[9];
unsigned long F_k[9];
matrix_copy(E, E_k, 3, 3);
matrix_copy(F, F_k, 3, 3);
for (int i = 0; i < k; i++) {
matrix_times_mod(E_k, E_k, temp, 3, 3, 3, 3, m1_MRG);
matrix_copy(temp, E_k, 3, 3);
matrix_times_mod(F_k, F_k, temp, 3, 3, 3, 3, m2_MRG);
matrix_copy(temp, F_k, 3, 3);
}
hipMalloc((void**) &d_seeds_MRG, tt * 6 * sizeof(unsigned long));
unsigned long* hd_seeds = (unsigned long*) malloc(tt * 6 * sizeof(unsigned long));
unsigned long y1[3] = { seeds[0], seeds[1], seeds[2] };
unsigned long y2[3] = { seeds[3], seeds[4], seeds[5] };
unsigned long y1_n[3];
unsigned long y2_n[3];
hd_seeds[0] = y1[0];
hd_seeds[1] = y1[1];
hd_seeds[2] = y1[2];
hd_seeds[3] = y2[0];
hd_seeds[4] = y2[1];
hd_seeds[5] = y2[2];
for (int i = 1; i < tt; i++) {
matrix_times_mod(E_k, y1, y1_n, 3, 3, 3, 1, m1_MRG);
matrix_times_mod(F_k, y2, y2_n, 3, 3, 3, 1, m2_MRG);
y1[0] = y1_n[0];
y1[1] = y1_n[1];
y1[2] = y1_n[2];
y2[0] = y2_n[0];
y2[1] = y2_n[1];
y2[2] = y2_n[2];
hd_seeds[i * 6] = y1[0];
hd_seeds[i * 6 + 1] = y1[1];
hd_seeds[i * 6 + 2] = y1[2];
hd_seeds[i * 6 + 3] = y2[0];
hd_seeds[i * 6 + 4] = y2[1];
hd_seeds[i * 6 + 5] = y2[2];
}
hipMemcpy(d_seeds_MRG, hd_seeds, tt * 6 * sizeof(unsigned long), hipMemcpyHostToDevice);
// for (int i = 0; i < 6; i++) {
// h_seeds_MRG[i] = hd_seeds[tt * 6 - 6 + i];
// }
free(hd_seeds);
}
void kill_MRG() {
hipFree(d_seeds_MRG);
}
__device__ unsigned long mymod(unsigned long x, unsigned long m) {
if (x > m) {
return x % m;
} else {
return x;
}
}
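// Combined multiple recursive generator; the constants (a, b, c, d, m1, m2)
// match L'Ecuyer's MRG32k3a. Each step computes
//   y1_n = (a*y1[n-2] - b*y1[n-3]) mod m1
//   y2_n = (c*y2[n-1] - d*y2[n-3]) mod m2
// and emits z = (y1_n - y2_n) mod m1, mapped into 1..m1 rather than 0..m1-1.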
__global__ void randomUI_MRG(unsigned int* d_array, int N, unsigned long* d_seeds) {
const int tt = blockDim.x * gridDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long y1[3] = { d_seeds[tid * 6], d_seeds[tid * 6 + 1], d_seeds[tid * 6 + 2] };
unsigned long y2[3] = { d_seeds[tid * 6 + 3], d_seeds[tid * 6 + 4], d_seeds[tid * 6 + 5] };
unsigned long y1_n;
unsigned long y2_n;
unsigned long z_n;
unsigned long t1;
unsigned long t2;
for (int i = tid; i < N; i += tt) {
t1 = (a_MRG * y1[1]) % m1_MRG;
t2 = mymod(b_MRG * y1[2], m1_MRG);
if (t2 < t1) {
y1_n = t1 - t2;
} else {
y1_n = t1 + m1_MRG - t2;
}
t1 = (c_MRG * y2[0]) % m2_MRG;
t2 = mymod(d_MRG * y2[2], m2_MRG);
if (t2 < t1) {
y2_n = t1 - t2;
} else {
y2_n = t1 + m2_MRG - t2;
}
y1[2] = y1[1];
y1[1] = y1[0];
y1[0] = y1_n;
y2[2] = y2[1];
y2[1] = y2[0];
y2[0] = y2_n;
if (y1_n > y2_n) {
z_n = y1_n - y2_n;
} else {
z_n = y1_n + m1_MRG - y2_n;
}
if (z_n > 0) {
d_array[i] = z_n;
} else {
d_array[i] = m1_MRG;
}
}
d_seeds[tid * 6] = y1[0];
d_seeds[tid * 6 + 1] = y1[1];
d_seeds[tid * 6 + 2] = y1[2];
d_seeds[tid * 6 + 3] = y2[0];
d_seeds[tid * 6 + 4] = y2[1];
d_seeds[tid * 6 + 5] = y2[2];
}
__global__ void randomF_MRG(float* d_array, int N, unsigned long* d_seeds) {
const int tt = blockDim.x * gridDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long y1[3] = { d_seeds[tid * 6], d_seeds[tid * 6 + 1], d_seeds[tid * 6 + 2] };
unsigned long y2[3] = { d_seeds[tid * 6 + 3], d_seeds[tid * 6 + 4], d_seeds[tid * 6 + 5] };
unsigned long y1_n;
unsigned long y2_n;
unsigned long z_n;
unsigned long t1;
unsigned long t2;
for (int i = tid; i < N; i += tt) {
t1 = (a_MRG * y1[1]) % m1_MRG;
t2 = mymod(b_MRG * y1[2], m1_MRG);
if (t2 < t1) {
y1_n = t1 - t2;
} else {
y1_n = t1 + m1_MRG - t2;
}
t1 = (c_MRG * y2[0]) % m2_MRG;
t2 = mymod(d_MRG * y2[2], m2_MRG);
if (t2 < t1) {
y2_n = t1 - t2;
} else {
y2_n = t1 + m2_MRG - t2;
}
y1[2] = y1[1];
y1[1] = y1[0];
y1[0] = y1_n;
y2[2] = y2[1];
y2[1] = y2[0];
y2[0] = y2_n;
if (y1_n > y2_n) {
z_n = y1_n - y2_n;
} else {
z_n = y1_n + m1_MRG - y2_n;
}
if (z_n > 0) {
d_array[i] = ((float) z_n) / (m1_MRG + 1);
} else {
d_array[i] = ((float) m1_MRG) / (m1_MRG + 1);
}
}
d_seeds[tid * 6] = y1[0];
d_seeds[tid * 6 + 1] = y1[1];
d_seeds[tid * 6 + 2] = y1[2];
d_seeds[tid * 6 + 3] = y2[0];
d_seeds[tid * 6 + 4] = y2[1];
d_seeds[tid * 6 + 5] = y2[2];
}
__global__ void randomIK_MRG(int* d_array, int N, unsigned int k, unsigned long* d_seeds) {
const int tt = blockDim.x * gridDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long y1[3] = { d_seeds[tid * 6], d_seeds[tid * 6 + 1], d_seeds[tid * 6 + 2] };
unsigned long y2[3] = { d_seeds[tid * 6 + 3], d_seeds[tid * 6 + 4], d_seeds[tid * 6 + 5] };
unsigned long y1_n;
unsigned long y2_n;
unsigned long z_n;
unsigned long t1;
unsigned long t2;
// unsigned long mbmodm1 = m1_MRG - b_MRG;
// unsigned long mdmodm2 = m2_MRG - d_MRG;
for (int i = tid; i < N; i += tt) {
t1 = (a_MRG * y1[1]) % m1_MRG;
t2 = mymod(b_MRG * y1[2], m1_MRG);
if (t2 < t1) {
y1_n = t1 - t2;
} else {
y1_n = t1 + m1_MRG - t2;
}
t1 = (c_MRG * y2[0]) % m2_MRG;
t2 = mymod(d_MRG * y2[2], m2_MRG);
if (t2 < t1) {
y2_n = t1 - t2;
} else {
y2_n = t1 + m2_MRG - t2;
}
y1[2] = y1[1];
y1[1] = y1[0];
y1[0] = y1_n;
y2[2] = y2[1];
y2[1] = y2[0];
y2[0] = y2_n;
if (y1_n > y2_n) {
z_n = y1_n - y2_n;
} else {
z_n = y1_n + m1_MRG - y2_n;
}
// z_n = (y1_n + m1_MRG - y2_n) % m1_MRG;
d_array[i] = z_n % k;
}
d_seeds[tid * 6] = y1[0];
d_seeds[tid * 6 + 1] = y1[1];
d_seeds[tid * 6 + 2] = y1[2];
d_seeds[tid * 6 + 3] = y2[0];
d_seeds[tid * 6 + 4] = y2[1];
d_seeds[tid * 6 + 5] = y2[2];
}
void populate_rand_MRG(float* array, int N) {
int tt = nb_MRG * nt_MRG;
int M = max(N, tt);
float *d_Rand;
hipMalloc((void **) &d_Rand, M * sizeof(float));
hipDeviceSynchronize();
hipLaunchKernelGGL(( randomF_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, M, d_seeds_MRG);
hipDeviceSynchronize();
hipMemcpy(array, d_Rand, N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_Rand);
}
void populate_rand_MRG_d(float* d_array, int N) {
int tt = nb_MRG * nt_MRG;
if (N < tt) {
float* d_Rand;
hipMalloc((void**) &d_Rand, tt * sizeof(float));
hipLaunchKernelGGL(( randomF_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, tt, d_seeds_MRG);
hipMemcpy(d_array, d_Rand, N * sizeof(float), hipMemcpyDeviceToDevice);
hipFree(d_Rand);
} else {
hipLaunchKernelGGL(( randomF_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_array, N, d_seeds_MRG);
hipDeviceSynchronize();
}
}
// populates a length N array of floats on host with N(0,1) numbers
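// The Box-Muller transform (BoxMullerGPU, from rng_shared.ch) is assumed to
// consume the uniforms in pairs, which is why the temporary buffer size is
// rounded up to a multiple of 2 * (blocks * threads).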
void populate_randn_MRG(float* array, int N) {
int tt = nb_MRG * nt_MRG;
int tt2 = tt * 2;
int M = max(tt2, N);
int r = N % (tt2);
if (M == N && r != 0) {
M = N + (tt2 - r);
}
float *d_Rand;
hipMalloc((void **) &d_Rand, M * sizeof(float));
hipLaunchKernelGGL(( randomF_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, M, d_seeds_MRG);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoxMullerGPU), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, M);
hipMemcpy(array, d_Rand, N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_Rand);
}
void populate_randIK_MRG(int* h_array, int N, int k) {
int tt = nb_MRG * nt_MRG;
int M = max(N, tt);
int *d_Rand;
hipMalloc((void **) &d_Rand, M * sizeof(int));
hipLaunchKernelGGL(( randomIK_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, M, k, d_seeds_MRG);
hipDeviceSynchronize();
hipMemcpy(h_array, d_Rand, N * sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_Rand);
}
void populate_randIK_MRG_d(int* d_array, int N, int k) {
int tt = nb_MRG * nt_MRG;
if (N < tt) {
int *d_Rand;
hipMalloc((void **) &d_Rand, tt * sizeof(int));
hipLaunchKernelGGL(( randomIK_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, tt, k, d_seeds_MRG);
hipMemcpy(d_array, d_Rand, N * sizeof(int), hipMemcpyDeviceToDevice);
hipFree(d_Rand);
} else {
hipLaunchKernelGGL(( randomIK_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_array, N, k, d_seeds_MRG);
hipDeviceSynchronize();
}
}
void populate_randn_MRG_d(float* d_array, int N) {
int tt = nb_MRG * nt_MRG;
if (N < tt * 2) {
float* temp;
hipMalloc((void**) &temp, tt * 2 * sizeof(float));
hipLaunchKernelGGL(( randomF_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, temp, tt * 2, d_seeds_MRG);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoxMullerGPU), dim3(nb_MRG), dim3(nt_MRG), 0, 0, temp, tt * 2);
hipDeviceSynchronize();
hipMemcpy(d_array, temp, N * sizeof(float), hipMemcpyDeviceToDevice);
hipFree(temp);
} else {
hipLaunchKernelGGL(( randomF_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_array, N, d_seeds_MRG);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoxMullerGPU), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_array, N);
hipDeviceSynchronize();
}
}
void populate_randUI_MRG(unsigned int* array, int N) {
int tt = nb_MRG * nt_MRG;
int M = max(N, tt);
unsigned int *d_Rand;
hipMalloc((void **) &d_Rand, M * sizeof(unsigned int));
hipDeviceSynchronize();
hipLaunchKernelGGL(( randomUI_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, M, d_seeds_MRG);
hipDeviceSynchronize();
hipMemcpy(array, d_Rand, N * sizeof(unsigned int), hipMemcpyDeviceToHost);
hipFree(d_Rand);
}
void populate_randUI_MRG_d(unsigned int* d_array, int N) {
int tt = nb_MRG * nt_MRG;
if (N < tt) {
unsigned int* d_Rand;
hipMalloc((void**) &d_Rand, tt * sizeof(unsigned int));
hipLaunchKernelGGL(( randomUI_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_Rand, tt, d_seeds_MRG);
hipMemcpy(d_array, d_Rand, N * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipFree(d_Rand);
} else {
hipLaunchKernelGGL(( randomUI_MRG), dim3(nb_MRG), dim3(nt_MRG), 0, 0, d_array, N, d_seeds_MRG);
hipDeviceSynchronize();
}
}
| 4cd9ef9a5ad5e9cc4dd393c971086e25f1c41aeb.cu | /*
* MRG2.cu
*
* Created on: 24-Mar-2009
* Author: Owner
*/
#include "matrix.h"
#include "rng_shared.ch"
int nb_MRG;
int nt_MRG;
#define a_MRG 1403580UL
#define b_MRG 810728UL
#define c_MRG 527612UL
#define d_MRG 1370589UL
#define m1_MRG 4294967087UL
#define m2_MRG 4294944443UL
__device__ unsigned long* d_seeds_MRG;
void seed_MRG(int nba, int nta, unsigned long* seeds) {
nb_MRG = nba;
nt_MRG = nta;
int tt = nb_MRG * nt_MRG;
int logtt = (int) log2f((float) tt);
int k = 190 - logtt; // period is ~ 2^191
unsigned long mbmodm1 = m1_MRG - b_MRG;
unsigned long mdmodm2 = m2_MRG - d_MRG;
unsigned long E[9] = { 0, a_MRG, mbmodm1, 1, 0, 0, 0, 1, 0 };
unsigned long F[9] = { c_MRG, 0, mdmodm2, 1, 0, 0, 0, 1, 0 };
unsigned long temp[9];
unsigned long E_k[9];
unsigned long F_k[9];
matrix_copy(E, E_k, 3, 3);
matrix_copy(F, F_k, 3, 3);
for (int i = 0; i < k; i++) {
matrix_times_mod(E_k, E_k, temp, 3, 3, 3, 3, m1_MRG);
matrix_copy(temp, E_k, 3, 3);
matrix_times_mod(F_k, F_k, temp, 3, 3, 3, 3, m2_MRG);
matrix_copy(temp, F_k, 3, 3);
}
cudaMalloc((void**) &d_seeds_MRG, tt * 6 * sizeof(unsigned long));
unsigned long* hd_seeds = (unsigned long*) malloc(tt * 6 * sizeof(unsigned long));
unsigned long y1[3] = { seeds[0], seeds[1], seeds[2] };
unsigned long y2[3] = { seeds[3], seeds[4], seeds[5] };
unsigned long y1_n[3];
unsigned long y2_n[3];
hd_seeds[0] = y1[0];
hd_seeds[1] = y1[1];
hd_seeds[2] = y1[2];
hd_seeds[3] = y2[0];
hd_seeds[4] = y2[1];
hd_seeds[5] = y2[2];
for (int i = 1; i < tt; i++) {
matrix_times_mod(E_k, y1, y1_n, 3, 3, 3, 1, m1_MRG);
matrix_times_mod(F_k, y2, y2_n, 3, 3, 3, 1, m2_MRG);
y1[0] = y1_n[0];
y1[1] = y1_n[1];
y1[2] = y1_n[2];
y2[0] = y2_n[0];
y2[1] = y2_n[1];
y2[2] = y2_n[2];
hd_seeds[i * 6] = y1[0];
hd_seeds[i * 6 + 1] = y1[1];
hd_seeds[i * 6 + 2] = y1[2];
hd_seeds[i * 6 + 3] = y2[0];
hd_seeds[i * 6 + 4] = y2[1];
hd_seeds[i * 6 + 5] = y2[2];
}
cudaMemcpy(d_seeds_MRG, hd_seeds, tt * 6 * sizeof(unsigned long), cudaMemcpyHostToDevice);
// for (int i = 0; i < 6; i++) {
// h_seeds_MRG[i] = hd_seeds[tt * 6 - 6 + i];
// }
free(hd_seeds);
}
void kill_MRG() {
cudaFree(d_seeds_MRG);
}
__device__ unsigned long mymod(unsigned long x, unsigned long m) {
if (x > m) {
return x % m;
} else {
return x;
}
}
__global__ void randomUI_MRG(unsigned int* d_array, int N, unsigned long* d_seeds) {
const int tt = blockDim.x * gridDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long y1[3] = { d_seeds[tid * 6], d_seeds[tid * 6 + 1], d_seeds[tid * 6 + 2] };
unsigned long y2[3] = { d_seeds[tid * 6 + 3], d_seeds[tid * 6 + 4], d_seeds[tid * 6 + 5] };
unsigned long y1_n;
unsigned long y2_n;
unsigned long z_n;
unsigned long t1;
unsigned long t2;
for (int i = tid; i < N; i += tt) {
t1 = (a_MRG * y1[1]) % m1_MRG;
t2 = mymod(b_MRG * y1[2], m1_MRG);
if (t2 < t1) {
y1_n = t1 - t2;
} else {
y1_n = t1 + m1_MRG - t2;
}
t1 = (c_MRG * y2[0]) % m2_MRG;
t2 = mymod(d_MRG * y2[2], m2_MRG);
if (t2 < t1) {
y2_n = t1 - t2;
} else {
y2_n = t1 + m2_MRG - t2;
}
y1[2] = y1[1];
y1[1] = y1[0];
y1[0] = y1_n;
y2[2] = y2[1];
y2[1] = y2[0];
y2[0] = y2_n;
if (y1_n > y2_n) {
z_n = y1_n - y2_n;
} else {
z_n = y1_n + m1_MRG - y2_n;
}
if (z_n > 0) {
d_array[i] = z_n;
} else {
d_array[i] = m1_MRG;
}
}
d_seeds[tid * 6] = y1[0];
d_seeds[tid * 6 + 1] = y1[1];
d_seeds[tid * 6 + 2] = y1[2];
d_seeds[tid * 6 + 3] = y2[0];
d_seeds[tid * 6 + 4] = y2[1];
d_seeds[tid * 6 + 5] = y2[2];
}
__global__ void randomF_MRG(float* d_array, int N, unsigned long* d_seeds) {
const int tt = blockDim.x * gridDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long y1[3] = { d_seeds[tid * 6], d_seeds[tid * 6 + 1], d_seeds[tid * 6 + 2] };
unsigned long y2[3] = { d_seeds[tid * 6 + 3], d_seeds[tid * 6 + 4], d_seeds[tid * 6 + 5] };
unsigned long y1_n;
unsigned long y2_n;
unsigned long z_n;
unsigned long t1;
unsigned long t2;
for (int i = tid; i < N; i += tt) {
t1 = (a_MRG * y1[1]) % m1_MRG;
t2 = mymod(b_MRG * y1[2], m1_MRG);
if (t2 < t1) {
y1_n = t1 - t2;
} else {
y1_n = t1 + m1_MRG - t2;
}
t1 = (c_MRG * y2[0]) % m2_MRG;
t2 = mymod(d_MRG * y2[2], m2_MRG);
if (t2 < t1) {
y2_n = t1 - t2;
} else {
y2_n = t1 + m2_MRG - t2;
}
y1[2] = y1[1];
y1[1] = y1[0];
y1[0] = y1_n;
y2[2] = y2[1];
y2[1] = y2[0];
y2[0] = y2_n;
if (y1_n > y2_n) {
z_n = y1_n - y2_n;
} else {
z_n = y1_n + m1_MRG - y2_n;
}
if (z_n > 0) {
d_array[i] = ((float) z_n) / (m1_MRG + 1);
} else {
d_array[i] = ((float) m1_MRG) / (m1_MRG + 1);
}
}
d_seeds[tid * 6] = y1[0];
d_seeds[tid * 6 + 1] = y1[1];
d_seeds[tid * 6 + 2] = y1[2];
d_seeds[tid * 6 + 3] = y2[0];
d_seeds[tid * 6 + 4] = y2[1];
d_seeds[tid * 6 + 5] = y2[2];
}
__global__ void randomIK_MRG(int* d_array, int N, unsigned int k, unsigned long* d_seeds) {
const int tt = blockDim.x * gridDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long y1[3] = { d_seeds[tid * 6], d_seeds[tid * 6 + 1], d_seeds[tid * 6 + 2] };
unsigned long y2[3] = { d_seeds[tid * 6 + 3], d_seeds[tid * 6 + 4], d_seeds[tid * 6 + 5] };
unsigned long y1_n;
unsigned long y2_n;
unsigned long z_n;
unsigned long t1;
unsigned long t2;
// unsigned long mbmodm1 = m1_MRG - b_MRG;
// unsigned long mdmodm2 = m2_MRG - d_MRG;
for (int i = tid; i < N; i += tt) {
t1 = (a_MRG * y1[1]) % m1_MRG;
t2 = mymod(b_MRG * y1[2], m1_MRG);
if (t2 < t1) {
y1_n = t1 - t2;
} else {
y1_n = t1 + m1_MRG - t2;
}
t1 = (c_MRG * y2[0]) % m2_MRG;
t2 = mymod(d_MRG * y2[2], m2_MRG);
if (t2 < t1) {
y2_n = t1 - t2;
} else {
y2_n = t1 + m2_MRG - t2;
}
y1[2] = y1[1];
y1[1] = y1[0];
y1[0] = y1_n;
y2[2] = y2[1];
y2[1] = y2[0];
y2[0] = y2_n;
if (y1_n > y2_n) {
z_n = y1_n - y2_n;
} else {
z_n = y1_n + m1_MRG - y2_n;
}
// z_n = (y1_n + m1_MRG - y2_n) % m1_MRG;
d_array[i] = z_n % k;
}
d_seeds[tid * 6] = y1[0];
d_seeds[tid * 6 + 1] = y1[1];
d_seeds[tid * 6 + 2] = y1[2];
d_seeds[tid * 6 + 3] = y2[0];
d_seeds[tid * 6 + 4] = y2[1];
d_seeds[tid * 6 + 5] = y2[2];
}
void populate_rand_MRG(float* array, int N) {
int tt = nb_MRG * nt_MRG;
int M = max(N, tt);
float *d_Rand;
cudaMalloc((void **) &d_Rand, M * sizeof(float));
cudaThreadSynchronize();
randomF_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, M, d_seeds_MRG);
cudaThreadSynchronize();
cudaMemcpy(array, d_Rand, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_Rand);
}
void populate_rand_MRG_d(float* d_array, int N) {
int tt = nb_MRG * nt_MRG;
if (N < tt) {
float* d_Rand;
cudaMalloc((void**) &d_Rand, tt * sizeof(float));
randomF_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, tt, d_seeds_MRG);
cudaMemcpy(d_array, d_Rand, N * sizeof(float), cudaMemcpyDeviceToDevice);
cudaFree(d_Rand);
} else {
randomF_MRG<<<nb_MRG, nt_MRG>>>(d_array, N, d_seeds_MRG);
cudaThreadSynchronize();
}
}
// populates a length N array of floats on host with N(0,1) numbers
void populate_randn_MRG(float* array, int N) {
int tt = nb_MRG * nt_MRG;
int tt2 = tt * 2;
int M = max(tt2, N);
int r = N % (tt2);
if (M == N && r != 0) {
M = N + (tt2 - r);
}
float *d_Rand;
cudaMalloc((void **) &d_Rand, M * sizeof(float));
randomF_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, M, d_seeds_MRG);
cudaThreadSynchronize();
BoxMullerGPU<<<nb_MRG, nt_MRG>>>(d_Rand, M);
cudaMemcpy(array, d_Rand, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_Rand);
}
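// populates a length N array of ints on host with uniform numbers in {0, ..., k-1}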
void populate_randIK_MRG(int* h_array, int N, int k) {
int tt = nb_MRG * nt_MRG;
int M = max(N, tt);
int *d_Rand;
cudaMalloc((void **) &d_Rand, M * sizeof(int));
randomIK_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, M, k, d_seeds_MRG);
cudaThreadSynchronize();
cudaMemcpy(h_array, d_Rand, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_Rand);
}
void populate_randIK_MRG_d(int* d_array, int N, int k) {
int tt = nb_MRG * nt_MRG;
if (N < tt) {
int *d_Rand;
cudaMalloc((void **) &d_Rand, tt * sizeof(int));
randomIK_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, tt, k, d_seeds_MRG);
cudaMemcpy(d_array, d_Rand, N * sizeof(int), cudaMemcpyDeviceToDevice);
cudaFree(d_Rand);
} else {
randomIK_MRG<<<nb_MRG, nt_MRG>>>(d_array, N, k, d_seeds_MRG);
cudaThreadSynchronize();
}
}
void populate_randn_MRG_d(float* d_array, int N) {
int tt = nb_MRG * nt_MRG;
if (N < tt * 2) {
float* temp;
cudaMalloc((void**) &temp, tt * 2 * sizeof(float));
randomF_MRG<<<nb_MRG, nt_MRG>>>(temp, tt * 2, d_seeds_MRG);
cudaThreadSynchronize();
BoxMullerGPU<<<nb_MRG, nt_MRG>>>(temp, tt * 2);
cudaThreadSynchronize();
cudaMemcpy(d_array, temp, N * sizeof(float), cudaMemcpyDeviceToDevice);
cudaFree(temp);
} else {
randomF_MRG<<<nb_MRG, nt_MRG>>>(d_array, N, d_seeds_MRG);
cudaThreadSynchronize();
BoxMullerGPU<<<nb_MRG, nt_MRG>>>(d_array, N);
cudaThreadSynchronize();
}
}
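// populates a length N array of unsigned ints on host with values from the MRG stream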
void populate_randUI_MRG(unsigned int* array, int N) {
int tt = nb_MRG * nt_MRG;
int M = max(N, tt);
unsigned int *d_Rand;
cudaMalloc((void **) &d_Rand, M * sizeof(unsigned int));
cudaThreadSynchronize();
randomUI_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, M, d_seeds_MRG);
cudaThreadSynchronize();
cudaMemcpy(array, d_Rand, N * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaFree(d_Rand);
}
void populate_randUI_MRG_d(unsigned int* d_array, int N) {
int tt = nb_MRG * nt_MRG;
if (N < tt) {
unsigned int* d_Rand;
cudaMalloc((void**) &d_Rand, tt * sizeof(unsigned int));
randomUI_MRG<<<nb_MRG, nt_MRG>>>(d_Rand, tt, d_seeds_MRG);
cudaMemcpy(d_array, d_Rand, N * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaFree(d_Rand);
} else {
randomUI_MRG<<<nb_MRG, nt_MRG>>>(d_array, N, d_seeds_MRG);
cudaThreadSynchronize();
}
}
|
2f29b186f5d3b5a9a7221b8043179d313cf4496f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/Float16.cuh>
//#include <faiss/gpu/utils/nvidia/fp16_emu.cuh>
#include <faiss/impl/FaissAssert.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#ifdef FAISS_USE_FLOAT16
namespace faiss { namespace gpu {
bool getDeviceSupportsFloat16Math(int device) {
const auto& prop = getDeviceProperties(device);
return (prop.major >= 6 ||
(prop.major == 5 && prop.minor >= 3));
}
__half hostFloat2Half(float a) {
#if TORCH_HIP_VERSION >= 9000
__half_raw raw;
//raw.x = cpu_float2half_rn(a).x;
  FAISS_ASSERT_FMT(false, "%s", "cpu_float2half_rn() not supported");
return __half(raw);
#else
__half h;
//h.x = cpu_float2half_rn(a).x;
  FAISS_ASSERT_FMT(false, "%s", "cpu_float2half_rn() not supported");
return h;
#endif
}
} } // namespace
#endif // FAISS_USE_FLOAT16
| 2f29b186f5d3b5a9a7221b8043179d313cf4496f.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/Float16.cuh>
//#include <faiss/gpu/utils/nvidia/fp16_emu.cuh>
#include <faiss/impl/FaissAssert.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#ifdef FAISS_USE_FLOAT16
namespace faiss { namespace gpu {
bool getDeviceSupportsFloat16Math(int device) {
const auto& prop = getDeviceProperties(device);
return (prop.major >= 6 ||
(prop.major == 5 && prop.minor >= 3));
}
__half hostFloat2Half(float a) {
#if CUDA_VERSION >= 9000
__half_raw raw;
//raw.x = cpu_float2half_rn(a).x;
FAISS_ASSERT_FMT(false, "%s", "cpu_float2half_rn() not support");
return __half(raw);
#else
__half h;
//h.x = cpu_float2half_rn(a).x;
  FAISS_ASSERT_FMT(false, "%s", "cpu_float2half_rn() not supported");
return h;
#endif
}
} } // namespace
#endif // FAISS_USE_FLOAT16
|
d97c40abee53c357697cd11f3184631258508f8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define maxAccuracy 0.00000005
#define minAccuracy 5.00
#define COMMAND_INPUT
#define TIME_CALC
#define ACC
// #define CPU_COMPARISON
#define TILED_CONVOLUTION
#define FILTER_RADIUS 16
/*******************CUDA Error Check*******************/
#define CUDAsafeCall(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define CUDAcheckError() __cudaCheckError(__FILE__, __LINE__)
inline void gpuAssert(hipError_t err, const char *file, int line, bool abort = true) {
if(err != hipSuccess) {
fprintf(stderr, "GPUAssert: %s %s %d\n", hipGetErrorString(err), file, line);
if(abort)
exit(err);
}
}
inline void __cudaCheckError(const char *file, const int line) {
hipError_t error = hipGetLastError();
if(hipSuccess != error) {
fprintf(stderr, "CUDAcheckError failed at %s: %i: %s\n", file, line, hipGetErrorString(error));
exit(-1);
}
    return; // if all went well
}
/*******************CUDA Error Check*******************/
#ifdef TILED_CONVOLUTION
__constant__ float const_filter[FILTER_RADIUS*FILTER_RADIUS];
////////////////////////////////////////////////////////////////////////////////
// TILED GPU row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void tiledconvolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width, int imageW, int imageH, int filterR, int tile_width, int block, int ratio) {
    extern __shared__ float data_shared[]; // array into which the data will be loaded in shared memory
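    // the (tile_width + 1) row stride used below adds one padding column, which helps avoid shared-memory bank conflicts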
int i, j, srcX, srcY;
int k, d, colX, rowY;
float sum;
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
data_shared[rowY * (tile_width + 1) + colX] = d_Src[srcY * row_width + srcX];
__syncthreads();
}
}
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = colX + k;
                //if we are within the bounds we can
                //use the shared memory
if(d >= 0 && d < tile_width) {
sum += data_shared[rowY * (tile_width + 1) + d] * const_filter[filterR - k];
}
else {
sum += d_Src[srcY * row_width + srcX + k] * const_filter[filterR - k];
}
}
d_Dst[srcY * row_width + srcX] = sum;
__syncthreads();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TILED GPU column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void tiledconvolutionColumnGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width, int imageW, int imageH, int filterR, int tile_width, int block, int ratio) {
    extern __shared__ float data_shared[]; // array into which the data will be loaded in shared memory
int i, j, srcX, srcY;
int k, d, colX, rowY;
float sum;
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
data_shared[rowY * (tile_width + 1) + colX] = d_Src[srcY * row_width + srcX];
__syncthreads();
}
}
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = rowY + k;
                //if we are within the bounds we can
                //use the shared memory
if(d >= 0 && d < tile_width) {
sum += data_shared[d * (tile_width + 1) + colX] * const_filter[filterR - k];
}
else {
sum += d_Src[(srcY + k) * row_width + srcX] * const_filter[filterR - k];
}
}
d_Dst[(srcY-filterR) * imageW + srcX - filterR] = sum;
__syncthreads();
}
}
}
#else
////////////////////////////////////////////////////////////////////////////////
// GPU row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width,
int imageW, int imageH, int filterR) {
int blockID = (gridDim.x * blockIdx.y) + (gridDim.x * gridDim.y * blockIdx.z) + blockIdx.x;
int threadID = threadIdx.x + (blockID * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x);
int x = (threadID % imageH) + filterR;
int y = (threadID / imageH) + filterR;
int k, d;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
d = x + k;
sum += d_Src[y * row_width + d] * d_Filter[filterR - k];
}
d_Dst[y * row_width + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// GPU column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width,
int imageW, int imageH, int filterR) {
int blockID = (gridDim.x * blockIdx.y) + (gridDim.x * gridDim.y * blockIdx.z) + blockIdx.x;
int threadID = threadIdx.x + (blockID * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x);
int x = (threadID % imageH) + filterR;
int y = (threadID / imageH) + filterR;
int k, d;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
d = y + k;
sum += d_Src[d * row_width + x] * d_Filter[filterR - k];
}
d_Dst[(y-filterR) * imageH + x - filterR] = sum;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU;
int imageW;
int imageH;
unsigned int i;
#ifdef COMMAND_INPUT
#ifdef TILED_CONVOLUTION
if(argc != 4) {
printf("Error with command line args.\n");
exit(-1);
}
filter_radius = (unsigned int) atoi(argv[1]);
imageW = atoi(argv[2]);
int ratio = atoi(argv[3]); //tile to block ratio
if(ratio > imageW) {
printf("Error, imageW cannot be smaller than ratio.\n");
exit(-1);
}
#else
if(argc != 3) {
printf("Error with command line args.\n");
exit(1);
}
filter_radius = (unsigned int) atoi(argv[1]);
imageW = atoi(argv[2]);
#endif
#else
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
    // The user provides imageW and imageH and we assume they are equal,
    // i.e. imageW = imageH = N, where N is given by the user.
    // For simplicity we assume square images.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
#endif
imageH = imageW;
#ifndef ACC
printf("Filter Radius: %i\nFilter Length: %i\n", filter_radius, FILTER_LENGTH);
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
#endif
//printf("Allocating and initializing host arrays...\n");
    // It would be a good idea to also check the result of each malloc...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
/*****************************CUDA*****************************/
float
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*h_OutputGPU,
*h_InputGPU;
unsigned int row_width = filter_radius * 2 + imageH;
unsigned int size = row_width*row_width * sizeof(float);
h_OutputGPU = (float *) malloc(imageW * imageH * sizeof(float));
h_InputGPU = (float *) malloc(size);
CUDAsafeCall(hipMalloc((void **) &d_Filter, FILTER_LENGTH * sizeof(float)));
CUDAsafeCall(hipMalloc((void **) &d_Input, size));
CUDAsafeCall(hipMalloc((void **) &d_Buffer, size));
CUDAsafeCall(hipMalloc((void **) &d_OutputGPU, imageW * imageH * sizeof(float)));
/**************************************************************/
    // 'h_Filter' is the filter with which the convolution is performed and
    // it is initialized randomly. 'h_Input' is the image on which the
    // convolution is performed and it is also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < (unsigned int) imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
    // The part below runs on the CPU and the GPU result must be compared against it.
//printf("CPU computation...\n");
#ifdef CPU_COMPARISON
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
#endif
    // Compare the GPU and CPU results; if even one value exceeds the accuracy
    // we have set, then there is an error and we may terminate the program
/***************PADDING***************/
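    // copy the input image into the centre of a zero-padded buffer of width row_width = imageW + 2*filter_radius
    // so that the kernels can read past the image borders without bounds checks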
memset(h_InputGPU, 0.0, size);
CUDAsafeCall(hipMemset(d_Buffer, 0.0, size));
unsigned int x, y;
for(i = 0; i < (unsigned int) imageW * imageH; i++) {
x = filter_radius + i % imageH;
y = filter_radius + i / imageH;
h_InputGPU[y * row_width + x] = h_Input[i];
}
/*****************************CUDA ALLOC*****************************/
CUDAsafeCall(hipMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), hipMemcpyHostToDevice));
CUDAsafeCall(hipMemcpy(d_Input, h_InputGPU, size, hipMemcpyHostToDevice));
/**************************************************************/
#ifdef TILED_CONVOLUTION
hipMemcpyToSymbol(const_filter, h_Filter, FILTER_LENGTH * sizeof(float));
int tile_width, block;
int blockDimX, gridDimX, shared_mem;
blockDimX = imageW / ratio;
if(blockDimX >= 32)
blockDimX = 32;
tile_width = blockDimX * ratio;
while(tile_width >= 128) {
blockDimX /= 2;
tile_width = blockDimX * ratio;
}
gridDimX = imageW / (blockDimX * ratio);
block = blockDimX * ratio;
shared_mem = tile_width * (tile_width + 1) * sizeof(float);
#ifndef ACC
printf("Tile: %d, %d\ndimBlock: %d, %d\ndimGrid: %d,%d\nShared memory in Bytes: %d\n", tile_width, tile_width, blockDimX, blockDimX, gridDimX, gridDimX, shared_mem);
#endif
dim3 dimBlock(blockDimX, blockDimX);
dim3 dimGrid(gridDimX, gridDimX);
#ifdef TIME_CALC
struct timespec tv1, tv2;
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
#endif
hipLaunchKernelGGL(( tiledconvolutionRowGPU), dim3(dimGrid), dim3(dimBlock), shared_mem, 0, d_Buffer, d_Input, d_Filter, (int) row_width, imageW, imageH, filter_radius, tile_width, block, ratio);
CUDAsafeCall(hipPeekAtLastError());
CUDAsafeCall(hipDeviceSynchronize());
    hipLaunchKernelGGL(( tiledconvolutionColumnGPU), dim3(dimGrid), dim3(dimBlock), shared_mem, 0, d_OutputGPU, d_Buffer, d_Filter, (int) row_width, imageW, imageH, filter_radius, tile_width, block, ratio);
CUDAsafeCall(hipPeekAtLastError());
CUDAsafeCall(hipDeviceSynchronize());
#else
int blockDimX, gridDimX;
if(imageW <=32)
blockDimX = imageW;
else
blockDimX = 32;
if((imageW*imageH/1024) > 0)
gridDimX = imageW*imageH/1024;
else
gridDimX = 1;
gridDimX = sqrt(gridDimX);
if(gridDimX > 65535)
gridDimX = 65535;
dim3 dimBlock(blockDimX, blockDimX);
dim3 dimGrid(gridDimX, gridDimX);
#ifdef TIME_CALC
//start
struct timespec tv1, tv2;
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
#endif
hipLaunchKernelGGL(( convolutionRowGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Buffer, d_Input, d_Filter, (int) row_width, imageW, imageH, filter_radius);
CUDAsafeCall(hipPeekAtLastError());
CUDAsafeCall(hipDeviceSynchronize());
hipLaunchKernelGGL(( convolutionColumnGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_OutputGPU, d_Buffer, d_Filter, (int) row_width, imageW, imageH, filter_radius);
CUDAsafeCall(hipPeekAtLastError());
CUDAsafeCall(hipDeviceSynchronize());
#endif
#ifdef TIME_CALC
    //stop the clock
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
#ifdef ACC
printf ("%10g \n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
#else
printf ("GPU time: %10g seconds\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
#endif
#endif
CUDAsafeCall(hipMemcpy(h_OutputGPU, d_OutputGPU, imageW*imageH*sizeof(float), hipMemcpyDeviceToHost));
#ifdef CPU_COMPARISON
int err;
float acc;
for(acc = maxAccuracy; acc <= minAccuracy; acc *= 10) {
err = 0;
for(i = 0; i < (unsigned int) imageW*imageH; i++) {
if(acc < ABS(h_OutputCPU[i] - h_OutputGPU[i])) {
err = 1;
break;
}
}
if(err == 0) {
#ifndef ACC
printf("Max Accuracy: %f\n", acc);
#endif
break;
}
}
if(err) {
#ifndef ACC
printf("Image is not accurate with filter: %i x %i\n", filter_radius, filter_radius);
#endif
}
#endif
CUDAsafeCall(hipFree(d_Filter));
CUDAsafeCall(hipFree(d_Buffer));
CUDAsafeCall(hipFree(d_Input));
CUDAsafeCall(hipFree(d_OutputGPU));
free(h_OutputGPU);
free(h_InputGPU);
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
    // Do a device reset just in case... Uncomment this once you implement the CUDA part
CUDAsafeCall(hipDeviceReset());
return 0;
}
| d97c40abee53c357697cd11f3184631258508f8a.cu | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define maxAccuracy 0.00000005
#define minAccuracy 5.00
#define COMMAND_INPUT
#define TIME_CALC
#define ACC
// #define CPU_COMPARISON
#define TILED_CONVOLUTION
#define FILTER_RADIUS 16
/*******************CUDA Error Check*******************/
#define CUDAsafeCall(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define CUDAcheckError() __cudaCheckError(__FILE__, __LINE__)
inline void gpuAssert(cudaError_t err, const char *file, int line, bool abort = true) {
if(err != cudaSuccess) {
fprintf(stderr, "GPUAssert: %s %s %d\n", cudaGetErrorString(err), file, line);
if(abort)
exit(err);
}
}
inline void __cudaCheckError(const char *file, const int line) {
cudaError error = cudaGetLastError();
if(cudaSuccess != error) {
fprintf(stderr, "CUDAcheckError failed at %s: %i: %s\n", file, line, cudaGetErrorString(error));
exit(-1);
}
    return; // if all went well
}
/*******************CUDA Error Check*******************/
#ifdef TILED_CONVOLUTION
__constant__ float const_filter[FILTER_RADIUS*FILTER_RADIUS];
////////////////////////////////////////////////////////////////////////////////
// TILED GPU row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void tiledconvolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width, int imageW, int imageH, int filterR, int tile_width, int block, int ratio) {
    extern __shared__ float data_shared[]; // array into which the data will be loaded in shared memory
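    // the (tile_width + 1) row stride used below adds one padding column, which helps avoid shared-memory bank conflicts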
int i, j, srcX, srcY;
int k, d, colX, rowY;
float sum;
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
data_shared[rowY * (tile_width + 1) + colX] = d_Src[srcY * row_width + srcX];
__syncthreads();
}
}
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = colX + k;
                //if we are within the bounds we can
                //use the shared memory
if(d >= 0 && d < tile_width) {
sum += data_shared[rowY * (tile_width + 1) + d] * const_filter[filterR - k];
}
else {
sum += d_Src[srcY * row_width + srcX + k] * const_filter[filterR - k];
}
}
d_Dst[srcY * row_width + srcX] = sum;
__syncthreads();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TILED GPU column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void tiledconvolutionColumnGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width, int imageW, int imageH, int filterR, int tile_width, int block, int ratio) {
    extern __shared__ float data_shared[]; // array into which the data will be loaded in shared memory
int i, j, srcX, srcY;
int k, d, colX, rowY;
float sum;
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
data_shared[rowY * (tile_width + 1) + colX] = d_Src[srcY * row_width + srcX];
__syncthreads();
}
}
for(i = 0; i < ratio; i++) {
for(j = 0; j < ratio; j++) {
rowY = (i * tile_width)/ratio + threadIdx.y;
srcY = blockIdx.y * block + rowY + filterR;
colX = (j * tile_width)/ratio + threadIdx.x;
srcX = blockIdx.x * block + colX + filterR;
sum = 0;
for(k = -filterR; k <= filterR; k++) {
d = rowY + k;
                //if we are within the bounds we can
                //use the shared memory
if(d >= 0 && d < tile_width) {
sum += data_shared[d * (tile_width + 1) + colX] * const_filter[filterR - k];
}
else {
sum += d_Src[(srcY + k) * row_width + srcX] * const_filter[filterR - k];
}
}
d_Dst[(srcY-filterR) * imageW + srcX - filterR] = sum;
__syncthreads();
}
}
}
#else
////////////////////////////////////////////////////////////////////////////////
// GPU row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width,
int imageW, int imageH, int filterR) {
int blockID = (gridDim.x * blockIdx.y) + (gridDim.x * gridDim.y * blockIdx.z) + blockIdx.x;
int threadID = threadIdx.x + (blockID * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x);
int x = (threadID % imageH) + filterR;
int y = (threadID / imageH) + filterR;
int k, d;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
d = x + k;
sum += d_Src[y * row_width + d] * d_Filter[filterR - k];
}
d_Dst[y * row_width + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// GPU column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(float *d_Dst, float *d_Src, float *d_Filter, int row_width,
int imageW, int imageH, int filterR) {
int blockID = (gridDim.x * blockIdx.y) + (gridDim.x * gridDim.y * blockIdx.z) + blockIdx.x;
int threadID = threadIdx.x + (blockID * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x);
int x = (threadID % imageH) + filterR;
int y = (threadID / imageH) + filterR;
int k, d;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
d = y + k;
sum += d_Src[d * row_width + x] * d_Filter[filterR - k];
}
d_Dst[(y-filterR) * imageH + x - filterR] = sum;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU;
int imageW;
int imageH;
unsigned int i;
#ifdef COMMAND_INPUT
#ifdef TILED_CONVOLUTION
if(argc != 4) {
printf("Error with command line args.\n");
exit(-1);
}
filter_radius = (unsigned int) atoi(argv[1]);
imageW = atoi(argv[2]);
int ratio = atoi(argv[3]); //tile to block ratio
if(ratio > imageW) {
printf("Error, imageW cannot be smaller than ratio.\n");
exit(-1);
}
#else
if(argc != 3) {
printf("Error with command line args.\n");
exit(1);
}
filter_radius = (unsigned int) atoi(argv[1]);
imageW = atoi(argv[2]);
#endif
#else
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
    // The user provides imageW and imageH and we assume they are equal,
    // i.e. imageW = imageH = N, where N is given by the user.
    // For simplicity we assume square images.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
#endif
imageH = imageW;
#ifndef ACC
printf("Filter Radius: %i\nFilter Length: %i\n", filter_radius, FILTER_LENGTH);
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
#endif
//printf("Allocating and initializing host arrays...\n");
    // It would be a good idea to also check the result of each malloc...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
/*****************************CUDA*****************************/
float
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*h_OutputGPU,
*h_InputGPU;
unsigned int row_width = filter_radius * 2 + imageH;
unsigned int size = row_width*row_width * sizeof(float);
h_OutputGPU = (float *) malloc(imageW * imageH * sizeof(float));
h_InputGPU = (float *) malloc(size);
CUDAsafeCall(cudaMalloc((void **) &d_Filter, FILTER_LENGTH * sizeof(float)));
CUDAsafeCall(cudaMalloc((void **) &d_Input, size));
CUDAsafeCall(cudaMalloc((void **) &d_Buffer, size));
CUDAsafeCall(cudaMalloc((void **) &d_OutputGPU, imageW * imageH * sizeof(float)));
/**************************************************************/
    // 'h_Filter' is the filter with which the convolution is performed and
    // it is initialized randomly. 'h_Input' is the image on which the
    // convolution is performed and it is also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < (unsigned int) imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
    // The part below runs on the CPU and the GPU result must be compared against it.
//printf("CPU computation...\n");
#ifdef CPU_COMPARISON
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
#endif
    // Compare the GPU and CPU results; if even one value exceeds the accuracy
    // we have set, then there is an error and we may terminate the program
/***************PADDING***************/
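    // copy the input image into the centre of a zero-padded buffer of width row_width = imageW + 2*filter_radius
    // so that the kernels can read past the image borders without bounds checks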
memset(h_InputGPU, 0.0, size);
CUDAsafeCall(cudaMemset(d_Buffer, 0.0, size));
unsigned int x, y;
for(i = 0; i < (unsigned int) imageW * imageH; i++) {
x = filter_radius + i % imageH;
y = filter_radius + i / imageH;
h_InputGPU[y * row_width + x] = h_Input[i];
}
/*****************************CUDA ALLOC*****************************/
CUDAsafeCall(cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), cudaMemcpyHostToDevice));
CUDAsafeCall(cudaMemcpy(d_Input, h_InputGPU, size, cudaMemcpyHostToDevice));
/**************************************************************/
#ifdef TILED_CONVOLUTION
cudaMemcpyToSymbol(const_filter, h_Filter, FILTER_LENGTH * sizeof(float));
int tile_width, block;
int blockDimX, gridDimX, shared_mem;
blockDimX = imageW / ratio;
if(blockDimX >= 32)
blockDimX = 32;
tile_width = blockDimX * ratio;
while(tile_width >= 128) {
blockDimX /= 2;
tile_width = blockDimX * ratio;
}
gridDimX = imageW / (blockDimX * ratio);
block = blockDimX * ratio;
shared_mem = tile_width * (tile_width + 1) * sizeof(float);
#ifndef ACC
printf("Tile: %d, %d\ndimBlock: %d, %d\ndimGrid: %d,%d\nShared memory in Bytes: %d\n", tile_width, tile_width, blockDimX, blockDimX, gridDimX, gridDimX, shared_mem);
#endif
dim3 dimBlock(blockDimX, blockDimX);
dim3 dimGrid(gridDimX, gridDimX);
#ifdef TIME_CALC
struct timespec tv1, tv2;
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
#endif
tiledconvolutionRowGPU<<<dimGrid, dimBlock, shared_mem>>>(d_Buffer, d_Input, d_Filter, (int) row_width, imageW, imageH, filter_radius, tile_width, block, ratio);
CUDAsafeCall(cudaPeekAtLastError());
CUDAsafeCall(cudaDeviceSynchronize());
    tiledconvolutionColumnGPU<<<dimGrid, dimBlock, shared_mem>>>(d_OutputGPU, d_Buffer, d_Filter, (int) row_width, imageW, imageH, filter_radius, tile_width, block, ratio);
CUDAsafeCall(cudaPeekAtLastError());
CUDAsafeCall(cudaDeviceSynchronize());
#else
int blockDimX, gridDimX;
if(imageW <=32)
blockDimX = imageW;
else
blockDimX = 32;
if((imageW*imageH/1024) > 0)
gridDimX = imageW*imageH/1024;
else
gridDimX = 1;
gridDimX = sqrt(gridDimX);
if(gridDimX > 65535)
gridDimX = 65535;
dim3 dimBlock(blockDimX, blockDimX);
dim3 dimGrid(gridDimX, gridDimX);
#ifdef TIME_CALC
//start
struct timespec tv1, tv2;
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
#endif
convolutionRowGPU<<<dimGrid, dimBlock>>>(d_Buffer, d_Input, d_Filter, (int) row_width, imageW, imageH, filter_radius);
CUDAsafeCall(cudaPeekAtLastError());
CUDAsafeCall(cudaDeviceSynchronize());
convolutionColumnGPU<<<dimGrid, dimBlock>>>(d_OutputGPU, d_Buffer, d_Filter, (int) row_width, imageW, imageH, filter_radius);
CUDAsafeCall(cudaPeekAtLastError());
CUDAsafeCall(cudaDeviceSynchronize());
#endif
#ifdef TIME_CALC
    //stop the clock
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
#ifdef ACC
printf ("%10g \n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
#else
printf ("GPU time: %10g seconds\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
#endif
#endif
CUDAsafeCall(cudaMemcpy(h_OutputGPU, d_OutputGPU, imageW*imageH*sizeof(float), cudaMemcpyDeviceToHost));
#ifdef CPU_COMPARISON
int err;
float acc;
for(acc = maxAccuracy; acc <= minAccuracy; acc *= 10) {
err = 0;
for(i = 0; i < (unsigned int) imageW*imageH; i++) {
if(acc < ABS(h_OutputCPU[i] - h_OutputGPU[i])) {
err = 1;
break;
}
}
if(err == 0) {
#ifndef ACC
printf("Max Accuracy: %f\n", acc);
#endif
break;
}
}
if(err) {
#ifndef ACC
printf("Image is not accurate with filter: %i x %i\n", filter_radius, filter_radius);
#endif
}
#endif
CUDAsafeCall(cudaFree(d_Filter));
CUDAsafeCall(cudaFree(d_Buffer));
CUDAsafeCall(cudaFree(d_Input));
CUDAsafeCall(cudaFree(d_OutputGPU));
free(h_OutputGPU);
free(h_InputGPU);
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
    // Do a device reset just in case... Uncomment this once you implement the CUDA part
CUDAsafeCall(cudaDeviceReset());
return 0;
}
|
a1d393255d91c615addc052ea6ac82b600d56339.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *in_h, *out_h;
float *in_d, *out_d;
unsigned num_elements;
hipError_t cuda_ret;
/* Allocate and initialize input vector */
if(argc == 1) {
num_elements = 1000000;
} else if(argc == 2) {
num_elements = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./prefix-scan # Input of size 1,000,000 is used"
"\n Usage: ./prefix-scan <m> # Input of size m is used"
"\n");
exit(0);
}
initVector(&in_h, num_elements);
/* Allocate and initialize output vector */
out_h = (float*)calloc(num_elements, sizeof(float));
if(out_h == NULL) FATAL("Unable to allocate host");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Input size = %u\n", num_elements);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMalloc((void**)&in_d, num_elements*sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
cuda_ret = hipMalloc((void**)&out_d, num_elements*sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMemcpy(in_d, in_h, num_elements*sizeof(float),
hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");
cuda_ret = hipMemset(out_d, 0, num_elements*sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to set device memory");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//Set up and invoke your kernel inside the preScan function, which is in kernel.cu
preScan(out_d, in_d, num_elements);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables to host ------------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMemcpy(out_h, out_d, num_elements*sizeof(float),
hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(in_h, out_h, num_elements);
// Free memory ------------------------------------------------------------
hipFree(in_d); hipFree(out_d);
free(in_h); free(out_h);
return 0;
}
| a1d393255d91c615addc052ea6ac82b600d56339.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *in_h, *out_h;
float *in_d, *out_d;
unsigned num_elements;
cudaError_t cuda_ret;
/* Allocate and initialize input vector */
if(argc == 1) {
num_elements = 1000000;
} else if(argc == 2) {
num_elements = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./prefix-scan # Input of size 1,000,000 is used"
"\n Usage: ./prefix-scan <m> # Input of size m is used"
"\n");
exit(0);
}
initVector(&in_h, num_elements);
/* Allocate and initialize output vector */
out_h = (float*)calloc(num_elements, sizeof(float));
if(out_h == NULL) FATAL("Unable to allocate host");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Input size = %u\n", num_elements);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMalloc((void**)&in_d, num_elements*sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cuda_ret = cudaMalloc((void**)&out_d, num_elements*sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMemcpy(in_d, in_h, num_elements*sizeof(float),
cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device");
cuda_ret = cudaMemset(out_d, 0, num_elements*sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to set device memory");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//Set up and invoke your kernel inside the preScan function, which is in kernel.cu
preScan(out_d, in_d, num_elements);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMemcpy(out_h, out_d, num_elements*sizeof(float),
cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(in_h, out_h, num_elements);
// Free memory ------------------------------------------------------------
cudaFree(in_d); cudaFree(out_d);
free(in_h); free(out_h);
return 0;
}
|
d69cc38e36321c32de846441fc1e0b7b788197c1.hip | // !!! This is a file automatically generated by hipify!!!
// Date March 26 2029
//Programmer: Hemanta Bhattarai
// Program : To add three arrays and compare computation time on host and device
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //for random numbers
#include <time.h>
#include <sys/time.h>
#define gpuErrchk(ans){ gpuAssert((ans),__FILE__, __LINE__);}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if(code != hipSuccess)
{
fprintf(stderr, "GPUassert : %s %s %d\n", hipGetErrorString(code), file, line);
if(abort) exit(code);
}
}
// device kernal
__global__ void vecAdd(int *A, int *B, int *C, int *D, int array_size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < array_size) D[i] = A[i] + B[i] + C[i];
}
int main()
{
// host function definition
int get_random();
//variable definition
int *hA, *hB, *hC, *hD, *hE, *dA, *dB, *dC, *dD;
int size_of_array;
//define size of array
printf("Enter the size of array");
scanf("%d",&size_of_array);
dim3 grid(1024);
dim3 block((size_of_array/grid.x)+1);
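    // note: despite the names, 'grid' holds the threads per block (1024) and 'block' the number of blocks;
    // the kernel launch passes 'block' as the grid dimension and 'grid' as the block dimension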
int size = sizeof(int) * size_of_array;
//memory allocation in host
hA = (int*)malloc(size);
hB = (int*)malloc(size);
hC = (int*)malloc(size);
hD = (int*)malloc(size);
hE = (int*)malloc(size);
//memory allocation in device
gpuErrchk(hipMalloc(&dA,size));
gpuErrchk(hipMalloc(&dB,size));
gpuErrchk(hipMalloc(&dC,size));
gpuErrchk(hipMalloc(&dD,size));
//array initilization
for(int i=0; i<size_of_array; ++i) hA[i] = get_random();
for(int i=0; i<size_of_array; ++i) hB[i] = get_random();
for(int i=0; i<size_of_array; ++i) hC[i] = get_random();
clock_t host_begin, host_end;
//record begin of host computation
host_begin = clock();
//add vectors in host
for(int i=0; i<size_of_array; ++i) hE[i] = hA[i] + hB[i] + hC[i];
//record end of host computation
host_end = clock();
clock_t device_begin, device_end;
//record of device computation
device_begin = clock();
//copy host data to memory
gpuErrchk(hipMemcpy(dA, hA, size, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(dB, hB, size, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(dC, hC, size, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(dD, hD, size, hipMemcpyHostToDevice));
//record start of device computation
// add array in device
hipLaunchKernelGGL(( vecAdd), dim3(block),dim3(grid), 0, 0, dA, dB, dC, dD, size_of_array);
//record end of device computation
device_end = clock();
//copy data from device to host
gpuErrchk(hipMemcpy(hD, dD, size, hipMemcpyDeviceToHost));
double host_time, device_time;
host_time = (double)((double)(host_end - host_begin)/(CLOCKS_PER_SEC));
device_time = (double)((double)(device_end - device_begin)/(CLOCKS_PER_SEC));
//print the time of host and device computation
printf("Host computation time: %f\n",host_time);
printf("Device computation time: %f\n",device_time);
//display the devation of device and host result
int sum = 0;
for(int i=0; i< size_of_array; ++i) sum += hE[i] - hD[i];
printf("The deviation of host and device result is %d\n",sum);
//free host memory
free(hA);
free(hB);
free(hC);
free(hD);
free(hE);
//free device memory
gpuErrchk(hipFree(dA));
gpuErrchk(hipFree(dB));
gpuErrchk(hipFree(dC));
gpuErrchk(hipFree(dD));
}
//random number generator
int get_random()
{
return rand() % 100 + 1;
}
| d69cc38e36321c32de846441fc1e0b7b788197c1.cu | // Date March 26 2029
//Programmer: Hemanta Bhattarai
// Program : To add three arrays and compare computation time on host and device
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //for random numbers
#include <time.h>
#include <sys/time.h>
#define gpuErrchk(ans){ gpuAssert((ans),__FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if(code != cudaSuccess)
{
fprintf(stderr, "GPUassert : %s %s %d\n", cudaGetErrorString(code), file, line);
if(abort) exit(code);
}
}
// device kernal
__global__ void vecAdd(int *A, int *B, int *C, int *D, int array_size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < array_size) D[i] = A[i] + B[i] + C[i];
}
int main()
{
// host function definition
int get_random();
//variable definition
int *hA, *hB, *hC, *hD, *hE, *dA, *dB, *dC, *dD;
int size_of_array;
//define size of array
printf("Enter the size of array");
scanf("%d",&size_of_array);
dim3 grid(1024);
dim3 block((size_of_array/grid.x)+1);
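    // note: despite the names, 'grid' holds the threads per block (1024) and 'block' the number of blocks;
    // the kernel launch passes 'block' as the grid dimension and 'grid' as the block dimension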
int size = sizeof(int) * size_of_array;
//memory allocation in host
hA = (int*)malloc(size);
hB = (int*)malloc(size);
hC = (int*)malloc(size);
hD = (int*)malloc(size);
hE = (int*)malloc(size);
//memory allocation in device
gpuErrchk(cudaMalloc(&dA,size));
gpuErrchk(cudaMalloc(&dB,size));
gpuErrchk(cudaMalloc(&dC,size));
gpuErrchk(cudaMalloc(&dD,size));
//array initilization
for(int i=0; i<size_of_array; ++i) hA[i] = get_random();
for(int i=0; i<size_of_array; ++i) hB[i] = get_random();
for(int i=0; i<size_of_array; ++i) hC[i] = get_random();
clock_t host_begin, host_end;
//record begin of host computation
host_begin = clock();
//add vectors in host
for(int i=0; i<size_of_array; ++i) hE[i] = hA[i] + hB[i] + hC[i];
//record end of host computation
host_end = clock();
clock_t device_begin, device_end;
//record of device computation
device_begin = clock();
//copy host data to memory
gpuErrchk(cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dC, hC, size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dD, hD, size, cudaMemcpyHostToDevice));
//record start of device computation
// add array in device
vecAdd<<<block,grid>>>(dA, dB, dC, dD, size_of_array);
//record end of device computation
device_end = clock();
//copy data from device to host
gpuErrchk(cudaMemcpy(hD, dD, size, cudaMemcpyDeviceToHost));
double host_time, device_time;
host_time = (double)((double)(host_end - host_begin)/(CLOCKS_PER_SEC));
device_time = (double)((double)(device_end - device_begin)/(CLOCKS_PER_SEC));
//print the time of host and device computation
printf("Host computation time: %f\n",host_time);
printf("Device computation time: %f\n",device_time);
//display the devation of device and host result
int sum = 0;
for(int i=0; i< size_of_array; ++i) sum += hE[i] - hD[i];
printf("The deviation of host and device result is %d\n",sum);
//free host memory
free(hA);
free(hB);
free(hC);
free(hD);
free(hE);
//free device memory
gpuErrchk(cudaFree(dA));
gpuErrchk(cudaFree(dB));
gpuErrchk(cudaFree(dC));
gpuErrchk(cudaFree(dD));
}
//random number generator
int get_random()
{
return rand() % 100 + 1;
}
|
6e35c51c68fab06ef36b6cadbd0cb0e3505f33f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void ReductionMax2(float *input, float *results, int n) //take thread divergence into account
{
extern __shared__ int sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tx = threadIdx.x;
//load input into __shared__ memory
int x = INT_MIN;
if(i < n)
x = input[i];
sdata[tx] = x;
__syncthreads();
// block-wide reduction
for(unsigned int offset = blockDim.x>>1; offset > 0; offset >>= 1)
{
__syncthreads();
if(tx < offset)
{
if(sdata[tx + offset] > sdata[tx])
sdata[tx] = sdata[tx + offset];
}
}
// finally, thread 0 writes the result
if(threadIdx.x == 0)
{
// the result is per-block
results[blockIdx.x] = sdata[0];
}
}
__global__ void find_maximum_kernel(float *array, float *max, int *mutex, unsigned int n)
{
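    // grid-stride loop: each thread folds a strided slice of the input into 'temp',
    // the block then reduces 'cache' in shared memory, and thread 0 merges the
    // block result into the global maximum under a spin-lock on 'mutex'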
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ float cache[256];
float temp = -1.0;
while(index + offset < n){
temp = fmaxf(temp, array[index + offset]);
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmaxf(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
} | 6e35c51c68fab06ef36b6cadbd0cb0e3505f33f9.cu | __global__ void ReductionMax2(float *input, float *results, int n) //take thread divergence into account
{
extern __shared__ int sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tx = threadIdx.x;
//load input into __shared__ memory
int x = INT_MIN;
if(i < n)
x = input[i];
sdata[tx] = x;
__syncthreads();
// block-wide reduction
for(unsigned int offset = blockDim.x>>1; offset > 0; offset >>= 1)
{
__syncthreads();
if(tx < offset)
{
if(sdata[tx + offset] > sdata[tx])
sdata[tx] = sdata[tx + offset];
}
}
// finally, thread 0 writes the result
if(threadIdx.x == 0)
{
// the result is per-block
results[blockIdx.x] = sdata[0];
}
}
__global__ void find_maximum_kernel(float *array, float *max, int *mutex, unsigned int n)
{
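    // grid-stride loop: each thread folds a strided slice of the input into 'temp',
    // the block then reduces 'cache' in shared memory, and thread 0 merges the
    // block result into the global maximum under a spin-lock on 'mutex'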
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ float cache[256];
float temp = -1.0;
while(index + offset < n){
temp = fmaxf(temp, array[index + offset]);
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmaxf(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
} |
ea69114d2c29b9a7cb9ca4091cd5d7ff4f42940c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
typedef struct {
int n;
int m;
int tile;
float* arr;
} Matrix;
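// 'tile' is the leading dimension (row stride) used when indexing 'arr' in the tiled kernels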
// Thread block size
#define BLOCK_SIZE 16
void printa(float *A, int n, int m);
void generateMatrix(float *A, int n, int m, int num);
__global__ void MulKernel(const Matrix, const Matrix, Matrix);
__global__ void MulKernelShared(const Matrix, const Matrix, Matrix);
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.arr[row * A.tile + col];
}
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.arr[row * A.tile + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.n = BLOCK_SIZE;
Asub.m = BLOCK_SIZE;
Asub.tile = A.tile;
Asub.arr = &A.arr[A.tile * BLOCK_SIZE * row + BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication kernel called by MatrixMultiplication()
__global__ void MulKernel(Matrix A, Matrix B, Matrix C)
{
float sum = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.n; ++e)
sum += A.arr[row * A.n + e] * B.arr[e * B.n + col];
C.arr[row * C.n + col] = sum;
}
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
__host__ void MatrixMultiplication(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.n = A.n; d_A.m = A.m;
size_t size = A.n * A.m * sizeof(float);
hipMalloc(&d_A.arr, size);
hipMemcpy(d_A.arr, A.arr, size,
hipMemcpyHostToDevice);
Matrix d_B;
d_B.n = B.n; d_B.m = B.m;
size = B.n * B.m * sizeof(float);
hipMalloc(&d_B.arr, size);
hipMemcpy(d_B.arr, B.arr, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.n = C.n; d_C.m = C.m;
size = C.n * C.m * sizeof(float);
hipMalloc(&d_C.arr, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.n / dimBlock.x, A.m / dimBlock.y);
hipLaunchKernelGGL(( MulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
hipMemcpy(C.arr, d_C.arr, size, hipMemcpyDeviceToHost);
// printa(C.arr, C.n, C.m);
hipFree(d_A.arr);
hipFree(d_B.arr);
hipFree(d_C.arr);
}
__global__ void MulKernelShared(Matrix A, Matrix B, Matrix C)
{
    // Each thread block computes one BLOCK_SIZE x BLOCK_SIZE sub-matrix Csub of C.
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
float sum = 0;
for (int m = 0; m < (A.n / BLOCK_SIZE); ++m) {
Matrix Asub = GetSubMatrix(A, blockRow, m);
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// subarr A and B are stored in Shared memory
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
        // Synchronize so that As and Bs are fully loaded before they are used.
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e)
sum += As[row][e] * Bs[e][col];
        // Synchronize so the next sub-matrices are not loaded while As and Bs are still in use.
__syncthreads();
}
SetElement(Csub, row, col, sum);
}
__host__ void MatrixMultiplicationShared(const Matrix A, const Matrix B, Matrix C)
{
Matrix d_A;
d_A.n = d_A.tile = A.n; d_A.m = A.m;
size_t size = A.n * A.m * sizeof(float);
hipMalloc(&d_A.arr, size);
hipMemcpy(d_A.arr, A.arr, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.n = d_B.tile = B.n; d_B.m = B.m;
size = B.n * B.m * sizeof(float);
hipMalloc(&d_B.arr, size);
hipMemcpy(d_B.arr, B.arr, size, hipMemcpyHostToDevice);
Matrix d_C;
d_C.n = d_C.tile = C.n; d_C.m = C.m;
size = C.n * C.m * sizeof(float);
hipMalloc(&d_C.arr, size);
    // dim3(uint3 x, uint3 y) specifies the dimensions; the default for each is 1.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // 16 x 16 , dimBlock.x * dimBlock.y, total 256 threads
// printf("dimBlock.x: %d, dim.y: %d\n", dimBlock.x, dimBlock.y);
dim3 dimGrid(B.n / dimBlock.x, A.m / dimBlock.y);
hipLaunchKernelGGL(( MulKernelShared), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
hipMemcpy(C.arr, d_C.arr, size, hipMemcpyDeviceToHost);
hipFree(d_A.arr);
hipFree(d_B.arr);
hipFree(d_C.arr);
}
// print the float array
void printa(float *A, int n, int m){
for (int i=0; i<n*m; i++){
printf("%.f ", A[i]);
}
printf("\n");
}
// fill the number in float array
void generateMatrix(float *A, int n, int m, int num){
for (int i=0; i<n*m; i++){
A[i] = num;
}
}
void generateMatrix2d(float **a, int row, int col, int num){
a = (float **)calloc(row, sizeof(float*));
for(int i = 0; i < row; i++)
a[i] = (float *) calloc (col, sizeof(float));
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
a[i][j] = num;
}
}
}
void MatrixMultiplicationCPU(float **a, float **b, float **c, int n, int m){
for(int i = 0; i < n; ++i)
for(int j = 0; j < m; ++j)
for(int k = 0; k < n; ++k)
{
c[i][j] += a[i][k] * b[k][j];
}
}
int main(int argc, char const *argv[]) {
int n, w, m;
float ms = 0; // milliseconds
float **a, **b, **c;
int num, row, col;
size_t sizeA, sizeB, sizeC;
float *Ae, *Be, *Ce;
for (int i= 32384; i >= 128; i >>= 1){
// n = m = w = i;
n = m = i;
w = i / 2;
printf("N x N = %d \n", m * n);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//
// num = 2, row = n, col = w;
//
// a = (float **)calloc(row, sizeof(float*));
// for(int i = 0; i < row; i++)
// a[i] = (float *) calloc (col, sizeof(float));
//
// for(int i = 0; i < row; i++){
// for(int j = 0; j < col; j++){
// a[i][j] = num;
// }
// }
//
// num = 3, row = w, col = m;
// b = (float **)calloc(row, sizeof(float*));
// for(int i = 0; i < row; i++)
// b[i] = (float *) calloc (col, sizeof(float));
//
// for(int i = 0; i < row; i++){
// for(int j = 0; j < col; j++){
// b[i][j] = num;
// }
// }
//
// num = 0, row = n, col = m;
// c = (float **)calloc(row, sizeof(float*));
// for(int i = 0; i < row; i++)
// c[i] = (float *) calloc (col, sizeof(float));
//
// for(int i = 0; i < row; i++){
// for(int j = 0; j < col; j++){
// c[i][j] = num;
// }
// }
//// generateMatrix2d(a, n, w, 2);
//// generateMatrix2d(b, w, m, 3);
//// generateMatrix2d(a, n, m, 0);
//
// hipEventRecord(start);
// // Matrix Multiplication on CPU, no parallel
// for(int i = 0; i < n; ++i)
// for(int j = 0; j < m; ++j)
// for(int k = 0; k < n; ++k)
// {
// c[i][j] += a[i][k] * b[k][j];
// }
//
// hipEventRecord(stop);
// hipEventSynchronize(stop);
// hipEventElapsedTime(&ms, start, stop);
// printf("CPU Multiplication time: %fn(ms)\n", ms);
sizeA = m * w * sizeof(float);
sizeB = w * n * sizeof(float);
sizeC = m * n * sizeof(float);
Ae = (float*) malloc(sizeA);
Be = (float*) malloc(sizeB);
Ce = (float*) malloc(sizeC);
Matrix A = {n, n, w, Ae};
Matrix B = {w, w, m, Be};
Matrix C = {n, n, m, Ce};
generateMatrix(A.arr, A.n, A.m, 2);
generateMatrix(B.arr, B.n, B.m, 3);
generateMatrix(C.arr, C.n, C.m, 0);
hipEventRecord(start);
// Matrix Multiplication without shared memory
MatrixMultiplication(B, A, C);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
printf("Matrix Multiplication time: %fn(ms)\n", ms);
hipEventRecord(start);
// Matrix Multiplication with shared memory
MatrixMultiplicationShared(B, A, C);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
printf("Matrix Multiplication shared time: %fn(ms)\n", ms);
free(a); free(b); free(c); free(Ae); free(Be); free(Ce);
}
return 0;
}
| ea69114d2c29b9a7cb9ca4091cd5d7ff4f42940c.cu | #include <stdio.h>
typedef struct {
int n;
int m;
int tile;
float* arr;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
void printa(float *A, int n, int m);
void generateMatrix(float *A, int n, int m, int num);
__global__ void MulKernel(const Matrix, const Matrix, Matrix);
__global__ void MulKernelShared(const Matrix, const Matrix, Matrix);
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.arr[row * A.tile + col];
}
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.arr[row * A.tile + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.n = BLOCK_SIZE;
Asub.m = BLOCK_SIZE;
Asub.tile = A.tile;
Asub.arr = &A.arr[A.tile * BLOCK_SIZE * row + BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication kernel called by MatrixMultiplication()
__global__ void MulKernel(Matrix A, Matrix B, Matrix C)
{
float sum = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.n; ++e)
sum += A.arr[row * A.n + e] * B.arr[e * B.n + col];
C.arr[row * C.n + col] = sum;
}
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
__host__ void MatrixMultiplication(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.n = A.n; d_A.m = A.m;
size_t size = A.n * A.m * sizeof(float);
cudaMalloc(&d_A.arr, size);
cudaMemcpy(d_A.arr, A.arr, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.n = B.n; d_B.m = B.m;
size = B.n * B.m * sizeof(float);
cudaMalloc(&d_B.arr, size);
cudaMemcpy(d_B.arr, B.arr, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.n = C.n; d_C.m = C.m;
size = C.n * C.m * sizeof(float);
cudaMalloc(&d_C.arr, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.n / dimBlock.x, A.m / dimBlock.y);
MulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(C.arr, d_C.arr, size, cudaMemcpyDeviceToHost);
// printa(C.arr, C.n, C.m);
cudaFree(d_A.arr);
cudaFree(d_B.arr);
cudaFree(d_C.arr);
}
__global__ void MulKernelShared(Matrix A, Matrix B, Matrix C)
{
// Use the block size of subarr of Matrix C.
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
float sum = 0;
for (int m = 0; m < (A.n / BLOCK_SIZE); ++m) {
Matrix Asub = GetSubMatrix(A, blockRow, m);
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// subarr A and B are stored in Shared memory
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize the storing data Asub and B sub into As and Bs.
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e)
sum += As[row][e] * Bs[e][col];
// Synchronize to block the new generation of Asub and Bsub during iteration.
__syncthreads();
}
SetElement(Csub, row, col, sum);
}
__host__ void MatrixMultiplicationShared(const Matrix A, const Matrix B, Matrix C)
{
Matrix d_A;
d_A.n = d_A.tile = A.n; d_A.m = A.m;
size_t size = A.n * A.m * sizeof(float);
cudaMalloc(&d_A.arr, size);
cudaMemcpy(d_A.arr, A.arr, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.n = d_B.tile = B.n; d_B.m = B.m;
size = B.n * B.m * sizeof(float);
cudaMalloc(&d_B.arr, size);
cudaMemcpy(d_B.arr, B.arr, size, cudaMemcpyHostToDevice);
Matrix d_C;
d_C.n = d_C.tile = C.n; d_C.m = C.m;
size = C.n * C.m * sizeof(float);
cudaMalloc(&d_C.arr, size);
// dim3(uint3 x, uint3 y), specify demensions. default is 1.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // 16 x 16 , dimBlock.x * dimBlock.y, total 256 threads
// printf("dimBlock.x: %d, dim.y: %d\n", dimBlock.x, dimBlock.y);
dim3 dimGrid(B.n / dimBlock.x, A.m / dimBlock.y);
MulKernelShared<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(C.arr, d_C.arr, size, cudaMemcpyDeviceToHost);
cudaFree(d_A.arr);
cudaFree(d_B.arr);
cudaFree(d_C.arr);
}
// print the float array
void printa(float *A, int n, int m){
for (int i=0; i<n*m; i++){
printf("%.f ", A[i]);
}
printf("\n");
}
// fill the number in float array
void generateMatrix(float *A, int n, int m, int num){
for (int i=0; i<n*m; i++){
A[i] = num;
}
}
void generateMatrix2d(float **a, int row, int col, int num){
a = (float **)calloc(row, sizeof(float*));
for(int i = 0; i < row; i++)
a[i] = (float *) calloc (col, sizeof(float));
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
a[i][j] = num;
}
}
}
void MatrixMultiplicationCPU(float **a, float **b, float **c, int n, int m){
for(int i = 0; i < n; ++i)
for(int j = 0; j < m; ++j)
for(int k = 0; k < n; ++k)
{
c[i][j] += a[i][k] * b[k][j];
}
}
int main(int argc, char const *argv[]) {
int n, w, m;
float ms = 0; // milliseconds
float **a, **b, **c;
int num, row, col;
size_t sizeA, sizeB, sizeC;
float *Ae, *Be, *Ce;
for (int i= 32384; i >= 128; i >>= 1){
// n = m = w = i;
n = m = i;
w = i / 2;
printf("N x N = %d \n", m * n);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//
// num = 2, row = n, col = w;
//
// a = (float **)calloc(row, sizeof(float*));
// for(int i = 0; i < row; i++)
// a[i] = (float *) calloc (col, sizeof(float));
//
// for(int i = 0; i < row; i++){
// for(int j = 0; j < col; j++){
// a[i][j] = num;
// }
// }
//
// num = 3, row = w, col = m;
// b = (float **)calloc(row, sizeof(float*));
// for(int i = 0; i < row; i++)
// b[i] = (float *) calloc (col, sizeof(float));
//
// for(int i = 0; i < row; i++){
// for(int j = 0; j < col; j++){
// b[i][j] = num;
// }
// }
//
// num = 0, row = n, col = m;
// c = (float **)calloc(row, sizeof(float*));
// for(int i = 0; i < row; i++)
// c[i] = (float *) calloc (col, sizeof(float));
//
// for(int i = 0; i < row; i++){
// for(int j = 0; j < col; j++){
// c[i][j] = num;
// }
// }
//// generateMatrix2d(a, n, w, 2);
//// generateMatrix2d(b, w, m, 3);
//// generateMatrix2d(a, n, m, 0);
//
// cudaEventRecord(start);
// // Matrix Multiplication on CPU, no parallel
// for(int i = 0; i < n; ++i)
// for(int j = 0; j < m; ++j)
// for(int k = 0; k < n; ++k)
// {
// c[i][j] += a[i][k] * b[k][j];
// }
//
// cudaEventRecord(stop);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&ms, start, stop);
// printf("CPU Multiplication time: %fn(ms)\n", ms);
    // Matrix A below is declared as n x n ("Matrix A = {n, n, w, Ae}"), so the host
    // buffer must hold n*n floats; m*w would be only half that and overflow.
    sizeA = m * n * sizeof(float);
sizeB = w * n * sizeof(float);
sizeC = m * n * sizeof(float);
Ae = (float*) malloc(sizeA);
Be = (float*) malloc(sizeB);
Ce = (float*) malloc(sizeC);
Matrix A = {n, n, w, Ae};
Matrix B = {w, w, m, Be};
Matrix C = {n, n, m, Ce};
generateMatrix(A.arr, A.n, A.m, 2);
generateMatrix(B.arr, B.n, B.m, 3);
generateMatrix(C.arr, C.n, C.m, 0);
cudaEventRecord(start);
// Matrix Multiplication without shared memory
MatrixMultiplication(B, A, C);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
printf("Matrix Multiplication time: %fn(ms)\n", ms);
cudaEventRecord(start);
// Matrix Multiplication with shared memory
MatrixMultiplicationShared(B, A, C);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
printf("Matrix Multiplication shared time: %fn(ms)\n", ms);
free(a); free(b); free(c); free(Ae); free(Be); free(Ce);
}
return 0;
}
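// Editor's sketch (not part of the original program): a host-side sanity check
// for the constant test matrices used above. One operand is filled with 2s, the
// other with 3s and the inner dimension is w, so every product element the
// kernels compute should equal 2*3*w. The helper only reports how many entries
// of C carry that value; it is illustrative and is not called anywhere.
static void reportConstantProduct(const Matrix &C, int w) {
    const float expected = 6.0f * (float)w;
    int hits = 0;
    for (int i = 0; i < C.n * C.m; ++i) {
        float diff = C.arr[i] - expected;
        if (diff < 0) diff = -diff;
        if (diff <= 1e-3f * expected) hits++;
    }
    printf("%d of %d entries of C equal %.0f\n", hits, C.n * C.m, expected);
}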
|
5cf3e3dd9074eaa5b679c4a54558e7f43e44a1da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implementing 3D convolution in CUDA
// @Jiangyan Feng, [email protected]
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "CUDA error: ", hipGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
//@@ Define any useful program-wide constants here
#define BLOCK_WIDTH 8
#define MASK_WIDTH 3
#define radius ((MASK_WIDTH-1)/2)
#define TILE_WIDTH (BLOCK_WIDTH + MASK_WIDTH -1)
//@@ Define constant memory for device kernel here
__constant__ float M_c[MASK_WIDTH*MASK_WIDTH*MASK_WIDTH];
__global__ void conv3d(float *input, float *output, const int z_size,
const int y_size, const int x_size) {
//@@ Insert kernel code here
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
__shared__ float N_ds[TILE_WIDTH][TILE_WIDTH][TILE_WIDTH];
// Loading part 1
int tid = threadIdx.x + threadIdx.y*BLOCK_WIDTH + threadIdx.z*BLOCK_WIDTH*BLOCK_WIDTH;
int tidx = tid % TILE_WIDTH;
int tidy = (tid / TILE_WIDTH) % TILE_WIDTH;
int tidz = (tid / TILE_WIDTH) / TILE_WIDTH;
int inx = tidx + (blockIdx.x*BLOCK_WIDTH) - radius;
int iny = tidy + (blockIdx.y*BLOCK_WIDTH) - radius;
int inz = tidz + (blockIdx.z*BLOCK_WIDTH) - radius;
int inid = inx + iny*x_size + inz*x_size*y_size;
if (inx >= 0 && inx < x_size && iny >= 0 && iny < y_size && inz >= 0 && inz < z_size){
N_ds[tidz][tidy][tidx] = input[inid];
}
else {
N_ds[tidz][tidy][tidx] = 0;
}
__syncthreads();
// Loading part 2
tid = threadIdx.x + threadIdx.y*BLOCK_WIDTH + threadIdx.z*BLOCK_WIDTH*BLOCK_WIDTH + BLOCK_WIDTH*BLOCK_WIDTH*BLOCK_WIDTH;
tidx = tid % TILE_WIDTH;
tidy = (tid / TILE_WIDTH) % TILE_WIDTH;
tidz = (tid / TILE_WIDTH) / TILE_WIDTH;
inx = tidx + (blockIdx.x*BLOCK_WIDTH) - radius;
iny = tidy + (blockIdx.y*BLOCK_WIDTH) - radius;
inz = tidz + (blockIdx.z*BLOCK_WIDTH) - radius;
inid = inx + iny*x_size + inz*x_size*y_size;
if (tidz < TILE_WIDTH){
if (inx >= 0 && inx < x_size && iny >= 0 && iny < y_size && inz >= 0 && inz < z_size){
N_ds[tidz][tidy][tidx] = input[inid];
}
else{
N_ds[tidz][tidy][tidx] = 0;
}
}
__syncthreads();
// Calculating
float sum = 0.0;
for (int m = 0; m < MASK_WIDTH; m++){
for (int j = 0; j < MASK_WIDTH; j++){
for (int i = 0; i < MASK_WIDTH; i++){
sum += N_ds[threadIdx.z + m][threadIdx.y + j][threadIdx.x + i]*M_c[m*MASK_WIDTH*MASK_WIDTH + j*MASK_WIDTH +i];
}
}
}
// Saving
if (x < x_size && y < y_size && z < z_size){
output[x + y*x_size + z*x_size*y_size] = sum;
__syncthreads();
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float *hostInput;
float *hostKernel;
float *hostOutput;
float *deviceInput;
float *deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel =
(float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
int inputSize = inputLength - 3;
hipMalloc((void **)&deviceInput, inputSize*sizeof(float));
hipMalloc((void **)&deviceOutput, inputSize*sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
// Recall that the first three elements of hostInput are dimensions and
// do
// not need to be copied to the gpu
hipMemcpyToSymbol(M_c, hostKernel, kernelLength*sizeof(float));
hipMemcpy(deviceInput, &hostInput[3], inputSize*sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 dimGrid(ceil((1.0*x_size)/BLOCK_WIDTH), ceil((1.0*y_size)/BLOCK_WIDTH), ceil((1.0*z_size)/BLOCK_WIDTH));
dim3 dimBLOCK(BLOCK_WIDTH, BLOCK_WIDTH, BLOCK_WIDTH);
//@@ Launch the GPU kernel here
hipLaunchKernelGGL(( conv3d), dim3(dimGrid), dim3(dimBLOCK), 0, 0, deviceInput, deviceOutput, z_size, y_size, x_size);
hipDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
hipMemcpy(&hostOutput[3], deviceOutput, inputSize*sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
hipFree(deviceInput);
hipFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
return 0;
}
| 5cf3e3dd9074eaa5b679c4a54558e7f43e44a1da.cu | // Implementing 3D convolution in CUDA
// @Jiangyan Feng, [email protected]
#include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "CUDA error: ", cudaGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
//@@ Define any useful program-wide constants here
#define BLOCK_WIDTH 8
#define MASK_WIDTH 3
#define radius ((MASK_WIDTH-1)/2)
#define TILE_WIDTH (BLOCK_WIDTH + MASK_WIDTH -1)
//@@ Define constant memory for device kernel here
__constant__ float M_c[MASK_WIDTH*MASK_WIDTH*MASK_WIDTH];
__global__ void conv3d(float *input, float *output, const int z_size,
const int y_size, const int x_size) {
//@@ Insert kernel code here
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z*blockDim.z + threadIdx.z;
__shared__ float N_ds[TILE_WIDTH][TILE_WIDTH][TILE_WIDTH];
// Loading part 1
int tid = threadIdx.x + threadIdx.y*BLOCK_WIDTH + threadIdx.z*BLOCK_WIDTH*BLOCK_WIDTH;
int tidx = tid % TILE_WIDTH;
int tidy = (tid / TILE_WIDTH) % TILE_WIDTH;
int tidz = (tid / TILE_WIDTH) / TILE_WIDTH;
int inx = tidx + (blockIdx.x*BLOCK_WIDTH) - radius;
int iny = tidy + (blockIdx.y*BLOCK_WIDTH) - radius;
int inz = tidz + (blockIdx.z*BLOCK_WIDTH) - radius;
int inid = inx + iny*x_size + inz*x_size*y_size;
if (inx >= 0 && inx < x_size && iny >= 0 && iny < y_size && inz >= 0 && inz < z_size){
N_ds[tidz][tidy][tidx] = input[inid];
}
else {
N_ds[tidz][tidy][tidx] = 0;
}
__syncthreads();
// Loading part 2
tid = threadIdx.x + threadIdx.y*BLOCK_WIDTH + threadIdx.z*BLOCK_WIDTH*BLOCK_WIDTH + BLOCK_WIDTH*BLOCK_WIDTH*BLOCK_WIDTH;
tidx = tid % TILE_WIDTH;
tidy = (tid / TILE_WIDTH) % TILE_WIDTH;
tidz = (tid / TILE_WIDTH) / TILE_WIDTH;
inx = tidx + (blockIdx.x*BLOCK_WIDTH) - radius;
iny = tidy + (blockIdx.y*BLOCK_WIDTH) - radius;
inz = tidz + (blockIdx.z*BLOCK_WIDTH) - radius;
inid = inx + iny*x_size + inz*x_size*y_size;
if (tidz < TILE_WIDTH){
if (inx >= 0 && inx < x_size && iny >= 0 && iny < y_size && inz >= 0 && inz < z_size){
N_ds[tidz][tidy][tidx] = input[inid];
}
else{
N_ds[tidz][tidy][tidx] = 0;
}
}
__syncthreads();
// Calculating
float sum = 0.0;
for (int m = 0; m < MASK_WIDTH; m++){
for (int j = 0; j < MASK_WIDTH; j++){
for (int i = 0; i < MASK_WIDTH; i++){
sum += N_ds[threadIdx.z + m][threadIdx.y + j][threadIdx.x + i]*M_c[m*MASK_WIDTH*MASK_WIDTH + j*MASK_WIDTH +i];
}
}
}
// Saving
if (x < x_size && y < y_size && z < z_size){
output[x + y*x_size + z*x_size*y_size] = sum;
__syncthreads();
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float *hostInput;
float *hostKernel;
float *hostOutput;
float *deviceInput;
float *deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel =
(float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
int inputSize = inputLength - 3;
cudaMalloc((void **)&deviceInput, inputSize*sizeof(float));
cudaMalloc((void **)&deviceOutput, inputSize*sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
// Recall that the first three elements of hostInput are dimensions and
// do
// not need to be copied to the gpu
cudaMemcpyToSymbol(M_c, hostKernel, kernelLength*sizeof(float));
cudaMemcpy(deviceInput, &hostInput[3], inputSize*sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 dimGrid(ceil((1.0*x_size)/BLOCK_WIDTH), ceil((1.0*y_size)/BLOCK_WIDTH), ceil((1.0*z_size)/BLOCK_WIDTH));
dim3 dimBLOCK(BLOCK_WIDTH, BLOCK_WIDTH, BLOCK_WIDTH);
//@@ Launch the GPU kernel here
conv3d<<<dimGrid, dimBLOCK>>> (deviceInput, deviceOutput, z_size, y_size, x_size);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
cudaMemcpy(&hostOutput[3], deviceOutput, inputSize*sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
cudaFree(deviceInput);
cudaFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
return 0;
}
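// Editor's note (added for clarity, not in the original): the two shared-memory
// loading phases in conv3d work because the halo tile holds
// TILE_WIDTH^3 = (8 + 3 - 1)^3 = 1000 elements, while one pass of
// BLOCK_WIDTH^3 = 512 threads fills 512 of them; two passes cover up to
// 2 * 512 = 1024 >= 1000 elements, so every tile element gets loaded.
// A compile-time guard documenting that assumption could look like this
// (requires C++11; purely illustrative):
static_assert(2 * BLOCK_WIDTH * BLOCK_WIDTH * BLOCK_WIDTH >=
              TILE_WIDTH * TILE_WIDTH * TILE_WIDTH,
              "two loading passes must cover the whole shared tile");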
|
727df99337ac90b893af59a36fd15defd97ce72d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
int cnt{0};
hipGetDeviceCount(&cnt);
printf("Number of GPUs: %d\n", cnt);
int version;
hipRuntimeGetVersion(&version);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("-------------------------------------\n");
printf("Device name: %s\n", prop.name);
printf("CUDA Runtime Version: %d.%d\n", version/1000, (version%100)/10);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Total global memory: %ld bytes (%lf GiB)\n", prop.totalGlobalMem, prop.totalGlobalMem/1.074e+9);
printf("-------------------------------------\n");
hipLaunchKernelGGL(( cuda_hello), dim3(1), dim3(10), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 727df99337ac90b893af59a36fd15defd97ce72d.cu | #include <cstdio>
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
int cnt{0};
cudaGetDeviceCount(&cnt);
printf("Number of GPUs: %d\n", cnt);
int version;
cudaRuntimeGetVersion(&version);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("-------------------------------------\n");
printf("Device name: %s\n", prop.name);
printf("CUDA Runtime Version: %d.%d\n", version/1000, (version%100)/10);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Total global memory: %ld bytes (%lf GiB)\n", prop.totalGlobalMem, prop.totalGlobalMem/1.074e+9);
printf("-------------------------------------\n");
cuda_hello<<<1, 10>>>();
cudaDeviceSynchronize();
return 0;
}
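// Editor's sketch (not part of the original): the runtime API calls above are
// not checked for errors. A minimal checking macro that could wrap them; the
// name CHECK_CUDA is illustrative, e.g. CHECK_CUDA(cudaGetDeviceCount(&cnt)).
#define CHECK_CUDA(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            printf("CUDA error %s at %s:%d\n",                                \
                   cudaGetErrorString(err_), __FILE__, __LINE__);             \
        }                                                                     \
    } while (0)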
|
de79a9e1de2056d25b76429133df6df34b7edda0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include "getAverageAndNorm.cu"
#include "dataReader.c"
#include "calculateSimilarityMatrix.cu"
#include "transpose_kernel.cu"
float avg[ROWS];
float norm_val[ROWS];
float sim[ROWS*ROWS];
float global_sim[ROWS*ROWS];
// float transpose[COLS][ROWS];
int isSimilarityCorrect(){
printf("%d\t%d\n",ROWS,COLS);
for(int i = 0; i < ROWS; i++){
for(int j = i; j < ROWS; j++){
float temp = similarity_matrix[i][j];
            if(fabs(sim[i*ROWS+j] - temp) > 0.01){
                printf("(%d, %d): GPU=%f CPU=%f\n", i, j, sim[i*ROWS+j], temp);
return 0;
}
}
}
return 1;
}
int isAverageCorrect(){
for(int i = 0; i < ROWS; i++){
if(fabs(avg[i] - avg_rating[i]) > 0.00001){
printf("(%d): GPU=%f CPU=%f\n", i, avg[i], avg_rating[i]);
return 0;
}
}
return 1;
}
// void transposeMatrix(){
// for(int i = 0; i < ROWS; i++){
// for(int j = 0; j < COLS; j++){
// transpose[j][i] = ratings[i][j];
// }
// }
// }
int main(){
float *d_ratings, *d_avg, *d_norm, *d_sim, *d_transpose, *d_sim_global;
dim3 dimGrid(ROWS/TILE_WIDTH + 1, ROWS/TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("Loading CSV data.........\n");
hipEventRecord(start);
readCSV();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0.0f;
hipEventElapsedTime(&milliseconds,start,stop);
printf("Time taken to load csv = %f seconds\n\n",(float)milliseconds/1000);
printf("Executing serial code\n");
printf("Computing average serial code\n");
hipEventRecord(start);
serial_mean();
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0.0f;
hipEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for computing average serial implementation = %f seconds\n\n",(float)milliseconds/1000);
printf("computing serial similarity matrix\n\n");
hipEventRecord(start);
build_similarity_matrix();
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0.0f;
hipEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for Similarity matrix serial implementation = %f seconds\n\n",(float)milliseconds/1000);
printf("Allocating device memory and copy data\n");
hipEventRecord(start);
hipMalloc((void**)&d_ratings, ROWS * COLS * sizeof(float));
hipMalloc((void**)&d_avg, ROWS * sizeof(float));
hipMalloc((void**)&d_norm, ROWS * sizeof(float));
hipMalloc((void**)&d_sim, ROWS * ROWS * sizeof(float));
hipMalloc((void**)&d_transpose, ROWS * COLS * sizeof(float));
hipMemcpy(d_ratings, ratings, ROWS * COLS * sizeof(float), hipMemcpyHostToDevice);
//transpose kernel being called
// Matrix out = AllocateMatrix(4, 6, 1);
// Matrix d_out = AllocateDeviceMatrix(out);
// CopyToDeviceMatrix(d_out, out);
hipMemcpy(sim, d_sim, ROWS * ROWS * sizeof(float), hipMemcpyDeviceToHost);
//transpose<<<transGrid, transBlock>>>(d_transpose, d_ratings, COLS, ROWS);
float trans_blocks_x = COLS/TILE_DIM+1;
float trans_blocks_y = ROWS/TILE_DIM+1;
dim3 transGrid(trans_blocks_x, trans_blocks_y);
dim3 transBlock(TILE_DIM, TILE_DIM);
//transpose<<<transGrid, transBlock>>>(d_transpose, d_ratings, COLS, ROWS);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
milliseconds = 0.0f;
hipEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for allocating device memory and loading data = %f seconds\n\n",(float)milliseconds/1000);
hipMalloc((void**)&d_sim_global, ROWS * ROWS * sizeof(float));
printf("Computing transpose.....\n");
hipEventRecord(start);
hipLaunchKernelGGL(( transpose), dim3(transGrid), dim3(transBlock), 0, 0, d_transpose, d_ratings, COLS, ROWS);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds,start,stop);
printf("Time taken to compute transpose = %f seconds\n\n",(float)milliseconds/1000);
printf("Getting Average and Norm.....\n");
hipEventRecord(start);
hipLaunchKernelGGL(( GetAverageAndNorm), dim3(1), dim3(ROWS), 0, 0, d_transpose, ROWS, COLS, d_avg, d_norm);
hipDeviceSynchronize();
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipMemcpy(avg, d_avg, ROWS * sizeof(float), hipMemcpyDeviceToHost);
milliseconds = 0.0f;
hipEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for computing average and norm = %f seconds\n\n",(float)milliseconds/1000);
printf("Computing Similarity matrix using tiling\n");
hipEventRecord(start);
hipLaunchKernelGGL(( calculateSimilarityMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ratings,d_transpose,d_sim,COLS, ROWS, d_avg, d_norm);
hipMemcpy(sim, d_sim, ROWS * ROWS * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop);
milliseconds = 0.0f;
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds,start,stop);
printf("Execution time for Tiled parallel version similarity matrix computation: %f seconds\n\n",(float)milliseconds/1000);
printf("Computing Similarity Matrix using Global Memory\n");
hipEventRecord(start);
hipLaunchKernelGGL(( calculateSimilarityMatrixGlobal), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ratings, d_sim_global, COLS, ROWS, d_avg, d_norm);
hipMemcpy(global_sim, d_sim_global, ROWS * ROWS * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop);
milliseconds = 0.0f;
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds,start,stop);
printf("Execution time for global memory version similarity matrix computation : %f seconds\n\n",(float)milliseconds/1000);
printf("Average values are %s\n", isAverageCorrect()? "correct" : "incorrect");
printf("Similarity values are %s\n", isSimilarityCorrect()? "correct" : "incorrect");
}
| de79a9e1de2056d25b76429133df6df34b7edda0.cu | #include<stdio.h>
#include "getAverageAndNorm.cu"
#include "dataReader.c"
#include "calculateSimilarityMatrix.cu"
#include "transpose_kernel.cu"
float avg[ROWS];
float norm_val[ROWS];
float sim[ROWS*ROWS];
float global_sim[ROWS*ROWS];
// float transpose[COLS][ROWS];
int isSimilarityCorrect(){
printf("%d\t%d\n",ROWS,COLS);
for(int i = 0; i < ROWS; i++){
for(int j = i; j < ROWS; j++){
float temp = similarity_matrix[i][j];
            if(fabs(sim[i*ROWS+j] - temp) > 0.01){
                printf("(%d, %d): GPU=%f CPU=%f\n", i, j, sim[i*ROWS+j], temp);
return 0;
}
}
}
return 1;
}
int isAverageCorrect(){
for(int i = 0; i < ROWS; i++){
if(fabs(avg[i] - avg_rating[i]) > 0.00001){
printf("(%d): GPU=%f CPU=%f\n", i, avg[i], avg_rating[i]);
return 0;
}
}
return 1;
}
// void transposeMatrix(){
// for(int i = 0; i < ROWS; i++){
// for(int j = 0; j < COLS; j++){
// transpose[j][i] = ratings[i][j];
// }
// }
// }
int main(){
float *d_ratings, *d_avg, *d_norm, *d_sim, *d_transpose, *d_sim_global;
dim3 dimGrid(ROWS/TILE_WIDTH + 1, ROWS/TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Loading CSV data.........\n");
cudaEventRecord(start);
readCSV();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0.0f;
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Time taken to load csv = %f seconds\n\n",(float)milliseconds/1000);
printf("Executing serial code\n");
printf("Computing average serial code\n");
cudaEventRecord(start);
serial_mean();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0.0f;
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for computing average serial implementation = %f seconds\n\n",(float)milliseconds/1000);
printf("computing serial similarity matrix\n\n");
cudaEventRecord(start);
build_similarity_matrix();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0.0f;
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for Similarity matrix serial implementation = %f seconds\n\n",(float)milliseconds/1000);
printf("Allocating device memory and copy data\n");
cudaEventRecord(start);
cudaMalloc((void**)&d_ratings, ROWS * COLS * sizeof(float));
cudaMalloc((void**)&d_avg, ROWS * sizeof(float));
cudaMalloc((void**)&d_norm, ROWS * sizeof(float));
cudaMalloc((void**)&d_sim, ROWS * ROWS * sizeof(float));
cudaMalloc((void**)&d_transpose, ROWS * COLS * sizeof(float));
cudaMemcpy(d_ratings, ratings, ROWS * COLS * sizeof(float), cudaMemcpyHostToDevice);
//transpose kernel being called
// Matrix out = AllocateMatrix(4, 6, 1);
// Matrix d_out = AllocateDeviceMatrix(out);
// CopyToDeviceMatrix(d_out, out);
cudaMemcpy(sim, d_sim, ROWS * ROWS * sizeof(float), cudaMemcpyDeviceToHost);
//transpose<<<transGrid, transBlock>>>(d_transpose, d_ratings, COLS, ROWS);
float trans_blocks_x = COLS/TILE_DIM+1;
float trans_blocks_y = ROWS/TILE_DIM+1;
dim3 transGrid(trans_blocks_x, trans_blocks_y);
dim3 transBlock(TILE_DIM, TILE_DIM);
//transpose<<<transGrid, transBlock>>>(d_transpose, d_ratings, COLS, ROWS);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
milliseconds = 0.0f;
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for allocating device memory and loading data = %f seconds\n\n",(float)milliseconds/1000);
cudaMalloc((void**)&d_sim_global, ROWS * ROWS * sizeof(float));
printf("Computing transpose.....\n");
cudaEventRecord(start);
transpose<<<transGrid, transBlock>>>(d_transpose, d_ratings, COLS, ROWS);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Time taken to compute transpose = %f seconds\n\n",(float)milliseconds/1000);
printf("Getting Average and Norm.....\n");
cudaEventRecord(start);
GetAverageAndNorm<<<1, ROWS>>>(d_transpose, ROWS, COLS, d_avg, d_norm);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
cudaMemcpy(avg, d_avg, ROWS * sizeof(float), cudaMemcpyDeviceToHost);
milliseconds = 0.0f;
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Time taken for computing average and norm = %f seconds\n\n",(float)milliseconds/1000);
printf("Computing Similarity matrix using tiling\n");
cudaEventRecord(start);
calculateSimilarityMatrix<<<dimGrid, dimBlock>>>(d_ratings,d_transpose,d_sim,COLS, ROWS, d_avg, d_norm);
cudaMemcpy(sim, d_sim, ROWS * ROWS * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
milliseconds = 0.0f;
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Execution time for Tiled parallel version similarity matrix computation: %f seconds\n\n",(float)milliseconds/1000);
printf("Computing Similarity Matrix using Global Memory\n");
cudaEventRecord(start);
calculateSimilarityMatrixGlobal<<<dimGrid, dimBlock>>>(d_ratings, d_sim_global, COLS, ROWS, d_avg, d_norm);
cudaMemcpy(global_sim, d_sim_global, ROWS * ROWS * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
milliseconds = 0.0f;
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds,start,stop);
printf("Execution time for global memory version similarity matrix computation : %f seconds\n\n",(float)milliseconds/1000);
printf("Average values are %s\n", isAverageCorrect()? "correct" : "incorrect");
printf("Similarity values are %s\n", isSimilarityCorrect()? "correct" : "incorrect");
}
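// Editor's sketch, based on an assumption about what the included kernels
// compute (getAverageAndNorm.cu and calculateSimilarityMatrix.cu are not shown
// in this file): an adjusted-cosine / Pearson-style similarity of the form
//   sim(u,v) = sum_k (r_uk - avg_u) * (r_vk - avg_v) / (norm_u * norm_v).
// The reference below takes a flattened row-major copy of the ratings and
// assumes norm_row[i] holds the L2 norm of the mean-centred row i; both are
// assumptions made for illustration, and the function is never called.
float referenceSimilarity(const float *ratings_flat, const float *avg_row,
                          const float *norm_row, int u, int v) {
    float s = 0.0f;
    for (int k = 0; k < COLS; ++k)
        s += (ratings_flat[u * COLS + k] - avg_row[u]) *
             (ratings_flat[v * COLS + k] - avg_row[v]);
    return s / (norm_row[u] * norm_row[v]);
}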
|
25b653664a5c2ef332c14e11709f924ec988eef2.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA exercise to convert a simple serial code for a brute force
largest prime number search into CUDA (32-bit, int version). This
initial code is serial, but it is written as CUDA code for your
convenience, so should be compiled with nvcc (see below). Your task
is to convert the serial computation to a kernel computation. In
the simplest case, use atomicMax to find the globally largest prime
number.
All prime numbers can be expressed as 6*k-1 or 6*k+1, k being an
integer. We provide the range of k to probe as macro parameters
KMIN and KMAX (see below).
You should get a speedup ~20x with atomicMax.
Hints:
* It's very convenient to use a two-dimensional grid of blocks,
defined as "dim3 Nblocks (NBLOCKS, 2, 1);". The second grid
dimension is used to derive the two values of j=(-1; 1) inside the
kernel: "int j = 2*blockIdx.y - 1;". This way, there will be only
one loop inside the kernel - for y.
* When you get a failure (not a prime) inside the y loop, you can exit
the thread with "return" (no need to use "break").
To compile:
nvcc -arch=sm_20 -O2 primes.cu -o primes
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Range of k-numbers for primes search:
#define KMIN 100000000
// Should be smaller than 357,913,941 (because we are using signed int)
#define KMAX 100100000
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 256
// Number of blocks to run:
#define NBLOCKS (KMAX-KMIN+BLOCK_SIZE)/BLOCK_SIZE
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Kernel(s) should go here:
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr;
double restime;
int devid, devcount, error, success;
int xmax, ymax, x, y;
if (BLOCK_SIZE>1024)
{
printf ("Bad BLOCK_SIZE: %d\n", BLOCK_SIZE);
exit (1);
}
/* find number of device in current "context" */
hipGetDevice(&devid);
/* find how many devices are available */
if (hipGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
}
//--------------------------------------------------------------------------------
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
// This serial computation will have to be replaced by calls to kernel(s):
xmax = 0;
for (int k=KMIN; k<=KMAX; k++)
{
// testing "-1" and "+1" cases:
for (int j=-1; j<2; j=j+2)
{
// Prime candidate:
x = 6*k + j;
// We should be dividing by numbers up to sqrt(x):
ymax = (int)ceil(sqrt((double)x));
// Primality test:
for (y=3; y<=ymax; y=y+2)
{
        // To be a success, the modulus should not be equal to zero:
success = x % y;
if (!success)
break;
}
if (success && x > xmax)
{
xmax = x;
}
}
}
if (error = hipDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
printf ("%d\n", xmax);
printf ("Time: %e\n", restime);
//--------------------------------------------------------------------------------
return 0;
}
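// Editor's sketch of one possible kernel for the exercise above, following the
// hints in the header comment (2-D grid, j derived from blockIdx.y, atomicMax
// on a single global result). It would normally go where the "Kernel(s) should
// go here" marker is and replace the serial loop; it is illustrative only, not
// an official solution, and may need #include <hip/hip_runtime.h> at the top.
__global__ void primeKernel(int *xmax)
{
    int k = KMIN + blockIdx.x * blockDim.x + threadIdx.x;
    if (k > KMAX)
        return;
    int j = 2 * (int)blockIdx.y - 1;               // -1 or +1
    int x = 6 * k + j;                             // prime candidate
    int ymax = (int)ceil(sqrt((double)x));
    for (int y = 3; y <= ymax; y = y + 2)
        if (x % y == 0)
            return;                                // not a prime
    atomicMax(xmax, x);
}
// Possible launch (sketch): dim3 Nblocks (NBLOCKS, 2, 1); then
//   hipLaunchKernelGGL(primeKernel, Nblocks, dim3(BLOCK_SIZE), 0, 0, d_xmax);
// with *d_xmax initialised to 0 before the call and copied back afterwards.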
| 25b653664a5c2ef332c14e11709f924ec988eef2.cu | /* CUDA exercise to convert a simple serial code for a brute force
largest prime number search into CUDA (32-bit, int version). This
initial code is serial, but it is written as CUDA code for your
convenience, so should be compiled with nvcc (see below). Your task
is to convert the serial computation to a kernel computation. In
the simplest case, use atomicMax to find the globally largest prime
number.
All prime numbers can be expressed as 6*k-1 or 6*k+1, k being an
integer. We provide the range of k to probe as macro parameters
KMIN and KMAX (see below).
You should get a speedup ~20x with atomicMax.
Hints:
* It's very convenient to use a two-dimensional grid of blocks,
defined as "dim3 Nblocks (NBLOCKS, 2, 1);". The second grid
dimension is used to derive the two values of j=(-1; 1) inside the
kernel: "int j = 2*blockIdx.y - 1;". This way, there will be only
one loop inside the kernel - for y.
* When you get a failure (not a prime) inside the y loop, you can exit
the thread with "return" (no need to use "break").
To compile:
nvcc -arch=sm_20 -O2 primes.cu -o primes
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Range of k-numbers for primes search:
#define KMIN 100000000
// Should be smaller than 357,913,941 (because we are using signed int)
#define KMAX 100100000
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 256
// Number of blocks to run:
#define NBLOCKS (KMAX-KMIN+BLOCK_SIZE)/BLOCK_SIZE
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Kernel(s) should go here:
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr;
double restime;
int devid, devcount, error, success;
int xmax, ymax, x, y;
if (BLOCK_SIZE>1024)
{
printf ("Bad BLOCK_SIZE: %d\n", BLOCK_SIZE);
exit (1);
}
/* find number of device in current "context" */
cudaGetDevice(&devid);
/* find how many devices are available */
if (cudaGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
}
//--------------------------------------------------------------------------------
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
// This serial computation will have to be replaced by calls to kernel(s):
xmax = 0;
for (int k=KMIN; k<=KMAX; k++)
{
// testing "-1" and "+1" cases:
for (int j=-1; j<2; j=j+2)
{
// Prime candidate:
x = 6*k + j;
// We should be dividing by numbers up to sqrt(x):
ymax = (int)ceil(sqrt((double)x));
// Primality test:
for (y=3; y<=ymax; y=y+2)
{
        // To be a success, the modulus should not be equal to zero:
success = x % y;
if (!success)
break;
}
if (success && x > xmax)
{
xmax = x;
}
}
}
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
printf ("%d\n", xmax);
printf ("Time: %e\n", restime);
//--------------------------------------------------------------------------------
return 0;
}
|
cbcc75154b84646c86ddef06c2214cc0b53d4ab1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../TFCudaCommon.h"
template<typename T>
__global__
void permutateFeatureKernel(
T *feats, // [pn,ps]
int *idxs, // [pn]
T *permutated_feats, // [pn,ps]
int pn,
int ps
)
{
int pi = threadIdx.x + blockIdx.x*blockDim.x;
int fi = threadIdx.y + blockIdx.y*blockDim.y;
if(pi>=pn||fi>=ps) return;
permutated_feats[pi*ps+fi]=feats[idxs[pi]*ps+fi];
}
template<typename T>
void permutateFeature(
T *feats, // [pn,ps]
int *idxs, // [pn]
T *permutated_feats, // [pn,ps]
int pn,
int ps
)
{
int tdim0,tdim1,tdim2=1;
int bdim0,bdim1,bdim2=1;
tdim1=1024/(tdim2);
if(ps<tdim1) tdim1=infTwoExp(ps);
bdim1=ps/tdim1;
if(ps%tdim1>0) bdim1++;
tdim0=1024/(tdim1*tdim2);
if(pn<tdim0) tdim0=infTwoExp(pn);
bdim0=pn/tdim0;
if(pn%tdim0>0) bdim0++;
dim3 block_dim(bdim0,bdim1,bdim2);
dim3 thread_dim(tdim0,tdim1,tdim2);
hipLaunchKernelGGL(( permutateFeatureKernel) , dim3(block_dim),dim3(thread_dim), 0, 0, feats,idxs,permutated_feats,pn,ps);
gpuErrchk(hipGetLastError())
}
template void permutateFeature<int>(int*,int*,int*,int,int);
template void permutateFeature<float>(float*,int*,float*,int,int); | cbcc75154b84646c86ddef06c2214cc0b53d4ab1.cu | #include "../TFCudaCommon.h"
template<typename T>
__global__
void permutateFeatureKernel(
T *feats, // [pn,ps]
int *idxs, // [pn]
T *permutated_feats, // [pn,ps]
int pn,
int ps
)
{
int pi = threadIdx.x + blockIdx.x*blockDim.x;
int fi = threadIdx.y + blockIdx.y*blockDim.y;
if(pi>=pn||fi>=ps) return;
permutated_feats[pi*ps+fi]=feats[idxs[pi]*ps+fi];
}
template<typename T>
void permutateFeature(
T *feats, // [pn,ps]
int *idxs, // [pn]
T *permutated_feats, // [pn,ps]
int pn,
int ps
)
{
int tdim0,tdim1,tdim2=1;
int bdim0,bdim1,bdim2=1;
tdim1=1024/(tdim2);
if(ps<tdim1) tdim1=infTwoExp(ps);
bdim1=ps/tdim1;
if(ps%tdim1>0) bdim1++;
tdim0=1024/(tdim1*tdim2);
if(pn<tdim0) tdim0=infTwoExp(pn);
bdim0=pn/tdim0;
if(pn%tdim0>0) bdim0++;
dim3 block_dim(bdim0,bdim1,bdim2);
dim3 thread_dim(tdim0,tdim1,tdim2);
permutateFeatureKernel <<<block_dim,thread_dim>>>(feats,idxs,permutated_feats,pn,ps);
gpuErrchk(cudaGetLastError())
}
template void permutateFeature<int>(int*,int*,int*,int,int);
template void permutateFeature<float>(float*,int*,float*,int,int); |
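// Editor's sketch (not part of the original op): a minimal standalone driver
// showing the gather semantics of permutateFeature. Sizes are arbitrary, error
// handling is omitted, and the CUDA runtime API is assumed to be available via
// the headers pulled in by TFCudaCommon.h. The function is never called.
void permutateFeatureDemo()
{
    const int pn = 8, ps = 4;
    float h_feats[pn * ps]; int h_idxs[pn];
    for (int i = 0; i < pn * ps; ++i) h_feats[i] = (float)i;
    for (int i = 0; i < pn; ++i)      h_idxs[i] = pn - 1 - i;   // reverse the rows
    float *d_feats, *d_out; int *d_idxs;
    cudaMalloc(&d_feats, sizeof(h_feats));
    cudaMalloc(&d_out,   sizeof(h_feats));
    cudaMalloc(&d_idxs,  sizeof(h_idxs));
    cudaMemcpy(d_feats, h_feats, sizeof(h_feats), cudaMemcpyHostToDevice);
    cudaMemcpy(d_idxs,  h_idxs,  sizeof(h_idxs),  cudaMemcpyHostToDevice);
    permutateFeature<float>(d_feats, d_idxs, d_out, pn, ps);
    // row p of d_out now equals row h_idxs[p] of d_feats
    cudaFree(d_feats); cudaFree(d_out); cudaFree(d_idxs);
}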
f08b2a7b6e2418287cb3de0fc98080945e91ed44.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kAddColMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
// allocate XSIZE*YSIZE floats (not just XSIZE*YSIZE bytes)
hipMalloc(&mat, XSIZE*YSIZE*sizeof(float));
float *vec = NULL;
hipMalloc(&vec, XSIZE*YSIZE*sizeof(float));
float *tgtMat = NULL;
hipMalloc(&tgtMat, XSIZE*YSIZE*sizeof(float));
float mult = 1;
unsigned int width = 1;
unsigned int height = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kAddColMult, dim3(gridBlock), dim3(threadBlock), 0, 0, mat,vec,tgtMat,mult,width,height);
hipDeviceSynchronize();
// warm-up launches
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kAddColMult, dim3(gridBlock), dim3(threadBlock), 0, 0, mat,vec,tgtMat,mult,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kAddColMult, dim3(gridBlock), dim3(threadBlock), 0, 0, mat,vec,tgtMat,mult,width,height);
}
// make sure all timed launches have finished before reading the clock
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f08b2a7b6e2418287cb3de0fc98080945e91ed44.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kAddColMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
// allocate XSIZE*YSIZE floats (not just XSIZE*YSIZE bytes)
cudaMalloc(&mat, XSIZE*YSIZE*sizeof(float));
float *vec = NULL;
cudaMalloc(&vec, XSIZE*YSIZE*sizeof(float));
float *tgtMat = NULL;
cudaMalloc(&tgtMat, XSIZE*YSIZE*sizeof(float));
float mult = 1;
unsigned int width = 1;
unsigned int height = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kAddColMult<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,mult,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kAddColMult<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,mult,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kAddColMult<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,mult,width,height);
}
// make sure all timed launches have finished before reading the clock
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
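// Editor's note (illustrative): the timed loop above launches the kernel 1000
// times back to back, so a mean per-launch figure can be derived from the same
// measurement inside the loop body, right after `usecs` is computed, e.g.
//   cout << "mean per launch (us): " << usecs.count() / 1000.0f << endl;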
b8230fdbab23a7b62973b66aa9e6a2356888a3d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
*
* wave.cu
* Concurrency and parallel programming, assignment 6.1
*
* by David van Schoorisse and Ben Witzen
* University of Amsterdam, 12-12-12 (nice date!)
*
* Calculates the well-known wave equation using CUDA. This program fills the
* first 25% of the arrays (t0 and t1) with the sinus function, and calculates
* the wave progression from there.
*
* Skeleton code was provided by Robert G. Belleman (University of Amsterdam).
*
* Usage: ./wave [i_max t_max blocksize]
* i_max (unsigned int) : amount of array elements; (default 1024)
* t_max (unsigned int) : amount of timesteps; (default 1000)
* blocksize (unsigned int) : amount of threads per block. (default 512)
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#include <iostream>
double * end;
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
__global__ void waveKernel(int n, double* prev, double* curr, double* next) {
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned t = threadIdx.x;
    // shared copies of this block's slice, 1 element per thread,
    // indexed with the local thread id
    __shared__ double s_prev[1024];
    __shared__ double s_curr[1024];
    // each thread reads 1 device memory element (pad the tail block with 0.0)
    s_prev[t] = (i < (unsigned)n) ? prev[i] : 0.0;
    s_curr[t] = (i < (unsigned)n) ? curr[i] : 0.0;
    // avoid race condition
    __syncthreads();
    if (i >= (unsigned)n)
        return;
    // neighbours come from shared memory inside the block, from global memory
    // at the block borders, and are 0.0 outside the domain borders
    double left  = (t > 0)              ? s_curr[t - 1] : (i > 0               ? curr[i - 1] : 0.0);
    double right = (t < blockDim.x - 1) ? s_curr[t + 1] : (i + 1 < (unsigned)n ? curr[i + 1] : 0.0);
    next[i] = 2 * s_curr[t] - s_prev[t] + 0.2 * (left - (2 * s_curr[t] - right));
}
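// Editor's note (added for clarity): the update in waveKernel is the standard
// explicit finite-difference form of the 1-D wave equation,
//     A(i, t+1) = 2*A(i, t) - A(i, t-1) + c * ( A(i-1, t) - 2*A(i, t) + A(i+1, t) ),
// with the coupling constant c hard-coded to 0.2; the term
// 0.2 * (left - (2*current - right)) is that spatial part with the
// parentheses rearranged.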
void waveCuda(int blocksize, int n, int t_max, double* a, double* b, double* result) {
int threadBlockSize = blocksize;
// allocate the vectors on the GPU
double* deviceA = NULL;
checkCudaCall(hipMalloc((void **) &deviceA, n * sizeof(double)));
if (deviceA == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
double* deviceB = NULL;
checkCudaCall(hipMalloc((void **) &deviceB, n * sizeof(double)));
if (deviceB == NULL) {
checkCudaCall(hipFree(deviceA));
cout << "could not allocate memory!" << endl;
return;
}
double* deviceResult = NULL;
checkCudaCall(hipMalloc((void **) &deviceResult, n * sizeof(double)));
if (deviceResult == NULL) {
checkCudaCall(hipFree(deviceA));
checkCudaCall(hipFree(deviceB));
cout << "could not allocate memory!" << endl;
return;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// copy the original vectors to the GPU
checkCudaCall(hipMemcpy(deviceA, a, n*sizeof(double), hipMemcpyHostToDevice));
checkCudaCall(hipMemcpy(deviceB, b, n*sizeof(double), hipMemcpyHostToDevice));
// copy the result vector (it's zero'd), just to be sure
checkCudaCall(hipMemcpy(deviceResult, result, n*sizeof(double), hipMemcpyHostToDevice));
// execute kernel, t_max amount of times.
hipEventRecord(start, 0);
for (int t = 1; t < t_max; t++) {
// execute kernel
hipLaunchKernelGGL(( waveKernel), dim3(ceil((double)n/threadBlockSize)), dim3(threadBlockSize), 0, 0, n, deviceA, deviceB, deviceResult);
// rotate buffers
double * tmp = deviceA; // tmp = prev
deviceA = deviceB; // prev = cur
deviceB = deviceResult; // cur = next
deviceResult = tmp; // next = tmp (= prev)
}
hipEventRecord(stop, 0);
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
    // copy result back: after the final buffer rotation the newest time step lives in deviceB
    checkCudaCall(hipMemcpy(result, deviceB, n * sizeof(double), hipMemcpyDeviceToHost));
checkCudaCall(hipFree(deviceA));
checkCudaCall(hipFree(deviceB));
checkCudaCall(hipFree(deviceResult));
// print the time the kernel invocation took, without the copies!
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "kernel invocation took " << elapsedTime << " milliseconds" << endl;
}
/*
* Quite a handy function that fills an array with samples of a given function.
* Previously used in assignments 1.1, 2.1, and 3.1 for this course. Thanks
* Koen!
*/
typedef double (*func_t)(double x);
void fill(double *array, int offset, int range, double sample_start,
double sample_end, func_t f) {
int i;
double dx;
dx = (sample_end - sample_start) / range;
for (i = 0; i < range; i++)
array[i + offset] = f(sample_start + i * dx);
}
int main(int argc, char* argv[]) {
// default values for variables
int n = 1024;
int t_max = 1000;
int blocksize = 512;
// process command-line arguments
if (argc == 1) {
cout << "\033[;33m" << endl;
cout << "Remember you can use command-line arguments:" << endl;
cout << "./wave [i_max t_max blocksize]" << endl;
cout << "Now I'm using default values." << endl;
cout << "\033[0m" << endl;
}
else if (argc == 4) {
n = atoi(argv[1]);
t_max = atoi(argv[2]);
blocksize = atoi(argv[3]);
}
else {
cout << "\033[;33m" << endl;
cout << "Invalid amount of arguments." << endl;
cout << "./wave [INT i_max INT t_max INT blocksize]" << endl;
cout << "i_max = array length t_max = timesteps" << endl;
cout << "\033[0m" << endl;
return -2;
}
// validate arguments
if (n <= 0 || t_max <= 0 || blocksize <= 0) {
cout << "Argument error: each argument must be >0." << endl;
return -2;
}
// print values being used
cout << "\033[;36m" << endl;
cout << "Using values:" << endl;
cout << "i_max = " << n << endl;
cout << "t_max = " << t_max << endl;
cout << "blocksize = " << blocksize << endl;
cout << "\033[0m" << endl;
// start timer, prepare arrays
timer vectorAddTimer("vector add timer");
double* a = new double[n];
double* b = new double[n];
double* result = new double[n];
// initialize the vectors
fill(a, 1, n/4, 0, 2*3.14, sin);
fill(b, 2, n/4, 0, 2*3.14, sin);
// set the result vector to 0, just to be sure
for (int i = 0; i < n; i++)
result[i] = 0;
vectorAddTimer.start();
waveCuda(blocksize, n, t_max, a, b, result);
vectorAddTimer.stop();
cout << vectorAddTimer;
// write results to file (might be useful)
FILE * fp = fopen("results.txt", "w");
if (fp == NULL)
cout << "Could not write away results.txt" << endl;
else {
for (int i = 0; i < n; i++)
fprintf(fp, "%f\n", result[i]);
cout << "Results written to results.txt" << endl;
fclose(fp);
}
delete[] a;
delete[] b;
delete[] result;
return 0;
}
| b8230fdbab23a7b62973b66aa9e6a2356888a3d3.cu | /*******************************************************************************
*
* wave.cu
* Concurrency and parallel programming, assignment 6.1
*
* by David van Schoorisse and Ben Witzen
* University of Amsterdam, 12-12-12 (nice date!)
*
* Calculates the well-known wave equation using CUDA. This program fills the
* first 25% of the arrays (t0 and t1) with the sinus function, and calculates
* the wave progression from there.
*
* Skeleton code was provided by Robert G. Belleman (University of Amsterdam).
*
* Usage: ./wave [i_max t_max blocksize]
* i_max (unsigned int) : amount of array elements; (default 1024)
* t_max (unsigned int) : amount of timesteps; (default 1000)
* blocksize (unsigned int) : amount of threads per block. (default 512)
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#include <iostream>
double * end;
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
__global__ void waveKernel(int n, double* prev, double* curr, double* next) {
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned t = threadIdx.x;
    // shared copies of this block's slice, 1 element per thread,
    // indexed with the local thread id
    __shared__ double s_prev[1024];
    __shared__ double s_curr[1024];
    // each thread reads 1 device memory element (pad the tail block with 0.0)
    s_prev[t] = (i < (unsigned)n) ? prev[i] : 0.0;
    s_curr[t] = (i < (unsigned)n) ? curr[i] : 0.0;
    // avoid race condition
    __syncthreads();
    if (i >= (unsigned)n)
        return;
    // neighbours come from shared memory inside the block, from global memory
    // at the block borders, and are 0.0 outside the domain borders
    double left  = (t > 0)              ? s_curr[t - 1] : (i > 0               ? curr[i - 1] : 0.0);
    double right = (t < blockDim.x - 1) ? s_curr[t + 1] : (i + 1 < (unsigned)n ? curr[i + 1] : 0.0);
    next[i] = 2 * s_curr[t] - s_prev[t] + 0.2 * (left - (2 * s_curr[t] - right));
}
void waveCuda(int blocksize, int n, int t_max, double* a, double* b, double* result) {
int threadBlockSize = blocksize;
// allocate the vectors on the GPU
double* deviceA = NULL;
checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(double)));
if (deviceA == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
double* deviceB = NULL;
checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(double)));
if (deviceB == NULL) {
checkCudaCall(cudaFree(deviceA));
cout << "could not allocate memory!" << endl;
return;
}
double* deviceResult = NULL;
checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(double)));
if (deviceResult == NULL) {
checkCudaCall(cudaFree(deviceA));
checkCudaCall(cudaFree(deviceB));
cout << "could not allocate memory!" << endl;
return;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// copy the original vectors to the GPU
checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(double), cudaMemcpyHostToDevice));
checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(double), cudaMemcpyHostToDevice));
// copy the result vector (it's zero'd), just to be sure
checkCudaCall(cudaMemcpy(deviceResult, result, n*sizeof(double), cudaMemcpyHostToDevice));
// execute kernel, t_max amount of times.
cudaEventRecord(start, 0);
for (int t = 1; t < t_max; t++) {
// execute kernel
waveKernel<<<ceil((double)n/threadBlockSize), threadBlockSize>>>(n, deviceA, deviceB, deviceResult);
// rotate buffers
double * tmp = deviceA; // tmp = prev
deviceA = deviceB; // prev = cur
deviceB = deviceResult; // cur = next
deviceResult = tmp; // next = tmp (= prev)
}
cudaEventRecord(stop, 0);
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(double), cudaMemcpyDeviceToHost));
checkCudaCall(cudaFree(deviceA));
checkCudaCall(cudaFree(deviceB));
checkCudaCall(cudaFree(deviceResult));
// print the time the kernel invocation took, without the copies!
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "kernel invocation took " << elapsedTime << " milliseconds" << endl;
}
/*
* Quite a handy function that fills an array with samples of a given function.
* Previously used in assignments 1.1, 2.1, and 3.1 for this course. Thanks
* Koen!
*/
typedef double (*func_t)(double x);
void fill(double *array, int offset, int range, double sample_start,
double sample_end, func_t f) {
int i;
double dx;
dx = (sample_end - sample_start) / range;
for (i = 0; i < range; i++)
array[i + offset] = f(sample_start + i * dx);
}
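/*
 * Example (this is exactly how main() below uses it): sample one period of sin()
 * into the first quarter of the array, starting at element 1:
 *
 *   fill(a, 1, n/4, 0, 2*3.14, sin);
 */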
int main(int argc, char* argv[]) {
// default values for variables
int n = 1024;
int t_max = 1000;
int blocksize = 512;
// process command-line arguments
if (argc == 1) {
cout << "\033[;33m" << endl;
cout << "Remember you can use command-line arguments:" << endl;
cout << "./wave [i_max t_max blocksize]" << endl;
cout << "Now I'm using default values." << endl;
cout << "\033[0m" << endl;
}
else if (argc == 4) {
n = atoi(argv[1]);
t_max = atoi(argv[2]);
blocksize = atoi(argv[3]);
}
else {
cout << "\033[;33m" << endl;
cout << "Invalid amount of arguments." << endl;
cout << "./wave [INT i_max INT t_max INT blocksize]" << endl;
cout << "i_max = array length t_max = timesteps" << endl;
cout << "\033[0m" << endl;
return -2;
}
// validate arguments
if (n <= 0 || t_max <= 0 || blocksize <= 0) {
cout << "Argument error: each argument must be >0." << endl;
return -2;
}
// print values being used
cout << "\033[;36m" << endl;
cout << "Using values:" << endl;
cout << "i_max = " << n << endl;
cout << "t_max = " << t_max << endl;
cout << "blocksize = " << blocksize << endl;
cout << "\033[0m" << endl;
// start timer, prepare arrays
timer vectorAddTimer("vector add timer");
double* a = new double[n];
double* b = new double[n];
double* result = new double[n];
// initialize the vectors
fill(a, 1, n/4, 0, 2*3.14, sin);
fill(b, 2, n/4, 0, 2*3.14, sin);
// set the result vector to 0, just to be sure
for (int i = 0; i < n; i++)
result[i] = 0;
vectorAddTimer.start();
waveCuda(blocksize, n, t_max, a, b, result);
vectorAddTimer.stop();
cout << vectorAddTimer;
// write results to file (might be useful)
FILE * fp = fopen("results.txt", "w");
if (fp == NULL)
cout << "Could not write away results.txt" << endl;
else {
for (int i = 0; i < n; i++)
fprintf(fp, "%f\n", result[i]);
cout << "Results written to results.txt" << endl;
fclose(fp);
}
delete[] a;
delete[] b;
delete[] result;
return 0;
}
|
537d95c2e79a404381162638205ad762bd1cb9df.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void compute(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float I1 = tid * 2.0;
int thread_id = threadIdx.x % 32;
if (thread_id < div) {
__asm volatile (
" .reg .s32 %r12;\n\t"
" .reg .s32 %r13;\n\t"
" .reg .s32 %r14;\n\t"
" .reg .s32 %r15;\n\t"
" .reg .s32 %r16;\n\t"
" .reg .s32 %r17;\n\t"
" .reg .s32 %r18;\n\t"
" .reg .s32 %r19;\n\t"
" .reg .s32 %r20;\n\t"
" .reg .s32 %r21;\n\t"
" .reg .s32 %r22;\n\t"
" .reg .s32 %r23;\n\t"
" .reg .s32 %r24;\n\t"
" .reg .s32 %r25;\n\t"
" .reg .s32 %r26;\n\t"
" .reg .s32 %r27;\n\t"
" .reg .s32 %r28;\n\t"
"mov.s32 %r12, 44;\n\t"
"mov.s32 %r13, %r12;\n\t"
"mov.s32 %r14, 22;\n\t"
"mov.s32 %r15, 33;\n\t"
"mov.s32 %r16, 123;\n\t"
"mov.s32 %r17, 242;\n\t"
"mov.s32 %r18, 334;\n\t"
"mov.s32 %r19, 562;\n\t"
"mov.s32 %r20, 256;\n\t"
"mov.s32 %r21, 156;\n\t"
"mov.s32 %r22, 256;\n\t"
"mov.s32 %r23, 556;\n\t"
"mov.s32 %r24, 856;\n\t"
"mov.s32 %r25, 356;\n\t"
"mov.s32 %r26, 556;\n\t"
"mov.s32 %r27, 656;\n\t"
"mov.s32 %r28, 56;\n\t"
);
for (int k = 0; k < n; k++) {
__asm volatile (
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
*D = I1;
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
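/*
 * Example invocation (the numbers are only illustrative):
 *
 *   ./binary 80 256 1000 16
 *
 * launches 80 blocks of 256 threads and runs the max.s32 loop 1000 times, with only
 * the first 16 of the 32 lanes of each warp active, since the kernel enters the loop
 * only when threadIdx.x % 32 < divergence.
 */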
int main(int argc, char **argv)
{
if (argc != 5) {
usage();
exit(1);
}
int num_blocks = atoi(argv[1]);
int num_threads_per_block = atoi(argv[2]);
int iterations = atoi(argv[3]);
int divergence = atoi(argv[4]);
// h_A = new float(2.0);
// h_B = new float(3.0);
// h_C = new float(4.0);
// hipMalloc((void**)&d_A, sizeof(float));
// hipMalloc((void**)&d_B, sizeof(float));
// hipMalloc((void**)&d_C, sizeof(float));
hipMalloc((void**)&d_res, sizeof(float));
// hipMemcpy(d_A, h_A, sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_B, h_B, sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_C, h_C, sizeof(float), hipMemcpyHostToDevice);
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
// hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_A, d_B, d_C, d_res, iterations);
hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_res, iterations, divergence);
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::cout << "GPU Elapsed Time = " << time << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceSynchronize();
    h_res = new float; // h_res is declared but never allocated above; allocate it before copying the result back
    hipMemcpy(h_res, d_res, sizeof(float), hipMemcpyDeviceToHost);
return 0;
}
| 537d95c2e79a404381162638205ad762bd1cb9df.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void compute(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float I1 = tid * 2.0;
int thread_id = threadIdx.x % 32;
if (thread_id < div) {
__asm volatile (
" .reg .s32 %r12;\n\t"
" .reg .s32 %r13;\n\t"
" .reg .s32 %r14;\n\t"
" .reg .s32 %r15;\n\t"
" .reg .s32 %r16;\n\t"
" .reg .s32 %r17;\n\t"
" .reg .s32 %r18;\n\t"
" .reg .s32 %r19;\n\t"
" .reg .s32 %r20;\n\t"
" .reg .s32 %r21;\n\t"
" .reg .s32 %r22;\n\t"
" .reg .s32 %r23;\n\t"
" .reg .s32 %r24;\n\t"
" .reg .s32 %r25;\n\t"
" .reg .s32 %r26;\n\t"
" .reg .s32 %r27;\n\t"
" .reg .s32 %r28;\n\t"
"mov.s32 %r12, 44;\n\t"
"mov.s32 %r13, %r12;\n\t"
"mov.s32 %r14, 22;\n\t"
"mov.s32 %r15, 33;\n\t"
"mov.s32 %r16, 123;\n\t"
"mov.s32 %r17, 242;\n\t"
"mov.s32 %r18, 334;\n\t"
"mov.s32 %r19, 562;\n\t"
"mov.s32 %r20, 256;\n\t"
"mov.s32 %r21, 156;\n\t"
"mov.s32 %r22, 256;\n\t"
"mov.s32 %r23, 556;\n\t"
"mov.s32 %r24, 856;\n\t"
"mov.s32 %r25, 356;\n\t"
"mov.s32 %r26, 556;\n\t"
"mov.s32 %r27, 656;\n\t"
"mov.s32 %r28, 56;\n\t"
);
for (int k = 0; k < n; k++) {
__asm volatile (
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
"max.s32 %r13, %r11, %r13;\n\t"
"max.s32 %r14, %r11, %r14;\n\t"
"max.s32 %r15, %r11, %r15;\n\t"
"max.s32 %r16, %r11, %r16;\n\t"
"max.s32 %r17, %r11, %r17;\n\t"
"max.s32 %r18, %r11, %r18;\n\t"
"max.s32 %r19, %r11, %r19;\n\t"
"max.s32 %r20, %r11, %r20;\n\t"
"max.s32 %r21, %r11, %r21;\n\t"
"max.s32 %r22, %r11, %r22;\n\t"
"max.s32 %r23, %r11, %r23;\n\t"
"max.s32 %r24, %r11, %r24;\n\t"
"max.s32 %r25, %r11, %r25;\n\t"
"max.s32 %r26, %r11, %r26;\n\t"
"max.s32 %r27, %r11, %r27;\n\t"
"max.s32 %r28, %r11, %r28;\n\t"
);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
*D = I1;
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
int main(int argc, char **argv)
{
if (argc != 5) {
usage();
exit(1);
}
int num_blocks = atoi(argv[1]);
int num_threads_per_block = atoi(argv[2]);
int iterations = atoi(argv[3]);
int divergence = atoi(argv[4]);
// h_A = new float(2.0);
// h_B = new float(3.0);
// h_C = new float(4.0);
// cudaMalloc((void**)&d_A, sizeof(float));
// cudaMalloc((void**)&d_B, sizeof(float));
// cudaMalloc((void**)&d_C, sizeof(float));
cudaMalloc((void**)&d_res, sizeof(float));
// cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
// compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations);
compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence);
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout << "GPU Elapsed Time = " << time << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
    h_res = new float; // h_res is declared but never allocated above; allocate it before copying the result back
    cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost);
return 0;
}
|
ca7053605126c203dc1a1cf976ae495d44335f7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Render.h"
template<class T>
__host__ __device__ const T& max(const T& a, const T& b)
{
return a < b ? b : a;
}
__host__ __device__ Vec3f reflect(const Vec3f& I, const Vec3f& N) noexcept
{
return I - N * 2.f * (I * N);
}
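/*
 * This is the usual mirror-reflection formula R = I - 2*(I.N)*N; it assumes N has
 * unit length and that Vec3f's operator* between two vectors behaves as a dot
 * product, which is how it is used throughout this file.
 */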
__host__ __device__ bool sceneIntersect(const Vec3f& pos, const Vec3f& dir, const Sphere* const spheres, const uint spheresCount, Vec3f& hit, Vec3f& N, Material& material)
{
float spheresDist = FLT_MAX;
auto iTmp = UINT_MAX;
for (auto i = decltype(spheresCount){0}; i < spheresCount; ++i)
{
float distI;
if (spheres[i].RayIntersect(pos, dir, distI) && distI < spheresDist)
{
spheresDist = distI;
iTmp = i;
}
}
if (iTmp != UINT_MAX)
{
hit = pos + dir * spheresDist;
N = (hit - spheres[iTmp].center).normalize();
material = spheres[iTmp].material;
}
return spheresDist < 1000;
}
__host__ __device__ ColorRGB castRay(const Vec3f& pos, const Vec3f& dir, const Sphere* const spheres, const uint spheresCount, const Light* const lights, const uint lightsCount, uint depth)
{
Vec3f point, N; // N - surface normal.
Material material;
if (depth > MAX_DEPTH || !sceneIntersect(pos, dir, spheres, spheresCount, point, N, material))
{
return /*backgroundColor*/ColorRGB(0.05f, 0.04f, 0.08f);
}
const Vec3f reflectDir = reflect(dir, N)/*.normalize()*/;
const Vec3f reflectOrig = reflectDir * N < 0 ? point - N * 9.96e-4f : point + N * 9.96e-4f;
const Vec3f reflectColor = castRay(reflectOrig, reflectDir, spheres, spheresCount, lights, lightsCount, depth + 1);
float diffuseLightIntensivity = 0.f, specularLightIntensivity = 0.f;
for (auto i = decltype(lightsCount){0}; i < lightsCount; ++i)
{
const Vec3f lightDir = (lights[i].position - point).normalize();
const float lightDist = (lights[i].position - point).norm();
const Vec3f shadowOrig = lightDir * N < 0 ? point - N * 9.96e-4f : point + N * 9.96e-4f;
Vec3f shadowPoint, shadowN;
Material dummyMat;
if (sceneIntersect(shadowOrig, lightDir, spheres, spheresCount, shadowPoint, shadowN, dummyMat) && (shadowPoint - shadowOrig).norm() < lightDist)
{
continue;
}
diffuseLightIntensivity += lights[i].intensity * max(0.f, lightDir * N);
specularLightIntensivity += pow(max(0.f, reflect(lightDir, N) * dir), material.specularExp) * lights[i].intensity;
}
return /*backgroundColor*/Vec3f(0.05f, 0.04f, 0.08f) * 0.4f +
material.diffuseColor * diffuseLightIntensivity * material.reflectivity[0] +
Vec3f(1.0f, 1.0f, 1.0f) * specularLightIntensivity * material.reflectivity[1] +
reflectColor * material.reflectivity[2];
//return material.diffuseColor * (diffuseLightIntensivity + specularLightIntensivity);
//return material.diffuseColor * (diffuseLightIntensivity + specularLightIntensivity);
}
__global__ void dev_exportToJPG(const unsigned short* const width, const unsigned short* const height, unsigned char* B, unsigned char* G, unsigned char* R, const Sphere* const spheres, const uint spheresCount, const Light* const lights, const uint lightsCount)
{
const auto j = blockIdx.x * blockDim.x + threadIdx.x;
const auto i = blockIdx.y * blockDim.y + threadIdx.y;
if (j < *width && i < *height)
{
const float fov = M_PI_2;//75.f;
const float tang = tan(fov / 2.f / 2.f);
const float rotX = 0.f, rotY = 0.f;
const float x = (2 * (j + rotX) / (float)*width - 1.f) * tang * *width / (float)*height;
const float y = -(2 * (i + rotY) / (float)*height - 1.f) * tang;
const Vec3f dir = Vec3f(x, y, -1).normalize();
ColorRGB col = castRay({ 0.f, 0.f, 0.f }, dir, spheres, spheresCount, lights, lightsCount, 0 /* initial recursion depth */);
const float maxBGR = max(col.BLUE, max(col.GREEN, col.RED));
if (maxBGR > 1.f)
{
col = col * (1.f / maxBGR);
}
//col = max(col.BLUE, max(col.GREEN, col.RED)) > 1.f ? col * (1.f / maxBGR) : col;
B[j + i * *width] = col.BLUE * 255;
G[j + i * *width] = col.GREEN * 255;
R[j + i * *width] = col.RED * 255;
}
}
| ca7053605126c203dc1a1cf976ae495d44335f7e.cu | #include "Render.h"
template<class T>
__host__ __device__ const T& max(const T& a, const T& b)
{
return a < b ? b : a;
}
__host__ __device__ Vec3f reflect(const Vec3f& I, const Vec3f& N) noexcept
{
return I - N * 2.f * (I * N);
}
__host__ __device__ bool sceneIntersect(const Vec3f& pos, const Vec3f& dir, const Sphere* const spheres, const uint spheresCount, Vec3f& hit, Vec3f& N, Material& material)
{
float spheresDist = FLT_MAX;
auto iTmp = UINT_MAX;
for (auto i = decltype(spheresCount){0}; i < spheresCount; ++i)
{
float distI;
if (spheres[i].RayIntersect(pos, dir, distI) && distI < spheresDist)
{
spheresDist = distI;
iTmp = i;
}
}
if (iTmp != UINT_MAX)
{
hit = pos + dir * spheresDist;
N = (hit - spheres[iTmp].center).normalize();
material = spheres[iTmp].material;
}
return spheresDist < 1000;
}
__host__ __device__ ColorRGB castRay(const Vec3f& pos, const Vec3f& dir, const Sphere* const spheres, const uint spheresCount, const Light* const lights, const uint lightsCount, uint depth)
{
Vec3f point, N; // N - surface normal.
Material material;
if (depth > MAX_DEPTH || !sceneIntersect(pos, dir, spheres, spheresCount, point, N, material))
{
return /*backgroundColor*/ColorRGB(0.05f, 0.04f, 0.08f);
}
const Vec3f reflectDir = reflect(dir, N)/*.normalize()*/;
const Vec3f reflectOrig = reflectDir * N < 0 ? point - N * 9.96e-4f : point + N * 9.96e-4f;
const Vec3f reflectColor = castRay(reflectOrig, reflectDir, spheres, spheresCount, lights, lightsCount, depth + 1);
float diffuseLightIntensivity = 0.f, specularLightIntensivity = 0.f;
for (auto i = decltype(lightsCount){0}; i < lightsCount; ++i)
{
const Vec3f lightDir = (lights[i].position - point).normalize();
const float lightDist = (lights[i].position - point).norm();
const Vec3f shadowOrig = lightDir * N < 0 ? point - N * 9.96e-4f : point + N * 9.96e-4f;
Vec3f shadowPoint, shadowN;
Material dummyMat;
if (sceneIntersect(shadowOrig, lightDir, spheres, spheresCount, shadowPoint, shadowN, dummyMat) && (shadowPoint - shadowOrig).norm() < lightDist)
{
continue;
}
diffuseLightIntensivity += lights[i].intensity * max(0.f, lightDir * N);
specularLightIntensivity += pow(max(0.f, reflect(lightDir, N) * dir), material.specularExp) * lights[i].intensity;
}
return /*backgroundColor*/Vec3f(0.05f, 0.04f, 0.08f) * 0.4f +
material.diffuseColor * diffuseLightIntensivity * material.reflectivity[0] +
Vec3f(1.0f, 1.0f, 1.0f) * specularLightIntensivity * material.reflectivity[1] +
reflectColor * material.reflectivity[2];
//return material.diffuseColor * (diffuseLightIntensivity + specularLightIntensivity);
//return material.diffuseColor * (diffuseLightIntensivity + specularLightIntensivity);
}
__global__ void dev_exportToJPG(const unsigned short* const width, const unsigned short* const height, unsigned char* B, unsigned char* G, unsigned char* R, const Sphere* const spheres, const uint spheresCount, const Light* const lights, const uint lightsCount)
{
const auto j = blockIdx.x * blockDim.x + threadIdx.x;
const auto i = blockIdx.y * blockDim.y + threadIdx.y;
if (j < *width && i < *height)
{
const float fov = M_PI_2;//75.f;
const float tang = tan(fov / 2.f / 2.f);
const float rotX = 0.f, rotY = 0.f;
const float x = (2 * (j + rotX) / (float)*width - 1.f) * tang * *width / (float)*height;
const float y = -(2 * (i + rotY) / (float)*height - 1.f) * tang;
const Vec3f dir = Vec3f(x, y, -1).normalize();
ColorRGB col = castRay({ 0.f, 0.f, 0.f }, dir, spheres, spheresCount, lights, lightsCount, 0 /* initial recursion depth */);
const float maxBGR = max(col.BLUE, max(col.GREEN, col.RED));
if (maxBGR > 1.f)
{
col = col * (1.f / maxBGR);
}
//col = max(col.BLUE, max(col.GREEN, col.RED)) > 1.f ? col * (1.f / maxBGR) : col;
B[j + i * *width] = col.BLUE * 255;
G[j + i * *width] = col.GREEN * 255;
R[j + i * *width] = col.RED * 255;
}
}
|
3e011663aeb7099e8280c7f170f1440accf05be5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <gdrapi.h>
#include <common/logging.h>
#include <common/dump_utils.h>
#include <core/xms.h>
#include <api/block_itf.h>
#include <api/memory_itf.h>
#define CUDA_CHECK(x) if(x != hipSuccess) \
PERR("error: cuda err=%s", hipGetErrorString (hipGetLastError()));
__global__ void verify_memory(void * ptr)
{
char * p = (char *) ptr;
printf("From GPU: should be 0f 0f 0f ...\n");
printf("From GPU: %02x %02x %02x ...\n", p[0], p[1], p[2]);
}
extern "C" void cuda_run_test(Component::IBlock_device * block_device)
{
printf("run_test (cuda app lib)\n");
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
Component::VOLUME_INFO vi;
block_device->get_volume_info(vi);
PLOG("volume info: blocksize=%d", vi.block_size);
/* allocate GDR memory */
void* device_buffer = NULL;
size_t buffer_size = MB(32);
/**
* Here's the issue. SPDK requires 2MB aligned memory. Because CUDA
* does not allow alignment request, we have to allocate more than
* we need then calculate offset to aligned physical address and
* then re-map a new virtual address which is 2MB aligned. We will
* basically have to allocate a large slab and then do the
* management ourselves.
*
 * Yes, it's that painful.
*/
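/* Sketch of the workaround carried out below:
 *
 *   new_paddr = round_up(host_paddr, MB(2));                 // first 2MB-aligned physical address in the slab
 *   offset    = new_paddr - host_paddr;                      // how far into the over-sized allocation it sits
 *   new_vaddr = xms_mmap(<hint address>, new_paddr, MB(2));  // fresh 2MB-aligned virtual mapping
 *
 * Only that 2MB window of the 32MB hipMalloc'd slab is then registered with SPDK.
 */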
CUDA_CHECK(hipMalloc(&device_buffer, buffer_size));
gdr_t g = gdr_open();
assert(g);
gdr_mh_t mh;
if(gdr_pin_buffer(g,
(unsigned long) device_buffer,
buffer_size,
0,
0,
&mh))
throw General_exception("gdr_pin_buffer failed");
void *bar_ptr = NULL;
  if(gdr_map(g, mh, &bar_ptr, buffer_size))
throw General_exception("gdr_map failed");
PLOG("gdr_map returned bar_ptr=%p", bar_ptr);
assert(bar_ptr);
gdr_info_t info;
if(gdr_get_info(g, mh, &info))
throw General_exception("gdr_get_info failed");
int off = ((unsigned long)device_buffer) - info.va;
PLOG("offset: %d", off);
void *host_vaddr = (void*) ((char *)bar_ptr + off);
addr_t host_paddr = xms_get_phys(host_vaddr);
PLOG("GDR vaddr=%p paddr=%p", (void*)host_vaddr, (void*)host_paddr);
addr_t new_paddr = round_up(host_paddr, MB(2)); // only 2MB of 32MB is used here, to enforce alignment
unsigned offset = new_paddr - host_paddr;
void * new_vaddr = ((char*)host_vaddr)+offset;
new_vaddr = xms_mmap((void*) 0x900000000, new_paddr, MB(2));
PLOG("new paddr=0x%lx vaddr=0x%lx", (addr_t) new_paddr, (addr_t)new_vaddr);
PMAJOR("memory looks OK w.r.t 2MB alignment");
memset(new_vaddr, 0xb, KB(4));
/* register GDR memory with block device */
Component::io_buffer_t iob = block_device->register_memory_for_io(new_vaddr,
new_paddr,
MB(2));
/* try DMA from device into this buffer... here we go... */
PLOG("about to perform read operation...");
block_device->read(iob, 0, 1, 1, 0);
PMAJOR("read operation OK!");
hexdump(new_vaddr, 32);
/* verify on the GPU side the result */
hipLaunchKernelGGL(( verify_memory), dim3(1),dim3(1), 0, 0, ((char*)device_buffer) + offset);
hipDeviceSynchronize();
}
| 3e011663aeb7099e8280c7f170f1440accf05be5.cu | #include <stdio.h>
#include <gdrapi.h>
#include <common/logging.h>
#include <common/dump_utils.h>
#include <core/xms.h>
#include <api/block_itf.h>
#include <api/memory_itf.h>
#define CUDA_CHECK(x) if(x != cudaSuccess) \
PERR("error: cuda err=%s", cudaGetErrorString (cudaGetLastError()));
__global__ void verify_memory(void * ptr)
{
char * p = (char *) ptr;
printf("From GPU: should be 0f 0f 0f ...\n");
printf("From GPU: %02x %02x %02x ...\n", p[0], p[1], p[2]);
}
extern "C" void cuda_run_test(Component::IBlock_device * block_device)
{
printf("run_test (cuda app lib)\n");
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
Component::VOLUME_INFO vi;
block_device->get_volume_info(vi);
PLOG("volume info: blocksize=%d", vi.block_size);
/* allocate GDR memory */
void* device_buffer = NULL;
size_t buffer_size = MB(32);
/**
* Here's the issue. SPDK requires 2MB aligned memory. Because CUDA
* does not allow alignment request, we have to allocate more than
* we need then calculate offset to aligned physical address and
* then re-map a new virtual address which is 2MB aligned. We will
* basically have to allocate a large slab and then do the
* management ourselves.
*
 * Yes, it's that painful.
*/
CUDA_CHECK(cudaMalloc(&device_buffer, buffer_size));
gdr_t g = gdr_open();
assert(g);
gdr_mh_t mh;
if(gdr_pin_buffer(g,
(unsigned long) device_buffer,
buffer_size,
0,
0,
&mh))
throw General_exception("gdr_pin_buffer failed");
void *bar_ptr = NULL;
  if(gdr_map(g, mh, &bar_ptr, buffer_size))
throw General_exception("gdr_map failed");
PLOG("gdr_map returned bar_ptr=%p", bar_ptr);
assert(bar_ptr);
gdr_info_t info;
if(gdr_get_info(g, mh, &info))
throw General_exception("gdr_get_info failed");
int off = ((unsigned long)device_buffer) - info.va;
PLOG("offset: %d", off);
void *host_vaddr = (void*) ((char *)bar_ptr + off);
addr_t host_paddr = xms_get_phys(host_vaddr);
PLOG("GDR vaddr=%p paddr=%p", (void*)host_vaddr, (void*)host_paddr);
addr_t new_paddr = round_up(host_paddr, MB(2)); // only 2MB of 32MB is used here, to enforce alignment
unsigned offset = new_paddr - host_paddr;
void * new_vaddr = ((char*)host_vaddr)+offset;
new_vaddr = xms_mmap((void*) 0x900000000, new_paddr, MB(2));
PLOG("new paddr=0x%lx vaddr=0x%lx", (addr_t) new_paddr, (addr_t)new_vaddr);
PMAJOR("memory looks OK w.r.t 2MB alignment");
memset(new_vaddr, 0xb, KB(4));
/* register GDR memory with block device */
Component::io_buffer_t iob = block_device->register_memory_for_io(new_vaddr,
new_paddr,
MB(2));
/* try DMA from device into this buffer... here we go... */
PLOG("about to perform read operation...");
block_device->read(iob, 0, 1, 1, 0);
PMAJOR("read operation OK!");
hexdump(new_vaddr, 32);
/* verify on the GPU side the result */
verify_memory<<<1,1>>>(((char*)device_buffer) + offset);
cudaDeviceSynchronize();
}
|
5068ccd36650e8512c93705aca062f85bae8e197.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define WARP_SIZE 32
#define GROUPSET 16
#define NUMFACES 3
#define fouralpha 1.82
#define fouralpha4 5.82
#define Connect(a,b,c) Connect[ a + 3 * ( b + mC * c ) ]
__device__ __forceinline__ double shfl_d(double var,int lane)
{ float lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=f"(lo), "=f"(hi) : "d"(var));
hi = __shfl(hi, lane);
lo = __shfl(lo, lane);
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "f"(lo), "f"(hi));
return var;
}
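/*
 * Example: broadcast lane 0's copy of a double to every lane in the warp; each lane
 * passes in its own value v and gets back lane 0's value:
 *
 *   double v0 = shfl_d(v, 0);
 */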
extern "C"
{
__global__ void GPU_sweep(
int size_maxCorner,
int size_maxcf,
int nAngle,
int nzones,
int ncornr,
int Groups,
int nbelem,
int* AngleOrder,
double* soa_omega,
int* nextZ,
int* next,
int* soa_nCorner,
int* soa_nCFaces,
int* soa_c0,
double* soa_STotal,
double* STimeBatch,
double* soa_SigtInv,
double* soa_Volume,
double* soa_Sigt,
double* soa_A_fp,
double* soa_A_ez,
int* soa_Connect,
double* psic,
double* psib,
double* omega_A_fp,
double* omega_A_ez,
int* soa_Connect_ro,
int* passZ,
bool calcSTime,
double tau
)
{
// double omega[3];
int c,ig,i,icface,ifp,cez,k;
// double Q[Groups * size_maxCorner];
// double src[Groups * size_maxCorner];
// double SigtVol[Groups * size_maxCorner];
// double afpm[size_maxcf];
// double psifp[Groups * size_maxcf];
// int ez_exit[size_maxcf];
// double coefpsic[size_maxcf];
// double tpsic[Groups * size_maxCorner];
// double psi_opp[Groups];
double area_opp,area_inv,sumArea;
double r_psifp;
double psi_opp,tpsic,r_afpm;
double Q[8];
double src[8];
//double volume[8];
//double coefpsic_stk[3];
//double psifp[3];
//int ez_exit[3];
//double *src;
volatile double *volume;
volatile double *coefpsic;
volatile double *psifp;
volatile int *ez_exit;
//__shared__ volatile double sm_agg[12*128]; // 4x32 thread per tb. 8tb. 6KB
extern __shared__ double sm_agg[];
int offset = (8+3+3*WARP_SIZE+3)*threadIdx.y;
volume = &(sm_agg[offset]); //8 doubles
offset += size_maxCorner;
coefpsic = &(sm_agg[offset]); // 3 doubles
offset += size_maxcf;
psifp = &(sm_agg[offset]); // 3 x warp size doubles
offset += size_maxcf * WARP_SIZE;
//note ez_exit has integer type
ez_exit = (int*) &(sm_agg[offset]); // 3 int
// for(int Angle=0;Angle<nAngle;Angle++)
// const double fouralpha = 1.82;
// const double fouralpha4 = 5.82;
#define soa_omega(a,b) soa_omega[a + 3 * b]
// #define tpsic(ig,c) tpsic[ (ig) + Groups * (c)]
#define EB_ListExit(a,ia) EB_ListExit[ a + 2 * (ia) ]
#define soa_A_fp(a,icface,c,zone) soa_A_fp[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_ez(a,icface,c,zone) soa_A_ez[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define omega_A_fp(icface,c,zone) omega_A_fp[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define omega_A_ez(icface,c,zone) omega_A_ez[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect(a,icface,c,zone) soa_Connect[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define psifp(ig,jf) psifp[(ig) + WARP_SIZE * (jf)]
#define psib(ig,b,c) psib[(ig) + Groups * ((b) + nbelem * (c) )]
#define psic(ig,b,c) psic[(ig) + Groups * ((b) + ncornr *(c) )]
#define Q(ig,c) Q[(ig) + WARP_SIZE * (c)]
#define src(ig,c) src[c]
#define soa_Sigt(ig,zone) soa_Sigt[(ig) + Groups * (zone)]
#define soa_Volume(c,zone) soa_Volume[c + size_maxCorner * (zone)]
#define soa_SigtInv(ig,zone) soa_SigtInv[(ig) + Groups * (zone)]
#define soa_STotal(ig,c,zone) soa_STotal[ig + Groups * ( c + size_maxCorner * (zone) )]
#define STimeBatch(ig,ic,Angle) STimeBatch[ig + Groups * ( (ic) + ncornr * (Angle) ) ]
#define nextZ(a,b) nextZ[ (a) + nzones * (b) ]
#define next(a,b) next[ (a) + (ncornr+1) * (b) ]
//int mm = blockIdx.x;
int Angle = AngleOrder[blockIdx.x]-1;
ig = threadIdx.x;
omega_A_fp += blockIdx.x * nzones * size_maxcf * size_maxCorner;
omega_A_ez += blockIdx.x * nzones * size_maxcf * size_maxCorner;
passZ += Angle * nzones;
const int group_offset=blockIdx.y * WARP_SIZE; //should be blockDim.x instead of warpsize?
// if (!( group_offset + threadIdx.x < Groups )) return;
psib += group_offset;
psic += group_offset;
soa_Sigt += group_offset;
soa_STotal += group_offset;
soa_SigtInv += group_offset;
STimeBatch += group_offset;
int ndone = 0;
int ndoneZ = 0;
// hyperplane number p
int p=0;
while(ndoneZ < nzones)
{
//increment hyperplane
p++;
// get number of zones in this hyperplane
int passZcnt = passZ[p] - passZ[p-1];
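    // passZ is used here as a per-angle prefix sum over hyperplanes: passZ[p] - passZ[p-1]
    // is the number of zones that can be swept independently within hyperplane p.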
// for(int ii=threadIdx.y+blockDim.y*blockIdx.z; ii<passZcnt;ii+=blockDim.y*gridDim.z)
//for(int ii=blockIdx.z; ii<passZcnt;ii+=gridDim.z)
for(int ii=threadIdx.y; ii<passZcnt;ii+=blockDim.y)
{
ndone = ( ndoneZ + ii ) * size_maxCorner;
// get the zone (minus 1 so it is valid c index)
int zone = nextZ(ndoneZ+ii,Angle) - 1;
int nCorner = soa_nCorner[zone];
int nCFaces = soa_nCFaces[zone];
int c0 = soa_c0[zone] ;
double Sigt = soa_Sigt(ig,zone);
double r_soa_SightInv = soa_SigtInv(ig,zone);
double r_omega_A_fp;
double r_omega_A_ez;
int connect0,connect1,connect2;
      // coalesced loads into shared memory
if(threadIdx.x<nCorner) volume[threadIdx.x] = soa_Volume(threadIdx.x,zone);
// different threads hold values for different icface in registers instead of shared memory
// other threads can access the register values via a shuffle command.
      // But now, with only 16 threads (groups), there are not enough threads to hold all nCorner*nCFaces (3*8) values.
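      // For example, the A_fp coefficient for face icface of corner c is fetched below from
      // whichever lane owns it via shfl_d(r_omega_A_fp, icface + size_maxcf*c).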
if(threadIdx.x<nCorner*nCFaces)
{
int cc = size_maxcf * size_maxCorner;
r_omega_A_fp = omega_A_fp[threadIdx.x + cc * zone];
r_omega_A_ez = omega_A_ez[threadIdx.x + cc * zone];
connect0 = soa_Connect_ro[threadIdx.x + cc*(0 + 3*zone)];
connect1 = soa_Connect_ro[threadIdx.x + cc*(1 + 3*zone)];
connect2 = soa_Connect_ro[threadIdx.x + cc*(2 + 3*zone)];
}
//if(nCorner*nCFaces>blockDim.x){printf("Error: threads are not covering nCorner*nCFaces\n");abort;}
for(c=0;c<nCorner;c++)
{
double source;
//if(!calcSTime)
//{
source = soa_STotal(ig,c,zone) + STimeBatch(ig,c0+c,blockIdx.x);
//}
//else // first temp and flux iteration: compute STime, use it, and zero copy back to host.
//{
//double STime_temp = tau*psic(ig,c0+c,blockIdx.x);
//source = soa_STotal(ig,c,zone) + STime_temp;
//STime(ig,c0+c,Angle) = STime_temp;
//}
Q[c] = r_soa_SightInv *source ;
//src(ig,c) = soa_Volume(c,zone) *source;
//volume[c] = soa_Volume(c,zone);
src(ig,c) = volume[c]*source; // really just src[c]
//SigtVol(ig,c) = soa_Sigt(ig,zone)*soa_Volume(c,zone);
}
for(i=0;i<nCorner;i++)
{
int ic = next(ndone+i,Angle);
c = ic - c0 - 1;
sumArea = 0.0;
for(icface=0;icface<nCFaces;icface++)
{
//afpm[icface] = omega_A_fp(icface,c,zone);
r_afpm = shfl_d(r_omega_A_fp,icface+size_maxcf*c);
// if ( Angle == 1 && ig==0 && zone == 1 )
// printf("a=%d,c=%d,icface=%d,afpm=%e\n",Angle,c,icface,r_afpm);
// int icfp = soa_Connect(0,icface,c,zone) - 1;
// int ib = soa_Connect(1,icface,c,zone) - 1;
int icfp= __shfl(connect0,icface+size_maxcf*c) - 1;
int ib= __shfl(connect1,icface+size_maxcf*c) - 1;
if ( r_afpm >= 0.0 )
{
sumArea = sumArea + r_afpm;
}
else
{
if (icfp == -1)
{
// psifp(ig,icface) = psib(ig,ib,Angle);
r_psifp = psib(ig,ib,blockIdx.x);
}
else
{
// psifp(ig,icface) = psic(ig,icfp,Angle);
r_psifp = psic(ig,icfp,blockIdx.x);
}
src(ig,c) -= r_afpm*r_psifp;
psifp(ig,icface) = r_psifp;
//psifp[icface] = r_psifp;
}
}
int nxez = 0;
for(icface=0;icface<nCFaces;icface++)
{
//double aez = omega_A_ez(icface,c,zone);
double aez = shfl_d(r_omega_A_ez,icface+size_maxcf*c);
if (aez > 0.0 )
{
sumArea = sumArea + aez;
area_opp = .0;
// cez = soa_Connect(2,icface,c,zone) - 1;
cez = __shfl(connect2,icface+size_maxcf*c) - 1;
ez_exit[nxez] = cez;
coefpsic[nxez] = aez;
nxez = nxez + 1;
if (nCFaces == 3)
{
ifp = (icface+1)%nCFaces;
r_afpm = shfl_d(r_omega_A_fp,ifp+size_maxcf*c);
if ( r_afpm < 0.0 )
{
area_opp = -r_afpm;
psi_opp = psifp(ig,ifp);
//psi_opp = psifp[ifp];
}
}
else
{
ifp = icface;
area_opp = 0.0;
psi_opp = 0.0;
for(k=0;k<nCFaces-2;k++)
{
ifp = (ifp+1)%nCFaces;
r_afpm = shfl_d(r_omega_A_fp,ifp+size_maxcf*c);
if ( r_afpm < 0.0 )
{
area_opp = area_opp - r_afpm;
psi_opp = psi_opp - r_afpm*psifp(ig,ifp);
//psi_opp = psi_opp - r_afpm*psifp[ifp];
}
}
area_inv = 1.0/area_opp;
psi_opp = psi_opp*area_inv;
}
if (area_opp > 0.0) {
double aez2 = aez*aez;
{
double sigv = Sigt*volume[c];
double sigv2 = sigv*sigv;
double gnum = aez2*( fouralpha*sigv2 + aez*(4.0*sigv + 3.0*aez) );
double gtau = gnum/( gnum + 4.0*sigv2*sigv2 + aez*sigv*(6.0*sigv2 + 2.0*aez*(2.0*sigv + aez)) ) ;
double sez = gtau*sigv*( psi_opp - Q[c] ) + 0.5*aez*(1.0 - gtau)*( Q[c] - Q[cez] );
src(ig,c) = src(ig,c) + sez;
src(ig,cez) = src(ig,cez) - sez;
}
}
else
{
double sez = 0.5*aez*( Q[c] - Q[cez] );
src(ig,c) = src(ig,c) + sez;
src(ig,cez) = src(ig,cez) - sez;
}
}
}
tpsic = src(ig,c)/(sumArea + Sigt*volume[c]);
for(icface=0;icface<nxez;icface++)
{
int cez = ez_exit[icface];
src(ig,cez) = src(ig,cez) + coefpsic[icface]*tpsic;
}
//hope that ther is no self referencing
psic(ig,c0+c,blockIdx.x) = tpsic;
//psibatch(ig,c0+c,mm)= tpsic;
} //end of corner
} //end of zone loop
ndoneZ += passZcnt;
__syncthreads();
} //end of while
}
__global__ void GPU_fp_ez_hplane(
int size_maxCorner,
int size_maxcf,
int nzones,
int ncornr,
int Groups,
int nbelem,
int* AngleOrder,
double* soa_omega,
int* nextZ,
int* next,
int* soa_nCorner,
int* soa_nCFaces,
int* soa_c0,
double* soa_A_fp,
double* soa_A_ez,
double* omega_A_fp,
double* omega_A_ez,
int* soa_Connect,
int* soa_Connect_ro,
int* passZ)
{
//int c,i,ig,icface,ii;
int c,i,icface;
#define soa_omega(a,b) soa_omega[a + 3 * b]
#define omega_A_fp(icface,c,zone) omega_A_fp[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define omega_A_ez(icface,c,zone) omega_A_ez[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_fp(a,icface,c,zone) soa_A_fp[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_ez(a,icface,c,zone) soa_A_ez[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect(a,icface,c,zone) soa_Connect[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect_ro(a,icface,c,zone) soa_Connect_ro[ icface + size_maxcf * ( c + size_maxCorner * ( a + 3 * zone) ) ]
#define nextZ(a,b) nextZ[ (a) + nzones * (b) ]
#define next(a,b) next[ (a) + (ncornr+1) * (b) ]
// for(int Angle=0;Angle<nAngle;Angle++)
//int Angle = blockIdx.x;
int Angle = AngleOrder[blockIdx.x]-1;
double omega0, omega1, omega2;
omega0 = soa_omega(0,Angle);
omega1 = soa_omega(1,Angle);
omega2 = soa_omega(2,Angle);
omega_A_fp += blockIdx.x * nzones * size_maxcf * size_maxCorner;
omega_A_ez += blockIdx.x * nzones * size_maxcf * size_maxCorner;
int ndone = 0;
int ndoneZ = 0;
// hyperplane number p
int p=0;
while(ndoneZ < nzones)
{
//increment hyperplane
p++;
// get number of zones in this hyperplane
int passZcnt = passZ[p] - passZ[p-1];
// you can print hyperplanes for visualization
//if( Angle == 0 && threadIdx.x==0) printf("%d \t %d\n",p,passZcnt);
//for(int ii=threadIdx.x+blockIdx.y*blockDim.x;ii<passZcnt;ii+=blockDim.x*gridDim.y)
for(int ii=threadIdx.x;ii<passZcnt;ii+=blockDim.x)
{
ndone = ( ndoneZ + ii ) * size_maxCorner;
// get the zone (minus 1 so it is valid c index)
int zone = nextZ(ndoneZ+ii,Angle) - 1;
int nCorner = soa_nCorner[zone];
int nCFaces = soa_nCFaces[zone];
int c0 = soa_c0[zone] ;
for(i=0;i<nCorner;i++)
{
int ic = next(ndone+i,Angle);
c = ic - c0 - 1;
for(icface=0;icface<nCFaces;icface++)
{
omega_A_fp(icface,c,zone) = omega0*soa_A_fp(0,icface,c,zone) +
omega1*soa_A_fp(1,icface,c,zone) +
omega2*soa_A_fp(2,icface,c,zone);
// could get rid of below if new order was used originally?
int icfp = soa_Connect(0,icface,c,zone);
int ib = soa_Connect(1,icface,c,zone);
int cez = soa_Connect(2,icface,c,zone);
soa_Connect_ro(0,icface,c,zone) = icfp;
soa_Connect_ro(1,icface,c,zone) = ib ;
soa_Connect_ro(2,icface,c,zone) = cez ;
}
for(icface=0;icface<nCFaces;icface++)
{
omega_A_ez(icface,c,zone) = omega0*soa_A_ez(0,icface,c,zone) + omega1*soa_A_ez(1,icface,c,zone) + omega2*soa_A_ez(2,icface,c,zone) ;
}
} // end corners
} // end zones in hplane
ndoneZ += passZcnt;
__syncthreads();
//ndone = ndone + nCorner;
}//end while
}//end function
}
| 5068ccd36650e8512c93705aca062f85bae8e197.cu | #include <stdio.h>
#define WARP_SIZE 32
#define GROUPSET 16
#define NUMFACES 3
#define fouralpha 1.82
#define fouralpha4 5.82
#define Connect(a,b,c) Connect[ a + 3 * ( b + mC * c ) ]
__device__ __forceinline__ double shfl_d(double var,int lane)
{ float lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=f"(lo), "=f"(hi) : "d"(var));
hi = __shfl(hi, lane);
lo = __shfl(lo, lane);
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "f"(lo), "f"(hi));
return var;
}
extern "C"
{
__global__ void GPU_sweep(
int size_maxCorner,
int size_maxcf,
int nAngle,
int nzones,
int ncornr,
int Groups,
int nbelem,
int* AngleOrder,
double* soa_omega,
int* nextZ,
int* next,
int* soa_nCorner,
int* soa_nCFaces,
int* soa_c0,
double* soa_STotal,
double* STimeBatch,
double* soa_SigtInv,
double* soa_Volume,
double* soa_Sigt,
double* soa_A_fp,
double* soa_A_ez,
int* soa_Connect,
double* psic,
double* psib,
double* omega_A_fp,
double* omega_A_ez,
int* soa_Connect_ro,
int* passZ,
bool calcSTime,
double tau
)
{
// double omega[3];
int c,ig,i,icface,ifp,cez,k;
// double Q[Groups * size_maxCorner];
// double src[Groups * size_maxCorner];
// double SigtVol[Groups * size_maxCorner];
// double afpm[size_maxcf];
// double psifp[Groups * size_maxcf];
// int ez_exit[size_maxcf];
// double coefpsic[size_maxcf];
// double tpsic[Groups * size_maxCorner];
// double psi_opp[Groups];
double area_opp,area_inv,sumArea;
double r_psifp;
double psi_opp,tpsic,r_afpm;
double Q[8];
double src[8];
//double volume[8];
//double coefpsic_stk[3];
//double psifp[3];
//int ez_exit[3];
//double *src;
volatile double *volume;
volatile double *coefpsic;
volatile double *psifp;
volatile int *ez_exit;
//__shared__ volatile double sm_agg[12*128]; // 4x32 thread per tb. 8tb. 6KB
extern __shared__ double sm_agg[];
int offset = (8+3+3*WARP_SIZE+3)*threadIdx.y;
volume = &(sm_agg[offset]); //8 doubles
offset += size_maxCorner;
coefpsic = &(sm_agg[offset]); // 3 doubles
offset += size_maxcf;
psifp = &(sm_agg[offset]); // 3 x warp size doubles
offset += size_maxcf * WARP_SIZE;
//note ez_exit has integer type
ez_exit = (int*) &(sm_agg[offset]); // 3 int
// for(int Angle=0;Angle<nAngle;Angle++)
// const double fouralpha = 1.82;
// const double fouralpha4 = 5.82;
#define soa_omega(a,b) soa_omega[a + 3 * b]
// #define tpsic(ig,c) tpsic[ (ig) + Groups * (c)]
#define EB_ListExit(a,ia) EB_ListExit[ a + 2 * (ia) ]
#define soa_A_fp(a,icface,c,zone) soa_A_fp[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_ez(a,icface,c,zone) soa_A_ez[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define omega_A_fp(icface,c,zone) omega_A_fp[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define omega_A_ez(icface,c,zone) omega_A_ez[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect(a,icface,c,zone) soa_Connect[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define psifp(ig,jf) psifp[(ig) + WARP_SIZE * (jf)]
#define psib(ig,b,c) psib[(ig) + Groups * ((b) + nbelem * (c) )]
#define psic(ig,b,c) psic[(ig) + Groups * ((b) + ncornr *(c) )]
#define Q(ig,c) Q[(ig) + WARP_SIZE * (c)]
#define src(ig,c) src[c]
#define soa_Sigt(ig,zone) soa_Sigt[(ig) + Groups * (zone)]
#define soa_Volume(c,zone) soa_Volume[c + size_maxCorner * (zone)]
#define soa_SigtInv(ig,zone) soa_SigtInv[(ig) + Groups * (zone)]
#define soa_STotal(ig,c,zone) soa_STotal[ig + Groups * ( c + size_maxCorner * (zone) )]
#define STimeBatch(ig,ic,Angle) STimeBatch[ig + Groups * ( (ic) + ncornr * (Angle) ) ]
#define nextZ(a,b) nextZ[ (a) + nzones * (b) ]
#define next(a,b) next[ (a) + (ncornr+1) * (b) ]
//int mm = blockIdx.x;
int Angle = AngleOrder[blockIdx.x]-1;
ig = threadIdx.x;
omega_A_fp += blockIdx.x * nzones * size_maxcf * size_maxCorner;
omega_A_ez += blockIdx.x * nzones * size_maxcf * size_maxCorner;
passZ += Angle * nzones;
const int group_offset=blockIdx.y * WARP_SIZE; //should be blockDim.x instead of warpsize?
// if (!( group_offset + threadIdx.x < Groups )) return;
psib += group_offset;
psic += group_offset;
soa_Sigt += group_offset;
soa_STotal += group_offset;
soa_SigtInv += group_offset;
STimeBatch += group_offset;
int ndone = 0;
int ndoneZ = 0;
// hyperplane number p
int p=0;
while(ndoneZ < nzones)
{
//increment hyperplane
p++;
// get number of zones in this hyperplane
int passZcnt = passZ[p] - passZ[p-1];
// for(int ii=threadIdx.y+blockDim.y*blockIdx.z; ii<passZcnt;ii+=blockDim.y*gridDim.z)
//for(int ii=blockIdx.z; ii<passZcnt;ii+=gridDim.z)
for(int ii=threadIdx.y; ii<passZcnt;ii+=blockDim.y)
{
ndone = ( ndoneZ + ii ) * size_maxCorner;
// get the zone (minus 1 so it is valid c index)
int zone = nextZ(ndoneZ+ii,Angle) - 1;
int nCorner = soa_nCorner[zone];
int nCFaces = soa_nCFaces[zone];
int c0 = soa_c0[zone] ;
double Sigt = soa_Sigt(ig,zone);
double r_soa_SightInv = soa_SigtInv(ig,zone);
double r_omega_A_fp;
double r_omega_A_ez;
int connect0,connect1,connect2;
// coalesced loads into shared memory
if(threadIdx.x<nCorner) volume[threadIdx.x] = soa_Volume(threadIdx.x,zone);
// different threads hold values for different icface in registers instead of shared memory
// other threads can access the register values via a shuffle command.
//But now with only 16 threads (groups) there are not enough threads to hold nCorner*nCFaces (3*8)
if(threadIdx.x<nCorner*nCFaces)
{
int cc = size_maxcf * size_maxCorner;
r_omega_A_fp = omega_A_fp[threadIdx.x + cc * zone];
r_omega_A_ez = omega_A_ez[threadIdx.x + cc * zone];
connect0 = soa_Connect_ro[threadIdx.x + cc*(0 + 3*zone)];
connect1 = soa_Connect_ro[threadIdx.x + cc*(1 + 3*zone)];
connect2 = soa_Connect_ro[threadIdx.x + cc*(2 + 3*zone)];
}
//if(nCorner*nCFaces>blockDim.x){printf("Error: threads are not covering nCorner*nCFaces\n");abort;}
for(c=0;c<nCorner;c++)
{
double source;
//if(!calcSTime)
//{
source = soa_STotal(ig,c,zone) + STimeBatch(ig,c0+c,blockIdx.x);
//}
//else // first temp and flux iteration: compute STime, use it, and zero copy back to host.
//{
//double STime_temp = tau*psic(ig,c0+c,blockIdx.x);
//source = soa_STotal(ig,c,zone) + STime_temp;
//STime(ig,c0+c,Angle) = STime_temp;
//}
Q[c] = r_soa_SightInv *source ;
//src(ig,c) = soa_Volume(c,zone) *source;
//volume[c] = soa_Volume(c,zone);
src(ig,c) = volume[c]*source; // really just src[c]
//SigtVol(ig,c) = soa_Sigt(ig,zone)*soa_Volume(c,zone);
}
for(i=0;i<nCorner;i++)
{
int ic = next(ndone+i,Angle);
c = ic - c0 - 1;
sumArea = 0.0;
for(icface=0;icface<nCFaces;icface++)
{
//afpm[icface] = omega_A_fp(icface,c,zone);
r_afpm = shfl_d(r_omega_A_fp,icface+size_maxcf*c);
// if ( Angle == 1 && ig==0 && zone == 1 )
// printf("a=%d,c=%d,icface=%d,afpm=%e\n",Angle,c,icface,r_afpm);
// int icfp = soa_Connect(0,icface,c,zone) - 1;
// int ib = soa_Connect(1,icface,c,zone) - 1;
int icfp= __shfl(connect0,icface+size_maxcf*c) - 1;
int ib= __shfl(connect1,icface+size_maxcf*c) - 1;
if ( r_afpm >= 0.0 )
{
sumArea = sumArea + r_afpm;
}
else
{
if (icfp == -1)
{
// psifp(ig,icface) = psib(ig,ib,Angle);
r_psifp = psib(ig,ib,blockIdx.x);
}
else
{
// psifp(ig,icface) = psic(ig,icfp,Angle);
r_psifp = psic(ig,icfp,blockIdx.x);
}
src(ig,c) -= r_afpm*r_psifp;
psifp(ig,icface) = r_psifp;
//psifp[icface] = r_psifp;
}
}
int nxez = 0;
for(icface=0;icface<nCFaces;icface++)
{
//double aez = omega_A_ez(icface,c,zone);
double aez = shfl_d(r_omega_A_ez,icface+size_maxcf*c);
if (aez > 0.0 )
{
sumArea = sumArea + aez;
area_opp = .0;
// cez = soa_Connect(2,icface,c,zone) - 1;
cez = __shfl(connect2,icface+size_maxcf*c) - 1;
ez_exit[nxez] = cez;
coefpsic[nxez] = aez;
nxez = nxez + 1;
if (nCFaces == 3)
{
ifp = (icface+1)%nCFaces;
r_afpm = shfl_d(r_omega_A_fp,ifp+size_maxcf*c);
if ( r_afpm < 0.0 )
{
area_opp = -r_afpm;
psi_opp = psifp(ig,ifp);
//psi_opp = psifp[ifp];
}
}
else
{
ifp = icface;
area_opp = 0.0;
psi_opp = 0.0;
for(k=0;k<nCFaces-2;k++)
{
ifp = (ifp+1)%nCFaces;
r_afpm = shfl_d(r_omega_A_fp,ifp+size_maxcf*c);
if ( r_afpm < 0.0 )
{
area_opp = area_opp - r_afpm;
psi_opp = psi_opp - r_afpm*psifp(ig,ifp);
//psi_opp = psi_opp - r_afpm*psifp[ifp];
}
}
area_inv = 1.0/area_opp;
psi_opp = psi_opp*area_inv;
}
if (area_opp > 0.0) {
double aez2 = aez*aez;
{
double sigv = Sigt*volume[c];
double sigv2 = sigv*sigv;
double gnum = aez2*( fouralpha*sigv2 + aez*(4.0*sigv + 3.0*aez) );
double gtau = gnum/( gnum + 4.0*sigv2*sigv2 + aez*sigv*(6.0*sigv2 + 2.0*aez*(2.0*sigv + aez)) ) ;
double sez = gtau*sigv*( psi_opp - Q[c] ) + 0.5*aez*(1.0 - gtau)*( Q[c] - Q[cez] );
src(ig,c) = src(ig,c) + sez;
src(ig,cez) = src(ig,cez) - sez;
}
}
else
{
double sez = 0.5*aez*( Q[c] - Q[cez] );
src(ig,c) = src(ig,c) + sez;
src(ig,cez) = src(ig,cez) - sez;
}
}
}
tpsic = src(ig,c)/(sumArea + Sigt*volume[c]);
for(icface=0;icface<nxez;icface++)
{
int cez = ez_exit[icface];
src(ig,cez) = src(ig,cez) + coefpsic[icface]*tpsic;
}
//hope that there is no self-referencing
psic(ig,c0+c,blockIdx.x) = tpsic;
//psibatch(ig,c0+c,mm)= tpsic;
} //end of corner
} //end of zone loop
ndoneZ += passZcnt;
__syncthreads();
} //end of while
}
__global__ void GPU_fp_ez_hplane(
int size_maxCorner,
int size_maxcf,
int nzones,
int ncornr,
int Groups,
int nbelem,
int* AngleOrder,
double* soa_omega,
int* nextZ,
int* next,
int* soa_nCorner,
int* soa_nCFaces,
int* soa_c0,
double* soa_A_fp,
double* soa_A_ez,
double* omega_A_fp,
double* omega_A_ez,
int* soa_Connect,
int* soa_Connect_ro,
int* passZ)
{
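// Precompute, for this angle, the dot products omega.A for the fp and ez faces of
// every corner and repack soa_Connect into the read-optimized layout
// (soa_Connect_ro) consumed by the sweep kernel, visiting zones in the same
// hyperplane order as the sweep itself.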
//int c,i,ig,icface,ii;
int c,i,icface;
#define soa_omega(a,b) soa_omega[a + 3 * b]
#define omega_A_fp(icface,c,zone) omega_A_fp[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define omega_A_ez(icface,c,zone) omega_A_ez[ ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_fp(a,icface,c,zone) soa_A_fp[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_ez(a,icface,c,zone) soa_A_ez[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect(a,icface,c,zone) soa_Connect[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect_ro(a,icface,c,zone) soa_Connect_ro[ icface + size_maxcf * ( c + size_maxCorner * ( a + 3 * zone) ) ]
#define nextZ(a,b) nextZ[ (a) + nzones * (b) ]
#define next(a,b) next[ (a) + (ncornr+1) * (b) ]
// for(int Angle=0;Angle<nAngle;Angle++)
//int Angle = blockIdx.x;
int Angle = AngleOrder[blockIdx.x]-1;
double omega0, omega1, omega2;
omega0 = soa_omega(0,Angle);
omega1 = soa_omega(1,Angle);
omega2 = soa_omega(2,Angle);
omega_A_fp += blockIdx.x * nzones * size_maxcf * size_maxCorner;
omega_A_ez += blockIdx.x * nzones * size_maxcf * size_maxCorner;
int ndone = 0;
int ndoneZ = 0;
// hyperplane number p
int p=0;
while(ndoneZ < nzones)
{
//increment hyperplane
p++;
// get number of zones in this hyperplane
int passZcnt = passZ[p] - passZ[p-1];
// you can print hyperplanes for visualization
//if( Angle == 0 && threadIdx.x==0) printf("%d \t %d\n",p,passZcnt);
//for(int ii=threadIdx.x+blockIdx.y*blockDim.x;ii<passZcnt;ii+=blockDim.x*gridDim.y)
for(int ii=threadIdx.x;ii<passZcnt;ii+=blockDim.x)
{
ndone = ( ndoneZ + ii ) * size_maxCorner;
// get the zone (minus 1 so it is valid c index)
int zone = nextZ(ndoneZ+ii,Angle) - 1;
int nCorner = soa_nCorner[zone];
int nCFaces = soa_nCFaces[zone];
int c0 = soa_c0[zone] ;
for(i=0;i<nCorner;i++)
{
int ic = next(ndone+i,Angle);
c = ic - c0 - 1;
for(icface=0;icface<nCFaces;icface++)
{
omega_A_fp(icface,c,zone) = omega0*soa_A_fp(0,icface,c,zone) +
omega1*soa_A_fp(1,icface,c,zone) +
omega2*soa_A_fp(2,icface,c,zone);
// could get rid of below if new order was used originally?
int icfp = soa_Connect(0,icface,c,zone);
int ib = soa_Connect(1,icface,c,zone);
int cez = soa_Connect(2,icface,c,zone);
soa_Connect_ro(0,icface,c,zone) = icfp;
soa_Connect_ro(1,icface,c,zone) = ib ;
soa_Connect_ro(2,icface,c,zone) = cez ;
}
for(icface=0;icface<nCFaces;icface++)
{
omega_A_ez(icface,c,zone) = omega0*soa_A_ez(0,icface,c,zone) + omega1*soa_A_ez(1,icface,c,zone) + omega2*soa_A_ez(2,icface,c,zone) ;
}
} // end corners
} // end zones in hplane
ndoneZ += passZcnt;
__syncthreads();
//ndone = ndone + nCorner;
}//end while
}//end function
}
|
b8b05eaff2460adbef6830bab489ba6d962dd293.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define NUM_ROWS 10000
#define NUM_COLS 1000
int ha[NUM_ROWS][NUM_COLS] ;
int hb[NUM_ROWS][NUM_COLS] ;
int hc[NUM_ROWS][NUM_COLS] ;
__global__ void add(int* da, int* db, int* dc){
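    // Grid-stride loop: each thread steps through the flattened matrix by the total
    // number of launched threads, so even a small launch covers all NUM_ROWS*NUM_COLS elements.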
int tid = blockDim.x * blockDim.y * (blockIdx.y * gridDim.x + blockIdx.x) + (threadIdx.y * blockDim.x + threadIdx.x);
while(tid<NUM_ROWS* NUM_COLS){
dc[tid] = da[tid]+ db[tid];
tid = tid + blockDim.x * gridDim.x * blockDim.y * gridDim.y;
}
}
int main(){
int *da;
int *db;
int *dc;
int iSize = NUM_ROWS * NUM_COLS * sizeof(int) ;
hipError_t cuError = hipSuccess;
dim3 dimGrid (1, 1, 1) ;
dim3 dimBlock (512, 1, 1) ;
for(int i=0;i<NUM_ROWS;i++){
for(int j=0;j<NUM_COLS;j++){
ha[i][j]=rand()%10+1;
hb[i][j]=rand()%10+1;
}
}
cuError = hipMalloc((void**)&da, iSize) ;
if (hipSuccess != cuError){
printf ("Failed to allocate memory\n") ;
return 1 ;
}
cuError = hipMemcpy(da, ha, iSize, hipMemcpyHostToDevice);
if (hipSuccess != cuError){
hipFree (da) ;
printf ("Failed in Memcpy 1\n") ;
return 1 ;
}
cuError = hipMalloc((void**)&db, iSize) ;
if (hipSuccess != cuError){
printf ("Failed to allocate memory\n") ;
return 1 ;
}
cuError = hipMemcpy(db, hb, iSize, hipMemcpyHostToDevice);
if (hipSuccess != cuError){
hipFree (db) ;
printf ("Failed in Memcpy 1\n") ;
return 1 ;
}
cuError = hipMalloc((void**)&dc, iSize) ;
if (hipSuccess != cuError){
printf ("Failed to allocate memory\n") ;
return 1 ;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( add), dim3(dimGrid), dim3(dimBlock), 0, 0, da, db, dc);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
cuError = hipGetLastError () ;
if (hipSuccess != cuError){
printf ("Failed in kernel launch and reason is %s\n", hipGetErrorString(cuError)) ;
return 1 ;
}
cuError = hipMemcpy(hc, dc, iSize, hipMemcpyDeviceToHost);
if (hipSuccess != cuError){
hipFree (dc) ;
printf ("Failed in Memcpy 2\n") ;
return 1 ;
}
bool success = true;
for(int i=0;i<NUM_ROWS;i++){
for(int j=0;j<NUM_COLS;j++){
if ((ha[i][j] + hb[i][j]) != hc[i][j]) {
printf( "Error: %d + %d != %d\n", ha[i][j], hb[i][j], hc[i][j] );
success = false;
}
}
}
if (success) printf( "We did it!\n" );
hipFree (da) ;
hipFree (db) ;
hipFree (dc) ;
return 0;
} | b8b05eaff2460adbef6830bab489ba6d962dd293.cu | #include<stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define NUM_ROWS 10000
#define NUM_COLS 1000
int ha[NUM_ROWS][NUM_COLS] ;
int hb[NUM_ROWS][NUM_COLS] ;
int hc[NUM_ROWS][NUM_COLS] ;
__global__ void add(int* da, int* db, int* dc){
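    // Grid-stride loop: each thread steps through the flattened matrix by the total
    // number of launched threads, so even a small launch covers all NUM_ROWS*NUM_COLS elements.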
int tid = blockDim.x * blockDim.y * (blockIdx.y * gridDim.x + blockIdx.x) + (threadIdx.y * blockDim.x + threadIdx.x);
while(tid<NUM_ROWS* NUM_COLS){
dc[tid] = da[tid]+ db[tid];
tid = tid + blockDim.x * gridDim.x * blockDim.y * gridDim.y;
}
}
int main(){
int *da;
int *db;
int *dc;
int iSize = NUM_ROWS * NUM_COLS * sizeof(int) ;
cudaError_t cuError = cudaSuccess;
dim3 dimGrid (1, 1, 1) ;
dim3 dimBlock (512, 1, 1) ;
for(int i=0;i<NUM_ROWS;i++){
for(int j=0;j<NUM_COLS;j++){
ha[i][j]=rand()%10+1;
hb[i][j]=rand()%10+1;
}
}
cuError = cudaMalloc((void**)&da, iSize) ;
if (cudaSuccess != cuError){
printf ("Failed to allocate memory\n") ;
return 1 ;
}
cuError = cudaMemcpy(da, ha, iSize, cudaMemcpyHostToDevice);
if (cudaSuccess != cuError){
cudaFree (da) ;
printf ("Failed in Memcpy 1\n") ;
return 1 ;
}
cuError = cudaMalloc((void**)&db, iSize) ;
if (cudaSuccess != cuError){
printf ("Failed to allocate memory\n") ;
return 1 ;
}
cuError = cudaMemcpy(db, hb, iSize, cudaMemcpyHostToDevice);
if (cudaSuccess != cuError){
cudaFree (db) ;
printf ("Failed in Memcpy 1\n") ;
return 1 ;
}
cuError = cudaMalloc((void**)&dc, iSize) ;
if (cudaSuccess != cuError){
printf ("Failed to allocate memory\n") ;
return 1 ;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add<<<dimGrid, dimBlock>>>(da, db, dc);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cuError = cudaGetLastError () ;
if (cudaSuccess != cuError){
printf ("Failed in kernel launch and reason is %s\n", cudaGetErrorString(cuError)) ;
return 1 ;
}
cuError = cudaMemcpy(hc, dc, iSize, cudaMemcpyDeviceToHost);
if (cudaSuccess != cuError){
cudaFree (dc) ;
printf ("Failed in Memcpy 2\n") ;
return 1 ;
}
bool success = true;
for(int i=0;i<NUM_ROWS;i++){
for(int j=0;j<NUM_COLS;j++){
if ((ha[i][j] + hb[i][j]) != hc[i][j]) {
printf( "Error: %d + %d != %d\n", ha[i][j], hb[i][j], hc[i][j] );
success = false;
}
}
}
if (success) printf( "We did it!\n" );
cudaFree (da) ;
cudaFree (db) ;
cudaFree (dc) ;
return 0;
} |
d001321f41585928384be0eed8f6a7644a2b4d5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void MD_Iteration_Leap_Frog_With_LiuJian(const int atom_numbers, const float half_dt, const float dt,
const float exp_gamma, const float *inverse_mass,
const float *sqrt_mass_inverse, VECTOR *vel, VECTOR *crd,
VECTOR *frc, VECTOR *acc, VECTOR *random_frc) {
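  // One Langevin leap-frog step per atom (Liu-Jian "middle" scheme): full-dt
  // velocity update from the current force, half-dt position drift, velocity
  // friction (exp_gamma) plus the Gaussian random kick, a second half-dt drift,
  // and finally the force accumulator is cleared for the next step.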
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < atom_numbers) {
acc[i].x = inverse_mass[i] * frc[i].x;
acc[i].y = inverse_mass[i] * frc[i].y;
acc[i].z = inverse_mass[i] * frc[i].z;
vel[i].x = vel[i].x + dt * acc[i].x;
vel[i].y = vel[i].y + dt * acc[i].y;
vel[i].z = vel[i].z + dt * acc[i].z;
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
vel[i].x = exp_gamma * vel[i].x + sqrt_mass_inverse[i] * random_frc[i].x;
vel[i].y = exp_gamma * vel[i].y + sqrt_mass_inverse[i] * random_frc[i].y;
vel[i].z = exp_gamma * vel[i].z + sqrt_mass_inverse[i] * random_frc[i].z;
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
frc[i].x = 0.;
frc[i].y = 0.;
frc[i].z = 0.;
}
}
__global__ void MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Velocity(const int atom_numbers, const float half_dt,
const float dt, const float exp_gamma,
const float *inverse_mass,
const float *sqrt_mass_inverse, VECTOR *vel,
VECTOR *crd, VECTOR *frc, VECTOR *acc,
VECTOR *random_frc, const float max_vel) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float abs_vel;
if (i < atom_numbers) {
acc[i].x = inverse_mass[i] * frc[i].x;
acc[i].y = inverse_mass[i] * frc[i].y;
acc[i].z = inverse_mass[i] * frc[i].z;
vel[i].x = vel[i].x + dt * acc[i].x;
vel[i].y = vel[i].y + dt * acc[i].y;
vel[i].z = vel[i].z + dt * acc[i].z;
abs_vel = norm3df(vel[i].x, vel[i].y, vel[i].z);
if (abs_vel < max_vel) {
} else {
abs_vel = max_vel / abs_vel;
vel[i].x = abs_vel * vel[i].x;
vel[i].y = abs_vel * vel[i].y;
vel[i].z = abs_vel * vel[i].z;
}
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
vel[i].x = exp_gamma * vel[i].x + sqrt_mass_inverse[i] * random_frc[i].x;
vel[i].y = exp_gamma * vel[i].y + sqrt_mass_inverse[i] * random_frc[i].y;
vel[i].z = exp_gamma * vel[i].z + sqrt_mass_inverse[i] * random_frc[i].z;
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
frc[i].x = 0.;
frc[i].y = 0.;
frc[i].z = 0.;
}
}
void MDIterationLeapFrog(const int float4_numbers, const int atom_numbers, const float half_dt, const float dt,
const float exp_gamma, const int is_max_velocity, const float max_velocity,
const float *d_mass_inverse, const float *d_sqrt_mass, float *vel_f, float *crd_f,
float *frc_f, float *acc_f, hipStream_t stream) {
hipLaunchKernelGGL(( Reset_List), dim3(ceilf(static_cast<float>(3. * atom_numbers) / 128)), dim3(128), 0, 0, 3 * atom_numbers, acc_f, 0.);
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *vel = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(vel_f));
VECTOR *acc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(acc_f));
VECTOR *crd = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_f));
hiprandStatePhilox4_32_10_t *rand_state;
VECTOR *random_force;
Cuda_Malloc_Safely(reinterpret_cast<void **>(&random_force), sizeof(float4) * float4_numbers);
Cuda_Malloc_Safely(reinterpret_cast<void **>(&rand_state), sizeof(hiprandStatePhilox4_32_10_t) * float4_numbers);
hipLaunchKernelGGL(( Setup_Rand_Normal_Kernel), dim3(ceilf(static_cast<float>(float4_numbers) / 32.)), dim3(32), 0, 0, float4_numbers, rand_state, 1);
hipLaunchKernelGGL(( Rand_Normal), dim3(ceilf(static_cast<float>(float4_numbers) / 32.)), dim3(32), 0, stream,
float4_numbers, rand_state, reinterpret_cast<float4 *>(random_force));
  if (!is_max_velocity) {
    hipLaunchKernelGGL(( MD_Iteration_Leap_Frog_With_LiuJian), dim3(ceilf(static_cast<float>(atom_numbers) / 32)), dim3(32), 0, stream,
        atom_numbers, half_dt, dt, exp_gamma, d_mass_inverse, d_sqrt_mass, vel, crd, frc, acc, random_force);
  } else {
    hipLaunchKernelGGL(( MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Velocity), dim3(ceilf(static_cast<float>(atom_numbers) / 32)), dim3(32), 0,
                       stream, atom_numbers, half_dt, dt, exp_gamma,
                       d_mass_inverse, d_sqrt_mass, vel, crd, frc, acc,
                       random_force, max_velocity);
  }
  // Wait for the launched kernel to finish before releasing the temporary
  // random-force buffers; both branches use them.
  hipStreamSynchronize(stream);
  hipFree(random_force);
  hipFree(rand_state);
  return;
}
void MDIterationLeapFrog(const int float4_numbers, const int atom_numbers, const float half_dt, const float dt,
const float exp_gamma, const int is_max_velocity, const float max_velocity,
const float *d_mass_inverse, const float *d_sqrt_mass, float *vel_f, float *crd_f,
float *frc_f, float *acc_f, hipStream_t stream);
| d001321f41585928384be0eed8f6a7644a2b4d5d.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/nvtit/md_iteration_leap_frog_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void MD_Iteration_Leap_Frog_With_LiuJian(const int atom_numbers, const float half_dt, const float dt,
const float exp_gamma, const float *inverse_mass,
const float *sqrt_mass_inverse, VECTOR *vel, VECTOR *crd,
VECTOR *frc, VECTOR *acc, VECTOR *random_frc) {
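  // One Langevin leap-frog step per atom (Liu-Jian "middle" scheme): full-dt
  // velocity update from the current force, half-dt position drift, velocity
  // friction (exp_gamma) plus the Gaussian random kick, a second half-dt drift,
  // and finally the force accumulator is cleared for the next step.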
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < atom_numbers) {
acc[i].x = inverse_mass[i] * frc[i].x;
acc[i].y = inverse_mass[i] * frc[i].y;
acc[i].z = inverse_mass[i] * frc[i].z;
vel[i].x = vel[i].x + dt * acc[i].x;
vel[i].y = vel[i].y + dt * acc[i].y;
vel[i].z = vel[i].z + dt * acc[i].z;
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
vel[i].x = exp_gamma * vel[i].x + sqrt_mass_inverse[i] * random_frc[i].x;
vel[i].y = exp_gamma * vel[i].y + sqrt_mass_inverse[i] * random_frc[i].y;
vel[i].z = exp_gamma * vel[i].z + sqrt_mass_inverse[i] * random_frc[i].z;
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
frc[i].x = 0.;
frc[i].y = 0.;
frc[i].z = 0.;
}
}
__global__ void MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Velocity(const int atom_numbers, const float half_dt,
const float dt, const float exp_gamma,
const float *inverse_mass,
const float *sqrt_mass_inverse, VECTOR *vel,
VECTOR *crd, VECTOR *frc, VECTOR *acc,
VECTOR *random_frc, const float max_vel) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float abs_vel;
if (i < atom_numbers) {
acc[i].x = inverse_mass[i] * frc[i].x;
acc[i].y = inverse_mass[i] * frc[i].y;
acc[i].z = inverse_mass[i] * frc[i].z;
vel[i].x = vel[i].x + dt * acc[i].x;
vel[i].y = vel[i].y + dt * acc[i].y;
vel[i].z = vel[i].z + dt * acc[i].z;
abs_vel = norm3df(vel[i].x, vel[i].y, vel[i].z);
if (abs_vel < max_vel) {
} else {
abs_vel = max_vel / abs_vel;
vel[i].x = abs_vel * vel[i].x;
vel[i].y = abs_vel * vel[i].y;
vel[i].z = abs_vel * vel[i].z;
}
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
vel[i].x = exp_gamma * vel[i].x + sqrt_mass_inverse[i] * random_frc[i].x;
vel[i].y = exp_gamma * vel[i].y + sqrt_mass_inverse[i] * random_frc[i].y;
vel[i].z = exp_gamma * vel[i].z + sqrt_mass_inverse[i] * random_frc[i].z;
crd[i].x = crd[i].x + half_dt * vel[i].x;
crd[i].y = crd[i].y + half_dt * vel[i].y;
crd[i].z = crd[i].z + half_dt * vel[i].z;
frc[i].x = 0.;
frc[i].y = 0.;
frc[i].z = 0.;
}
}
void MDIterationLeapFrog(const int float4_numbers, const int atom_numbers, const float half_dt, const float dt,
const float exp_gamma, const int is_max_velocity, const float max_velocity,
const float *d_mass_inverse, const float *d_sqrt_mass, float *vel_f, float *crd_f,
float *frc_f, float *acc_f, cudaStream_t stream) {
Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128>>>(3 * atom_numbers, acc_f, 0.);
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *vel = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(vel_f));
VECTOR *acc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(acc_f));
VECTOR *crd = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(crd_f));
curandStatePhilox4_32_10_t *rand_state;
VECTOR *random_force;
Cuda_Malloc_Safely(reinterpret_cast<void **>(&random_force), sizeof(float4) * float4_numbers);
Cuda_Malloc_Safely(reinterpret_cast<void **>(&rand_state), sizeof(curandStatePhilox4_32_10_t) * float4_numbers);
Setup_Rand_Normal_Kernel<<<ceilf(static_cast<float>(float4_numbers) / 32.), 32>>>(float4_numbers, rand_state, 1);
Rand_Normal<<<ceilf(static_cast<float>(float4_numbers) / 32.), 32, 0, stream>>>(
float4_numbers, rand_state, reinterpret_cast<float4 *>(random_force));
  if (!is_max_velocity) {
    MD_Iteration_Leap_Frog_With_LiuJian<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
      atom_numbers, half_dt, dt, exp_gamma, d_mass_inverse, d_sqrt_mass, vel, crd, frc, acc, random_force);
  } else {
    MD_Iteration_Leap_Frog_With_LiuJian_With_Max_Velocity<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0,
                                                           stream>>>(atom_numbers, half_dt, dt, exp_gamma,
                                                                     d_mass_inverse, d_sqrt_mass, vel, crd, frc, acc,
                                                                     random_force, max_velocity);
  }
  // Wait for the launched kernel to finish before releasing the temporary
  // random-force buffers; both branches use them.
  cudaStreamSynchronize(stream);
  cudaFree(random_force);
  cudaFree(rand_state);
  return;
}
void MDIterationLeapFrog(const int float4_numbers, const int atom_numbers, const float half_dt, const float dt,
const float exp_gamma, const int is_max_velocity, const float max_velocity,
const float *d_mass_inverse, const float *d_sqrt_mass, float *vel_f, float *crd_f,
float *frc_f, float *acc_f, cudaStream_t stream);
|
d806cbd537212df61cf490253468378899a414fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scc.h"
#include<map>
#include<set>
#include<algorithm>
#include<vector>
#include "scc_kernels.h"
using namespace std;
typedef struct {
uint32_t u1, u2;
} Ele;
__device__ Ele devData[10001];
__device__ int devCount = 0;
__device__ int VecPushBack(Ele &e) {
int insertPt = atomicAdd(&devCount, 1);
if (insertPt < 10001){
devData[insertPt] = e;
return insertPt;
}
else return -1;
}
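// identifyTransEdges: for every vertex not yet assigned to an SCC (range not set)
// and not marked in Occ, scan its forward edges and append to the devData vector
// those edges whose endpoint lies in the partner partition (partition ids 2k and
// 2k+1 form a pair); entries beyond the fixed capacity are dropped by VecPushBack.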
__global__ void identifyTransEdges(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, uint32_t *Pr, bool *Occ){
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]) || Occ[row])
return;
uint32_t myPr = Pr[row];
uint32_t cnt = Fr[row + 1] - Fr[row];
const uint32_t *nbrs = &Fc[Fr[row]];
uint32_t frpr;
if(myPr & 1)
frpr = myPr - 1;
else
frpr = myPr + 1;
for ( uint32_t i = 0; i < cnt; i++ ) {
uint32_t index = nbrs[i];
if(!isRangeSet(tags[index]) && Pr[index] == frpr && !Occ[index]){
//printf("ROW: %d, INDEX: %d\n", row, index);
Ele e = { .u1 = row, .u2 = index };
VecPushBack(e);
}
}
}
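// wHong: GPU SCC decomposition, apparently following Hong et al.'s scheme:
// repeated trimming of trivial SCCs, a first forward/backward reachability phase
// from a single pivot using warp-per-vertex traversal kernels (warpSize threads
// cooperate on one adjacency list), WCC-based repartitioning of the remainder,
// and then the usual pivot-selection plus FW-BW loop with thread-per-vertex kernels.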
void wHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2, int warpSize){
    //Set the device which is exclusively used by this program
hipSetDevice(7);
float sccTime=0;
hipEvent_t sccTimeStart, sccTimeStop;
hipEventCreate(&sccTimeStart);
hipEventCreate(&sccTimeStop);
hipEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
hipError_t e1, e2, e3, e4, e5, e6, e7, e8, e9;
CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e9 = hipMalloc( (void**) &d_terminateb, sizeof(bool) ));
if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation ||
e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation ||
e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation ||
e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation || e9 == hipErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
dim3 gridfb;
if((RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = ceill(sqrt(RSize * warpSize / (double) BLOCKSIZE));
gridfb.x = dim;
gridfb.y = dim;
gridfb.z = 1;
}else{
gridfb.x = (RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE;
gridfb.y = 1;
gridfb.z = 1;
}
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = ceill(sqrt(RSize / (double) BLOCKSIZE));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
hipEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
hipEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
hipEventCreate(&bTimeStart);
hipEventCreate(&bTimeStop);
hipEventCreate(&pivotTimeStart);
hipEventCreate(&pivotTimeStop);
hipEventCreate(&trim1TimeStart);
hipEventCreate(&trim1TimeStop);
hipEventCreate(&trim2TimeStart);
hipEventCreate(&trim2TimeStop);
hipEventCreate(&updateTimeStart);
hipEventCreate(&updateTimeStop);
hipEventCreate(&wccTimeStart);
hipEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots, d_Fr, d_Br);
hipLaunchKernelGGL(( selectFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
switch(warpSize){
case 1:
hipLaunchKernelGGL(( fwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 2:
hipLaunchKernelGGL(( fwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 4:
hipLaunchKernelGGL(( fwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 8:
hipLaunchKernelGGL(( fwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 16:
hipLaunchKernelGGL(( fwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 32:
hipLaunchKernelGGL(( fwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
}
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
switch(warpSize){
case 1:
hipLaunchKernelGGL(( fwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 2:
hipLaunchKernelGGL(( fwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 4:
hipLaunchKernelGGL(( fwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 8:
hipLaunchKernelGGL(( fwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 16:
hipLaunchKernelGGL(( fwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 32:
hipLaunchKernelGGL(( fwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
}
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
switch(warpSize){
case 1:
hipLaunchKernelGGL(( bwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 2:
hipLaunchKernelGGL(( bwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 4:
hipLaunchKernelGGL(( bwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 8:
hipLaunchKernelGGL(( bwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 16:
hipLaunchKernelGGL(( bwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 32:
hipLaunchKernelGGL(( bwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
}
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim2TimeStart, 0);
#endif
if(t2)
hipLaunchKernelGGL(( trim2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
hipEventRecord(trim2TimeStop, 0);
hipEventSynchronize(trim2TimeStop);
hipEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
hipLaunchKernelGGL(( assignUniqueRange), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( hipMemset((void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange1), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
hipEventRecord(wccTimeStop, 0);
hipEventSynchronize(wccTimeStop);
hipEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
hipLaunchKernelGGL(( selectPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
hipEventRecord(sccTimeStop, 0);
hipEventSynchronize(sccTimeStop);
hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f", sccTime );
CUDA_SAFE_CALL( hipFree( d_Fc ));
CUDA_SAFE_CALL( hipFree( d_Fr ));
CUDA_SAFE_CALL( hipFree( d_Bc ));
CUDA_SAFE_CALL( hipFree( d_Br ));
CUDA_SAFE_CALL( hipFree( d_range));
CUDA_SAFE_CALL( hipFree( d_tags));
CUDA_SAFE_CALL( hipFree( d_pivots ));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatef));
CUDA_SAFE_CALL( hipFree( (void *)d_terminateb));
hipEventDestroy(sccTimeStart);
hipEventDestroy(sccTimeStop);
#ifdef _DEBUG
hipEventDestroy(bTimeStart);
hipEventDestroy(bTimeStop);
hipEventDestroy(trim1TimeStart);
hipEventDestroy(trim1TimeStop);
hipEventDestroy(trim2TimeStart);
hipEventDestroy(trim2TimeStop);
hipEventDestroy(pivotTimeStart);
hipEventDestroy(pivotTimeStop);
hipEventDestroy(updateTimeStart);
hipEventDestroy(updateTimeStop);
hipEventDestroy(wccTimeStart);
hipEventDestroy(wccTimeStop);
#endif
return;
}
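// vHong: the same algorithm as wHong, but using plain thread-per-vertex fwd/bwd
// kernels throughout instead of the warp-cooperative traversal.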
void vHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2){
    //Set the device which is exclusively used by this program
hipSetDevice(7);
float sccTime=0;
hipEvent_t sccTimeStart, sccTimeStop;
hipEventCreate(&sccTimeStart);
hipEventCreate(&sccTimeStop);
hipEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
    hipError_t e1, e2, e3, e4, e5, e6, e7, e8, e9;
CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminateb, sizeof(bool) ));
if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation ||
e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation ||
e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation ||
        e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation || e9 == hipErrorMemoryAllocation ) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = ceill(sqrt(RSize / (double) BLOCKSIZE));
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
hipEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
hipEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
hipEventCreate(&bTimeStart);
hipEventCreate(&bTimeStop);
hipEventCreate(&pivotTimeStart);
hipEventCreate(&pivotTimeStop);
hipEventCreate(&trim1TimeStart);
hipEventCreate(&trim1TimeStop);
hipEventCreate(&trim2TimeStart);
hipEventCreate(&trim2TimeStop);
hipEventCreate(&updateTimeStart);
hipEventCreate(&updateTimeStop);
hipEventCreate(&wccTimeStart);
hipEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots, d_Fr, d_Br);
hipLaunchKernelGGL(( selectFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim2TimeStart, 0);
#endif
if(t2)
hipLaunchKernelGGL(( trim2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
hipEventRecord(trim2TimeStop, 0);
hipEventSynchronize(trim2TimeStop);
hipEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
hipLaunchKernelGGL(( assignUniqueRange), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( hipMemset((void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange1), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( propagateRange2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
hipEventRecord(wccTimeStop, 0);
hipEventSynchronize(wccTimeStop);
hipEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
hipLaunchKernelGGL(( pollForPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
hipLaunchKernelGGL(( selectPivots), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
printf("numberOf1Sccs: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
hipEventRecord(sccTimeStop, 0);
hipEventSynchronize(sccTimeStop);
hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f", sccTime );
CUDA_SAFE_CALL( hipFree( d_Fc ));
CUDA_SAFE_CALL( hipFree( d_Fr ));
CUDA_SAFE_CALL( hipFree( d_Bc ));
CUDA_SAFE_CALL( hipFree( d_Br ));
CUDA_SAFE_CALL( hipFree( d_range));
CUDA_SAFE_CALL( hipFree( d_tags));
CUDA_SAFE_CALL( hipFree( d_pivots ));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatef));
CUDA_SAFE_CALL( hipFree( (void *)d_terminateb));
hipEventDestroy(sccTimeStart);
hipEventDestroy(sccTimeStop);
#ifdef _DEBUG
hipEventDestroy(bTimeStart);
hipEventDestroy(bTimeStop);
hipEventDestroy(trim1TimeStart);
hipEventDestroy(trim1TimeStop);
hipEventDestroy(trim2TimeStart);
hipEventDestroy(trim2TimeStop);
hipEventDestroy(pivotTimeStart);
hipEventDestroy(pivotTimeStop);
hipEventDestroy(updateTimeStart);
hipEventDestroy(updateTimeStop);
hipEventDestroy(wccTimeStart);
hipEventDestroy(wccTimeStop);
#endif
return;
}
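// detectSCC: like vHong, but additionally takes a per-vertex partition id array (Pr)
// and allocates one pivot array per partition (d_dpivots) on the device.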
void detectSCC(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, uint32_t * Pr, bool t1, bool t2){
    //Set the device which is exclusively used by this program
hipSetDevice(7);
//printf("RSize %d\n", RSize);
float sccTime=0;
hipEvent_t sccTimeStart, sccTimeStop;
hipEventCreate(&sccTimeStart);
hipEventCreate(&sccTimeStop);
hipEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_Pr = NULL;
uint32_t** d_dpivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
uint32_t* range = new uint32_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize + 1;
    const uint32_t partitionCount = 10; // const so the HostArray declaration below is a fixed-size array rather than a non-standard VLA
uint32_t *HostArray[partitionCount];
CUDA_SAFE_CALL(hipMalloc((void**)&d_dpivots, partitionCount * sizeof(uint32_t *)));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(hipMalloc((void**)&HostArray[i], max_pivot_count * sizeof(uint32_t)));
CUDA_SAFE_CALL(hipMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(hipMemcpy(d_dpivots, HostArray, partitionCount * sizeof(uint32_t *), hipMemcpyHostToDevice));
    hipError_t e1, e2, e3, e4, e5, e6, e7, e8, e9, e10; // one status variable per allocation so no failure is masked
    CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
    CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
    CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminatef, sizeof(bool) ));
    CUDA_SAFE_CALL( e9 = hipMalloc( (void**) &d_terminateb, sizeof(bool) ));
    CUDA_SAFE_CALL( e10 = hipMalloc( (void**) &d_Pr, (RSize + 2) * sizeof(uint32_t) ));
    if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation ||
        e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation ||
        e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation ||
        e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation ||
        e9 == hipErrorMemoryAllocation || e10 == hipErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemcpy( d_Pr, Pr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice ));
CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
    int dim = (int)ceil(sqrt((double)((RSize + BLOCKSIZE - 1) / BLOCKSIZE))); // round the block count up before the square root so the dim x dim grid is never too small
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
hipEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
hipEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
hipEventCreate(&bTimeStart);
hipEventCreate(&bTimeStop);
hipEventCreate(&pivotTimeStart);
hipEventCreate(&pivotTimeStop);
hipEventCreate(&trim1TimeStart);
hipEventCreate(&trim1TimeStop);
hipEventCreate(&trim2TimeStart);
hipEventCreate(&trim2TimeStop);
hipEventCreate(&updateTimeStart);
hipEventCreate(&updateTimeStop);
hipEventCreate(&wccTimeStart);
hipEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim2TimeStart, 0);
#endif
if(t2)
hipLaunchKernelGGL(( trim2), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
hipEventRecord(trim2TimeStop, 0);
hipEventSynchronize(trim2TimeStop);
hipEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
hipEventRecord(trim1TimeStop, 0);
hipEventSynchronize(trim1TimeStop);
hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
bool *d_auxRange = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_auxRange, sizeof(bool) * (RSize + 1)));
CUDA_SAFE_CALL(hipMemset(d_auxRange, false, sizeof(bool) * (RSize + 1)));
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
hipEventRecord(pivotTimeStart, 0);
#endif
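        // Per-partition pivot election: clear every partition's candidate array, iterate
        // pollForPivotsLocal until its termination flag is left set (no more changes), then
        // let selectPivotsLocal promote the recorded candidates to pivots.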
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(hipMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(hipMemcpy(d_dpivots, HostArray, partitionCount * sizeof(uint32_t *), hipMemcpyHostToDevice));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( pollForPivotsLocal), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Fr, d_Br, d_Pr, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
hipLaunchKernelGGL(( selectPivotsLocal), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Pr, d_auxRange);
#ifdef _DEBUG
hipEventRecord(pivotTimeStop, 0);
hipEventSynchronize(pivotTimeStop);
hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(bTimeStart, 0);
#endif
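        // Closure phase: propagate forward and backward reachability from the selected pivots.
        // The fwdLocal/bwdLocal kernels additionally take Pr and the auxiliary range flags,
        // which is intended to keep the search inside each vertex's own partition.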
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdLocal), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
hipLaunchKernelGGL(( bwdLocal), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdLocal), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwdLocal), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
#ifdef _DEBUG
hipEventRecord(bTimeStop, 0);
hipEventSynchronize(bTimeStop);
hipEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
hipEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( updateLocal), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
//printf("$$$$$%d\n", terminatef);
if (terminatef)
break; //only way out
#ifdef _DEBUG
hipEventRecord(updateTimeStop, 0);
hipEventSynchronize(updateTimeStop);
hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
CUDA_SAFE_CALL( hipMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), hipMemcpyDeviceToHost ));
printf("LOCAL SCC's IDENTIFIED! NODES WITH SAME RANGE VALUES BELONG TO THE SAME SCC!!\n");
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
    vector< set<int> > Fs(RSize + 1), Bs(RSize + 1); // heap-allocated: a runtime-sized array of std::set is non-standard and can overflow the stack for large RSize
// Compute forward reachability and backward reachability
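    // For every vertex i not yet assigned to a finished SCC: reset the visit tags, run a full
    // forward closure (fwdRc) and backward closure (bwdRc) rooted at i, and record the visited
    // vertices in Fs[i] and Bs[i].  These reachability sets are intersected later, during the
    // merging phase, to decide which partition-local SCCs collapse into one.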
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
hipLaunchKernelGGL(( resetTag), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, i);
hipDeviceSynchronize();
//printf("Processing %d\n", i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
//printf("Processed %d\n", i);
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
//printf("Inserting %d in Fs of %d\n", j, i);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
//printf("Inserting %d in Bs of %d\n", j, i);
}
}
//printf("Node %d, FsSize %d, BsSize %d\n", i, (int)Fs[i].size(), (int)Bs[i].size());
}
hipLaunchKernelGGL(( resetTag), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, RSize + 2);
printf("Fs AND Bs ARE POPULATED!!\n");
uint32_t *d_Rm = NULL;
CUDA_SAFE_CALL( hipMalloc((void **)&d_Rm, sizeof(uint32_t) * partitionCount));
uint32_t itr = 0;
printf("STARTING MERGE!\n");
//<----------Merging Phase----------------------------------
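    // One merge iteration: update the d_Occ flags by iterating computeInDegree and
    // computeOutDegree to a fixed point, gather per-partition range maxima (getMaxRange) and
    // shift ranges accordingly (shiftRange), collect trans-edges into devData
    // (identifyTransEdges), and then, on the host, union the SCCs those edges connect by
    // intersecting the precomputed Fs/Bs sets.  updatePr reports through d_terminatebb
    // whether anything changed; the loop stops once an iteration makes no progress.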
bool terminatebb = false;
bool volatile *d_terminatebb = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_terminatebb, sizeof(bool) ));
unsigned char * _devCount;
while(!terminatebb)
{
hipGetSymbolAddress((void **)&_devCount, devCount);
hipMemset(_devCount, 0, sizeof(int));
itr++;
printf("Iterations %d\n", itr);
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatebb, true, sizeof(bool) ));
bool *d_Occ = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_Occ, (RSize + 1) * sizeof(bool)));
CUDA_SAFE_CALL(hipMemset((void*)d_Occ, false, (RSize + 1) * sizeof(bool)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( computeInDegree), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_Pr, d_Br, d_Bc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( computeOutDegree), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_Pr, d_Fr, d_Fc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( (hipMemset((void *)d_Rm, 0, sizeof(uint32_t) * partitionCount)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( getMaxRange), dim3(grid), dim3(threads), 0, 0, d_range, d_Pr, d_Rm, RSize, d_tags, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
hipLaunchKernelGGL(( shiftRange), dim3(grid), dim3(threads), 0, 0, d_range, d_Pr, d_Rm, RSize, d_tags);
hipLaunchKernelGGL(( identifyTransEdges), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_Pr, d_Occ);
        hipDeviceSynchronize(); // make sure identifyTransEdges has finished before its results are read back
        CUDA_SAFE_CALL( hipFree( d_Occ )); // d_Occ is re-allocated at the top of every merge iteration; free it here so the loop does not leak device memory
//printf("Identified Trans-edges!\n");
int dsize;
hipMemcpyFromSymbol(&dsize, devCount, sizeof(int));
if (dsize >= CSize)
{
printf("No space!\n");
}
vector<Ele> results(dsize);
//printf("dsize: %d\n", dsize);
        hipMemcpyFromSymbol(results.data(), devData, dsize * sizeof(Ele)); // results.data() stays valid even when dsize == 0, unlike &results[0]
/*for(int i = 0; i < dsize; i++)
printf("transedge[%d]: <%d, %d>\n", i, results[i].u1, results[i].u2);*/
// Trans-edges are present in results -> <u1, u2>
CUDA_SAFE_CALL( hipMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), hipMemcpyDeviceToHost ));
vector<vector<int> > transSets;
for(int i = 0; i < dsize; i++)
for(int j = i + 1; j < dsize; j++)
{
vector<int> temp1(Bs[results[i].u1].size() + Fs[results[j].u2].size());
vector<int>::iterator it;
it = set_intersection(Bs[results[i].u1].begin(), Bs[results[i].u1].end(), Fs[results[j].u2].begin(), Fs[results[j].u2].end(), temp1.begin());
temp1.resize(it - temp1.begin());
vector<int> temp2(Bs[results[j].u1].size() + Fs[results[i].u2].size());
it = set_intersection(Bs[results[j].u1].begin(), Bs[results[j].u1].end(), Fs[results[i].u2].begin(), Fs[results[i].u2].end(), temp2.begin());
temp2.resize(it - temp2.begin());
/*printf("BS U1: ");
for(set<int>::iterator it = Bs[results[j].u1].begin(); it != Bs[results[j].u1].end(); it++)
printf("%d ", *it);
printf("\n");
printf("FS U2: ");
for(set<int>::iterator it = Fs[results[i].u2].begin(); it != Fs[results[i].u2].end(); it++)
printf("%d ", *it);
printf("\n");
printf("temp2: ");
for(int k = 0; k < temp2.size(); k++)
printf("%d ", temp2[k]);
printf("\n");*/
temp1.insert(temp1.end(), temp2.begin(), temp2.end());
if((int)temp1.size() > 0)
transSets.push_back(temp1);
}
bool ok = true;
int ssSize = (int)transSets.size();
/*for(int i = 0; i < ssSize; i++)
{
printf("TRANS SET: ");
for(int j = 0; j < (int)transSets[i].size(); j++)
printf("%d ", transSets[i][j]);
printf("\n");
}*/
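        // Host-side fixed point: within every trans-set (a group of vertices known to belong to
        // one merged SCC) propagate the maximum range value until all members agree, so a merged
        // SCC ends up with a single range id before the array is copied back to the device.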
do
{
ok = true;
for(int i = 0; i < ssSize; i++)
{
uint32_t mxRange = 0;
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(mxRange < range[transSets[i][k]])
mxRange = range[transSets[i][k]];
}
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(range[transSets[i][k]] != mxRange)
{
range[transSets[i][k]] = mxRange;
ok = false;
}
}
}
}while(!ok);
CUDA_SAFE_CALL( hipMemcpy(d_range, range, sizeof(uint32_t) * (RSize + 1), hipMemcpyHostToDevice ));
hipLaunchKernelGGL(( updatePr), dim3(grid), dim3(threads), 0, 0, d_Pr, RSize, d_terminatebb, d_tags);
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
hipLaunchKernelGGL(( resetTag), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) ));
hipLaunchKernelGGL(( fwdRc), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) ));
hipLaunchKernelGGL(( bwdRc), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
//printf("Inserting %d in Fs of %d\n", j, i);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
//printf("Inserting %d in Bs of %d\n", j, i);
}
}
}
CUDA_SAFE_CALL( hipMemcpy( &terminatebb, (const void *)d_terminatebb, sizeof(bool), hipMemcpyDeviceToHost ));
//printf("terminatebb: %d\n", terminatebb);
}
printf("MERGING DONE! ^_^\n");
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost ));
CUDA_SAFE_CALL( hipMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), hipMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
map<uint32_t, bool> seen;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
{
numberOf1Sccs++;
//printf("TRIM1: %d\n", i);
}
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(seen.find(range[i]) == seen.end())
{
numberOfPivotSccs++;
seen[range[i]] = true;
//printf("RANGE of %d: %d\n", range[i], i);
}
//printf("NumberOf1SccsS: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
hipEventRecord(sccTimeStop, 0);
hipEventSynchronize(sccTimeStop);
hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f\n", sccTime );
CUDA_SAFE_CALL( hipFree( d_Fc ));
CUDA_SAFE_CALL( hipFree( d_Fr ));
CUDA_SAFE_CALL( hipFree( d_Bc ));
CUDA_SAFE_CALL( hipFree( d_Br ));
CUDA_SAFE_CALL( hipFree( d_range));
CUDA_SAFE_CALL( hipFree( d_tags));
CUDA_SAFE_CALL( hipFree( d_pivots ));
CUDA_SAFE_CALL( hipFree( d_auxRange));
    CUDA_SAFE_CALL( hipFree( d_Rm));
    CUDA_SAFE_CALL( hipFree( d_Pr)); // d_Pr is allocated above but was never released
CUDA_SAFE_CALL( hipFree( (void *)d_terminatef));
CUDA_SAFE_CALL( hipFree( (void *)d_terminateb));
CUDA_SAFE_CALL( hipFree( (void *)d_terminatebb));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(hipFree(HostArray[i]));
}
CUDA_SAFE_CALL(hipFree(d_dpivots));
hipEventDestroy(sccTimeStart);
hipEventDestroy(sccTimeStop);
#ifdef _DEBUG
hipEventDestroy(bTimeStart);
hipEventDestroy(bTimeStop);
hipEventDestroy(trim1TimeStart);
hipEventDestroy(trim1TimeStop);
hipEventDestroy(trim2TimeStart);
hipEventDestroy(trim2TimeStop);
hipEventDestroy(pivotTimeStart);
hipEventDestroy(pivotTimeStop);
hipEventDestroy(updateTimeStart);
hipEventDestroy(updateTimeStop);
hipEventDestroy(wccTimeStart);
hipEventDestroy(wccTimeStop);
#endif
return;
}
| d806cbd537212df61cf490253468378899a414fc.cu | #include "scc.h"
#include<map>
#include<set>
#include<algorithm>
#include<vector>
#include "scc_kernels.h"
using namespace std;
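// Device-side append buffer used to collect "trans-edges" (edges that cross between a pair of
// partitions).  identifyTransEdges pushes entries through VecPushBack, which reserves a slot by
// atomically incrementing devCount; once the fixed capacity of 10001 entries is exhausted the
// entry is dropped and -1 is returned, so devCount is a request count rather than the number of
// entries actually stored.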
typedef struct {
uint32_t u1, u2;
} Ele;
__device__ Ele devData[10001];
__device__ int devCount = 0;
__device__ int VecPushBack(Ele &e) {
int insertPt = atomicAdd(&devCount, 1);
if (insertPt < 10001){
devData[insertPt] = e;
return insertPt;
}
else return -1;
}
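// Scans the forward edges of every vertex that is neither assigned to an SCC nor marked in Occ.
// Partitions are treated in pairs (2k, 2k+1): an edge (row, index) is recorded as a trans-edge
// when its target lies in the partner partition and is likewise still unresolved.  The collected
// edges drive the host-side merging phase in detectSCC.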
__global__ void identifyTransEdges(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, uint32_t *Pr, bool *Occ){
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]) || Occ[row])
return;
uint32_t myPr = Pr[row];
uint32_t cnt = Fr[row + 1] - Fr[row];
const uint32_t *nbrs = &Fc[Fr[row]];
uint32_t frpr;
if(myPr & 1)
frpr = myPr - 1;
else
frpr = myPr + 1;
for ( uint32_t i = 0; i < cnt; i++ ) {
uint32_t index = nbrs[i];
if(!isRangeSet(tags[index]) && Pr[index] == frpr && !Occ[index]){
//printf("ROW: %d, INDEX: %d\n", row, index);
Ele e = { .u1 = row, .u2 = index };
VecPushBack(e);
}
}
}
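// wHong: warp-assisted variant of the forward-backward (FB) SCC algorithm.  Trivial SCCs are
// trimmed, a first pivot is selected, and the first (typically giant) SCC is peeled off with the
// warp-per-vertex fwd_warp/bwd_warp template kernels; after further trimming and a WCC
// decomposition, the remaining vertices are handled by repeated pivot / forward+backward /
// update rounds that use the thread-per-vertex fwd/bwd kernels.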
void wHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2, int warpSize){
//Set the device which exclusively used by this program
cudaSetDevice(7);
float sccTime=0;
cudaEvent_t sccTimeStart, sccTimeStop;
cudaEventCreate(&sccTimeStart);
cudaEventCreate(&sccTimeStop);
cudaEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9;
CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) ));
CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) ));
if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation ||
e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation ||
e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation ||
e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation || e9 == cudaErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
dim3 gridfb;
if((RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = (int)ceil(sqrt((double)((RSize * warpSize + BLOCKSIZE - 1) / BLOCKSIZE))); // round the block count up before the square root so the grid covers all RSize * warpSize threads
gridfb.x = dim;
gridfb.y = dim;
gridfb.z = 1;
}else{
gridfb.x = (RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE;
gridfb.y = 1;
gridfb.z = 1;
}
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = (int)ceil(sqrt((double)((RSize + BLOCKSIZE - 1) / BLOCKSIZE))); // round the block count up before the square root so the dim x dim grid is never too small
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
cudaEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
cudaEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
cudaEventCreate(&bTimeStart);
cudaEventCreate(&bTimeStop);
cudaEventCreate(&pivotTimeStart);
cudaEventCreate(&pivotTimeStop);
cudaEventCreate(&trim1TimeStart);
cudaEventCreate(&trim1TimeStop);
cudaEventCreate(&trim2TimeStart);
cudaEventCreate(&trim2TimeStop);
cudaEventCreate(&updateTimeStart);
cudaEventCreate(&updateTimeStop);
cudaEventCreate(&wccTimeStart);
cudaEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, sizeof(uint32_t) ));
pollForFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots, d_Fr, d_Br);
selectFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
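        // The warp width is a compile-time template parameter of fwd_warp/bwd_warp, so the
        // runtime warpSize argument is dispatched through a switch over the supported widths.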
switch(warpSize){
case 1:
fwd_warp<1><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd_warp<1><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 2:
fwd_warp<2><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd_warp<2><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 4:
fwd_warp<4><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd_warp<4><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 8:
fwd_warp<8><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd_warp<8><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 16:
fwd_warp<16><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd_warp<16><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 32:
fwd_warp<32><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd_warp<32><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
}
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
switch(warpSize){
case 1:
fwd_warp<1><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 2:
fwd_warp<2><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 4:
fwd_warp<4><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 8:
fwd_warp<8><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 16:
fwd_warp<16><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
case 32:
fwd_warp<32><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
break;
}
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
switch(warpSize){
case 1:
bwd_warp<1><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 2:
bwd_warp<2><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 4:
bwd_warp<4><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 8:
bwd_warp<8><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 16:
bwd_warp<16><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
case 32:
bwd_warp<32><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
break;
}
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim2TimeStart, 0);
#endif
if(t2)
trim2<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
cudaEventRecord(trim2TimeStop, 0);
cudaEventSynchronize(trim2TimeStop);
cudaEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
assignUniqueRange<<<grid, threads>>>(d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( cudaMemset((void *)d_terminatef, true, sizeof(bool) ));
propagateRange1<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
propagateRange2<<<grid, threads>>>( d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
cudaEventRecord(wccTimeStop, 0);
cudaEventSynchronize(wccTimeStop);
cudaEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
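    // Repeat until every vertex is assigned to an SCC: elect one pivot per remaining range
    // (pollForPivots + selectPivots), run forward and backward closures from all pivots in
    // parallel, and let update() re-tag vertices from the visit flags (in the usual FB scheme the
    // intersection of the two closures forms the pivot's SCC and the remainder is split into new
    // ranges).  The loop exits when update() leaves its termination flag set.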
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
pollForPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
selectPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
cudaEventRecord(sccTimeStop, 0);
cudaEventSynchronize(sccTimeStop);
cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f", sccTime );
CUDA_SAFE_CALL( cudaFree( d_Fc ));
CUDA_SAFE_CALL( cudaFree( d_Fr ));
CUDA_SAFE_CALL( cudaFree( d_Bc ));
CUDA_SAFE_CALL( cudaFree( d_Br ));
CUDA_SAFE_CALL( cudaFree( d_range));
CUDA_SAFE_CALL( cudaFree( d_tags));
CUDA_SAFE_CALL( cudaFree( d_pivots ));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb));
cudaEventDestroy(sccTimeStart);
cudaEventDestroy(sccTimeStop);
#ifdef _DEBUG
cudaEventDestroy(bTimeStart);
cudaEventDestroy(bTimeStop);
cudaEventDestroy(trim1TimeStart);
cudaEventDestroy(trim1TimeStop);
cudaEventDestroy(trim2TimeStart);
cudaEventDestroy(trim2TimeStop);
cudaEventDestroy(pivotTimeStart);
cudaEventDestroy(pivotTimeStop);
cudaEventDestroy(updateTimeStart);
cudaEventDestroy(updateTimeStop);
cudaEventDestroy(wccTimeStart);
cudaEventDestroy(wccTimeStop);
#endif
return;
}
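// vHong: thread-per-vertex counterpart of wHong.  The phase structure is identical (trimming,
// first pivot, closure, WCC decomposition, repeated pivot/closure/update rounds), but every
// reachability kernel maps one CUDA thread to one vertex.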
void vHong(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2){
//Set the device which exclusively used by this program
cudaSetDevice(7);
float sccTime=0;
cudaEvent_t sccTimeStart, sccTimeStop;
cudaEventCreate(&sccTimeStart);
cudaEventCreate(&sccTimeStop);
cudaEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize;
    cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9; // one status variable per allocation so no failure is masked
    CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
    CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
    CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) ));
    CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) ));
    if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation ||
        e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation ||
        e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation ||
        e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation ||
        e9 == cudaErrorMemoryAllocation ) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = (int)ceil(sqrt((double)((RSize + BLOCKSIZE - 1) / BLOCKSIZE))); // round the block count up before the square root so the dim x dim grid is never too small
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
cudaEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
cudaEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
cudaEventCreate(&bTimeStart);
cudaEventCreate(&bTimeStop);
cudaEventCreate(&pivotTimeStart);
cudaEventCreate(&pivotTimeStop);
cudaEventCreate(&trim1TimeStart);
cudaEventCreate(&trim1TimeStop);
cudaEventCreate(&trim2TimeStart);
cudaEventCreate(&trim2TimeStop);
cudaEventCreate(&updateTimeStart);
cudaEventCreate(&updateTimeStop);
cudaEventCreate(&wccTimeStart);
cudaEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, sizeof(uint32_t) ));
pollForFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots, d_Fr, d_Br);
selectFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim2TimeStart, 0);
#endif
if(t2)
trim2<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
cudaEventRecord(trim2TimeStop, 0);
cudaEventSynchronize(trim2TimeStop);
cudaEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(wccTimeStart, 0);
#endif
//Now WCC decomposition
assignUniqueRange<<<grid, threads>>>(d_range, d_tags, RSize);
do{
CUDA_SAFE_CALL( cudaMemset((void *)d_terminatef, true, sizeof(bool) ));
propagateRange1<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
propagateRange2<<<grid, threads>>>( d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef || !terminateb);
#ifdef _DEBUG
cudaEventRecord(wccTimeStop, 0);
cudaEventSynchronize(wccTimeStop);
cudaEventElapsedTime(&temp, wccTimeStart, wccTimeStop);
wccTime+=temp;
#endif
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, max_pivot_count * sizeof(uint32_t) ));
pollForPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count, d_Fr, d_Br);
selectPivots<<<grid, threads>>>( d_range, d_tags, RSize, d_pivots, max_pivot_count);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
if (terminatef)
break; //only way out
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
//<----------Main algorithm---------------------------------
//SCC extraction
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
numberOf1Sccs++;
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(isPivot(tags[i]))
numberOfPivotSccs++;
printf("numberOf1Sccs: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
cudaEventRecord(sccTimeStop, 0);
cudaEventSynchronize(sccTimeStop);
cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f", sccTime );
CUDA_SAFE_CALL( cudaFree( d_Fc ));
CUDA_SAFE_CALL( cudaFree( d_Fr ));
CUDA_SAFE_CALL( cudaFree( d_Bc ));
CUDA_SAFE_CALL( cudaFree( d_Br ));
CUDA_SAFE_CALL( cudaFree( d_range));
CUDA_SAFE_CALL( cudaFree( d_tags));
CUDA_SAFE_CALL( cudaFree( d_pivots ));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb));
cudaEventDestroy(sccTimeStart);
cudaEventDestroy(sccTimeStop);
#ifdef _DEBUG
cudaEventDestroy(bTimeStart);
cudaEventDestroy(bTimeStop);
cudaEventDestroy(trim1TimeStart);
cudaEventDestroy(trim1TimeStop);
cudaEventDestroy(trim2TimeStart);
cudaEventDestroy(trim2TimeStop);
cudaEventDestroy(pivotTimeStart);
cudaEventDestroy(pivotTimeStop);
cudaEventDestroy(updateTimeStart);
cudaEventDestroy(updateTimeStop);
cudaEventDestroy(wccTimeStart);
cudaEventDestroy(wccTimeStop);
#endif
return;
}
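// detectSCC: partition-aware SCC detection.  After trimming, pivots are elected per partition
// via d_dpivots, the forward/backward closure is restricted by the partition array Pr through
// the *Local kernels, and SCCs that span partitions are merged afterwards using the trans-edges
// collected in devData.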
void detectSCC(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, uint32_t * Pr, bool t1, bool t2){
//Set the device which exclusively used by this program
cudaSetDevice(7);
//printf("RSize %d\n", RSize);
float sccTime=0;
cudaEvent_t sccTimeStart, sccTimeStop;
cudaEventCreate(&sccTimeStart);
cudaEventCreate(&sccTimeStop);
cudaEventRecord(sccTimeStart, 0);
//-----------GPU initialization---------------------------->
uint32_t* d_Fr = NULL;
uint32_t* d_Br = NULL;
uint32_t* d_Fc = NULL;
uint32_t* d_Bc = NULL;
uint32_t* d_pivots = NULL;
uint32_t* d_Pr = NULL;
uint32_t** d_dpivots = NULL;
uint32_t* d_range = NULL;
uint8_t* d_tags = NULL;
uint8_t* tags = new uint8_t[RSize+1];
uint32_t* range = new uint32_t[RSize+1];
bool volatile* d_terminatef = NULL;
bool terminatef = false;
bool volatile* d_terminateb = NULL;
bool terminateb = false;
int FWD_iterations = 0;
int BWD_iterations = 0;
uint32_t iterations = 0;
int Trimm_iterations = 0;
const uint32_t max_pivot_count = RSize + 1;
    const uint32_t partitionCount = 10; // const so the HostArray declaration below is a fixed-size array rather than a non-standard VLA
uint32_t *HostArray[partitionCount];
CUDA_SAFE_CALL(cudaMalloc((void**)&d_dpivots, partitionCount * sizeof(uint32_t *)));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(cudaMalloc((void**)&HostArray[i], max_pivot_count * sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(cudaMemcpy(d_dpivots, HostArray, partitionCount * sizeof(uint32_t *), cudaMemcpyHostToDevice));
    cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9, e10; // one status variable per allocation so no failure is masked
    CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t)));
    CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t)));
    CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) ));
    CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) ));
    CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) ));
    CUDA_SAFE_CALL( e10 = cudaMalloc( (void**) &d_Pr, (RSize + 2) * sizeof(uint32_t) ));
    if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation ||
        e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation ||
        e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation ||
        e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation ||
        e9 == cudaErrorMemoryAllocation || e10 == cudaErrorMemoryAllocation) {
throw "Error: Not enough memory on GPU\n";
}
CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemcpy( d_Pr, Pr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice ));
CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t)));
CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t)));
//for vertex-to-thread mapping
dim3 grid;
if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) {
        int dim = (int)ceil(sqrt((double)((RSize + BLOCKSIZE - 1) / BLOCKSIZE))); // round the block count up before the square root so the dim x dim grid is never too small
grid.x = dim;
grid.y = dim;
grid.z = 1;
}else{
grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE;
grid.y = 1;
grid.z = 1;
}
dim3 threads(BLOCKSIZE, 1, 1);
#ifdef _DEBUG
float pivotTime = 0, temp = 0, bTime = 0, trim1Time = 0, trim2Time = 0, updateTime = 0, wccTime = 0;
cudaEvent_t bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop;
cudaEvent_t trim1TimeStart, trim1TimeStop, trim2TimeStart, trim2TimeStop, wccTimeStart, wccTimeStop;
cudaEventCreate(&bTimeStart);
cudaEventCreate(&bTimeStop);
cudaEventCreate(&pivotTimeStart);
cudaEventCreate(&pivotTimeStop);
cudaEventCreate(&trim1TimeStart);
cudaEventCreate(&trim1TimeStop);
cudaEventCreate(&trim2TimeStart);
cudaEventCreate(&trim2TimeStop);
cudaEventCreate(&updateTimeStart);
cudaEventCreate(&updateTimeStop);
cudaEventCreate(&wccTimeStart);
cudaEventCreate(&wccTimeStop);
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim2TimeStart, 0);
#endif
if(t2)
trim2<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize);
#ifdef _DEBUG
cudaEventRecord(trim2TimeStop, 0);
cudaEventSynchronize(trim2TimeStop);
cudaEventElapsedTime(&temp, trim2TimeStart, trim2TimeStop);
trim2Time+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(trim1TimeStart, 0);
#endif
//-----------Trimming-------------------------------------->
if(t1){
do {
Trimm_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
} while (!terminatef);
}
#ifdef _DEBUG
cudaEventRecord(trim1TimeStop, 0);
cudaEventSynchronize(trim1TimeStop);
cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop);
trim1Time+=temp;
#endif
bool *d_auxRange = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_auxRange, sizeof(bool) * (RSize + 1)));
CUDA_SAFE_CALL(cudaMemset(d_auxRange, false, sizeof(bool) * (RSize + 1)));
//-----------Main algorithm-------------------------------->
while ( true ) {
iterations++;
//cout<<"\nIteration : "<<iterations<<endl;
//-----------Choose pivots--------------------------------->
#ifdef _DEBUG
cudaEventRecord(pivotTimeStart, 0);
#endif
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(cudaMemset(HostArray[i], 0, max_pivot_count * sizeof(uint32_t)));
}
CUDA_SAFE_CALL(cudaMemcpy(d_dpivots, HostArray, partitionCount * sizeof(uint32_t *), cudaMemcpyHostToDevice));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
pollForPivotsLocal<<<grid, threads>>>( d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Fr, d_Br, d_Pr, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
selectPivotsLocal<<<grid, threads>>>( d_range, d_tags, RSize, d_dpivots, max_pivot_count, d_Pr, d_auxRange);
#ifdef _DEBUG
cudaEventRecord(pivotTimeStop, 0);
cudaEventSynchronize(pivotTimeStop);
cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop);
pivotTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(bTimeStart, 0);
#endif
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwdLocal<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
bwdLocal<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwdLocal<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwdLocal<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
#ifdef _DEBUG
cudaEventRecord(bTimeStop, 0);
cudaEventSynchronize(bTimeStop);
cudaEventElapsedTime(&temp, bTimeStart, bTimeStop);
bTime+=temp;
#endif
#ifdef _DEBUG
cudaEventRecord(updateTimeStart, 0);
#endif
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
updateLocal<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef, d_auxRange);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
//printf("$$$$$%d\n", terminatef);
if (terminatef)
break; //only way out
#ifdef _DEBUG
cudaEventRecord(updateTimeStop, 0);
cudaEventSynchronize(updateTimeStop);
cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop);
updateTime+=temp;
#endif
}
CUDA_SAFE_CALL( cudaMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
printf("LOCAL SCC's IDENTIFIED! NODES WITH SAME RANGE VALUES BELONG TO THE SAME SCC!!\n");
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
	vector<set<int> > Fs(RSize + 1), Bs(RSize + 1); // per-vertex forward/backward closures
// Compute forward reachability and backward reachability
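	// For every vertex i whose SCC is not yet settled, run a full forward and backward traversal
	// rooted at i and record the visited vertex sets in Fs[i] and Bs[i]. These closures are reused
	// below, in the merging phase, to detect SCCs that span partition boundaries.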
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
resetTag<<<grid, threads>>>(d_range, d_tags, RSize, i);
cudaDeviceSynchronize();
//printf("Processing %d\n", i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
//printf("Processed %d\n", i);
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
//printf("Inserting %d in Fs of %d\n", j, i);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
//printf("Inserting %d in Bs of %d\n", j, i);
}
}
//printf("Node %d, FsSize %d, BsSize %d\n", i, (int)Fs[i].size(), (int)Bs[i].size());
}
resetTag<<<grid, threads>>>(d_range, d_tags, RSize, RSize + 2);
printf("Fs AND Bs ARE POPULATED!!\n");
uint32_t *d_Rm = NULL;
CUDA_SAFE_CALL( cudaMalloc((void **)&d_Rm, sizeof(uint32_t) * partitionCount));
uint32_t itr = 0;
printf("STARTING MERGE!\n");
//<----------Merging Phase----------------------------------
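	// Merging phase: "trans-edges" reported by identifyTransEdges connect SCCs that were found
	// independently per partition. Each iteration gathers those edges, intersects the stored
	// backward/forward closures of their endpoints to find vertex sets that become mutually
	// reachable, and propagates the maximum range value over every such set until updatePr
	// reports a fixed point (d_terminatebb stays true).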
bool terminatebb = false;
bool volatile *d_terminatebb = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_terminatebb, sizeof(bool) ));
unsigned char * _devCount;
while(!terminatebb)
{
cudaGetSymbolAddress((void **)&_devCount, devCount);
cudaMemset(_devCount, 0, sizeof(int));
itr++;
printf("Iterations %d\n", itr);
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatebb, true, sizeof(bool) ));
bool *d_Occ = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_Occ, (RSize + 1) * sizeof(bool)));
CUDA_SAFE_CALL(cudaMemset((void*)d_Occ, false, (RSize + 1) * sizeof(bool)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
computeInDegree<<<grid, threads>>>(d_tags, RSize, d_Pr, d_Br, d_Bc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
computeOutDegree<<<grid, threads>>>(d_tags, RSize, d_Pr, d_Fr, d_Fc, d_Occ, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( (cudaMemset((void *)d_Rm, 0, sizeof(uint32_t) * partitionCount)));
terminatef = false;
while(!terminatef)
{
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
getMaxRange<<<grid, threads>>>(d_range, d_Pr, d_Rm, RSize, d_tags, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
shiftRange<<<grid, threads>>>(d_range, d_Pr, d_Rm, RSize, d_tags);
identifyTransEdges<<<grid, threads>>>(d_Fc, d_Fr, d_range, d_tags, RSize, d_Pr, d_Occ);
cudaDeviceSynchronize(); //Required?
//printf("Identified Trans-edges!\n");
int dsize;
cudaMemcpyFromSymbol(&dsize, devCount, sizeof(int));
if (dsize >= CSize)
{
printf("No space!\n");
}
vector<Ele> results(dsize);
//printf("dsize: %d\n", dsize);
cudaMemcpyFromSymbol(&(results[0]), devData, dsize * sizeof(Ele));
/*for(int i = 0; i < dsize; i++)
printf("transedge[%d]: <%d, %d>\n", i, results[i].u1, results[i].u2);*/
// Trans-edges are present in results -> <u1, u2>
CUDA_SAFE_CALL( cudaMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
vector<vector<int> > transSets;
for(int i = 0; i < dsize; i++)
for(int j = i + 1; j < dsize; j++)
{
vector<int> temp1(Bs[results[i].u1].size() + Fs[results[j].u2].size());
vector<int>::iterator it;
it = set_intersection(Bs[results[i].u1].begin(), Bs[results[i].u1].end(), Fs[results[j].u2].begin(), Fs[results[j].u2].end(), temp1.begin());
temp1.resize(it - temp1.begin());
vector<int> temp2(Bs[results[j].u1].size() + Fs[results[i].u2].size());
it = set_intersection(Bs[results[j].u1].begin(), Bs[results[j].u1].end(), Fs[results[i].u2].begin(), Fs[results[i].u2].end(), temp2.begin());
temp2.resize(it - temp2.begin());
/*printf("BS U1: ");
for(set<int>::iterator it = Bs[results[j].u1].begin(); it != Bs[results[j].u1].end(); it++)
printf("%d ", *it);
printf("\n");
printf("FS U2: ");
for(set<int>::iterator it = Fs[results[i].u2].begin(); it != Fs[results[i].u2].end(); it++)
printf("%d ", *it);
printf("\n");
printf("temp2: ");
for(int k = 0; k < temp2.size(); k++)
printf("%d ", temp2[k]);
printf("\n");*/
temp1.insert(temp1.end(), temp2.begin(), temp2.end());
if((int)temp1.size() > 0)
transSets.push_back(temp1);
}
bool ok = true;
int ssSize = (int)transSets.size();
/*for(int i = 0; i < ssSize; i++)
{
printf("TRANS SET: ");
for(int j = 0; j < (int)transSets[i].size(); j++)
printf("%d ", transSets[i][j]);
printf("\n");
}*/
do
{
ok = true;
for(int i = 0; i < ssSize; i++)
{
uint32_t mxRange = 0;
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(mxRange < range[transSets[i][k]])
mxRange = range[transSets[i][k]];
}
for(int k = 0; k < (int)transSets[i].size(); k++)
{
if(range[transSets[i][k]] != mxRange)
{
range[transSets[i][k]] = mxRange;
ok = false;
}
}
}
}while(!ok);
CUDA_SAFE_CALL( cudaMemcpy(d_range, range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyHostToDevice ));
updatePr<<<grid, threads>>>(d_Pr, RSize, d_terminatebb, d_tags);
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
for(int i = 1; i <= RSize; i++)
{
if(isRangeSet(tags[i]))
continue;
resetTag<<<grid, threads>>>(d_range, d_tags, RSize, i);
do{//Forward and Backward reachability
FWD_iterations++;
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}while(!terminatef && !terminateb);
while(!terminatef){//Forward reachability
FWD_iterations++;
CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) ));
fwdRc<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, d_Pr, RSize, d_terminatef);
CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost ));
}
while(!terminateb){//Backward reachability
BWD_iterations++;
CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) ));
bwdRc<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, d_Pr, RSize, d_terminateb);
CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost ));
}
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
for(int j = 1; j <= RSize; j++)
{
if(isForwardVisited(tags[j]))
{
Fs[i].insert(j);
//printf("Inserting %d in Fs of %d\n", j, i);
}
if(isBackwardVisited(tags[j]))
{
Bs[i].insert(j);
//printf("Inserting %d in Bs of %d\n", j, i);
}
}
}
CUDA_SAFE_CALL( cudaMemcpy( &terminatebb, (const void *)d_terminatebb, sizeof(bool), cudaMemcpyDeviceToHost ));
//printf("terminatebb: %d\n", terminatebb);
}
printf("MERGING DONE! ^_^\n");
//<----------Main algorithm---------------------------------
//SCC extraction
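	// Count the SCCs: every trim1/trim2 vertex is its own SCC; the remaining vertices are grouped
	// by their final range value, contributing one SCC per distinct range encountered.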
CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
CUDA_SAFE_CALL( cudaMemcpy(range, d_range, sizeof(uint32_t) * (RSize + 1), cudaMemcpyDeviceToHost ));
uint32_t numberOf1Sccs = 0;
uint32_t numberOf2Sccs = 0;
uint32_t numberOfPivotSccs = 0;
uint32_t numberOfSccs = 0;
map<uint32_t, bool> seen;
for(uint32_t i=1;i<=RSize;i++)
if(isTrim1(tags[i]))
{
numberOf1Sccs++;
//printf("TRIM1: %d\n", i);
}
else if(isTrim2(tags[i]))
numberOf2Sccs++;
else if(seen.find(range[i]) == seen.end())
{
numberOfPivotSccs++;
seen[range[i]] = true;
//printf("RANGE of %d: %d\n", range[i], i);
}
//printf("NumberOf1SccsS: %d\n", numberOf1Sccs);
numberOfSccs = numberOf1Sccs + numberOf2Sccs + numberOfPivotSccs;
cudaEventRecord(sccTimeStop, 0);
cudaEventSynchronize(sccTimeStop);
cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop);
//printf(", %u, %d, %d, %d", iterations, FWD_iterations , BWD_iterations, Trimm_iterations);
#ifdef _DEBUG
printf(", %f", bTime);
printf(", %f", trim1Time);
printf(", %f", trim2Time);
printf(", %f", pivotTime);
printf(", %f", updateTime);
printf(", %f", wccTime);
#endif
printf("\nNumber Of Sccs : %d", numberOfSccs);
printf("\nTime : %f\n", sccTime );
CUDA_SAFE_CALL( cudaFree( d_Fc ));
CUDA_SAFE_CALL( cudaFree( d_Fr ));
CUDA_SAFE_CALL( cudaFree( d_Bc ));
CUDA_SAFE_CALL( cudaFree( d_Br ));
CUDA_SAFE_CALL( cudaFree( d_range));
CUDA_SAFE_CALL( cudaFree( d_tags));
CUDA_SAFE_CALL( cudaFree( d_pivots ));
CUDA_SAFE_CALL( cudaFree( d_auxRange));
CUDA_SAFE_CALL( cudaFree( d_Rm));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb));
CUDA_SAFE_CALL( cudaFree( (void *)d_terminatebb));
for(int i = 0; i < partitionCount; i++)
{
CUDA_SAFE_CALL(cudaFree(HostArray[i]));
}
CUDA_SAFE_CALL(cudaFree(d_dpivots));
cudaEventDestroy(sccTimeStart);
cudaEventDestroy(sccTimeStop);
#ifdef _DEBUG
cudaEventDestroy(bTimeStart);
cudaEventDestroy(bTimeStop);
cudaEventDestroy(trim1TimeStart);
cudaEventDestroy(trim1TimeStop);
cudaEventDestroy(trim2TimeStart);
cudaEventDestroy(trim2TimeStop);
cudaEventDestroy(pivotTimeStart);
cudaEventDestroy(pivotTimeStop);
cudaEventDestroy(updateTimeStart);
cudaEventDestroy(updateTimeStop);
cudaEventDestroy(wccTimeStart);
cudaEventDestroy(wccTimeStop);
#endif
return;
}
|
af2376dd2c6f757bc505689979b0d48a0dd03a4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ops_lib_core.h>
#include <hip/hip_runtime.h>
void fetch_test(ops_dat dat) {
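  // Exercise the OPS memspace data-access API on the device: write a 2x2 slab of marker values
  // into the dat from GPU memory, fetch the whole dat back through a GPU buffer and print it,
  // then fetch another 2x2 slab directly into GPU memory and print that as well.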
double slab2[] = {-1, -2, -3, -4};
double *slab2_d;
hipMalloc((void**)&slab2_d, 4*sizeof(double));
hipMemcpy(slab2_d, slab2, 4*sizeof(double), hipMemcpyHostToDevice);
int slab2_range[] = {6,8,6,8};
ops_dat_set_data_slab_memspace(dat, 0, (char*)slab2_d, slab2_range, OPS_DEVICE);
int disp[OPS_MAX_DIM];
int size[OPS_MAX_DIM];
ops_dat_get_extents(dat, 0, disp, size);
size_t bytes = sizeof(double) * size[0] * size[1];
double *data_h = (double*)ops_malloc(bytes);
double *data_d;
hipMalloc((void**)&data_d, bytes);
ops_dat_fetch_data_memspace(dat, 0, (char*)data_d, OPS_DEVICE);
hipMemcpy(data_h, data_d, bytes, hipMemcpyDeviceToHost);
printf("Fetched data:\n");
for (int j = 0; j < size[1]; j++) {
for (int i = 0; i < size[0]; i++) {
printf("%.1lf ", data_h[j*size[0]+i]);
}
printf("\n");
}
hipFree(data_d);
ops_free(data_h);
double *slab_h = (double*)malloc(4*sizeof(double));
double *slab_d;
hipMalloc((void**)&slab_d, 4*sizeof(double));
int slab_range[] = {10,12,10,12};
ops_dat_fetch_data_slab_memspace(dat, 0, (char*)slab_d, slab_range, OPS_DEVICE);
hipMemcpy(slab_h, slab_d, 4*sizeof(double), hipMemcpyDeviceToHost);
ops_printf("2D slab extracted on DEVICE:\n%g %g\n%g %g\n", slab_h[0], slab_h[1], slab_h[2], slab_h[3]);
free(slab_h);
hipFree(slab_d);
}
| af2376dd2c6f757bc505689979b0d48a0dd03a4f.cu | #include <ops_lib_core.h>
#include <cuda.h>
void fetch_test(ops_dat dat) {
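  // Exercise the OPS memspace data-access API on the device: write a 2x2 slab of marker values
  // into the dat from GPU memory, fetch the whole dat back through a GPU buffer and print it,
  // then fetch another 2x2 slab directly into GPU memory and print that as well.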
double slab2[] = {-1, -2, -3, -4};
double *slab2_d;
cudaMalloc((void**)&slab2_d, 4*sizeof(double));
cudaMemcpy(slab2_d, slab2, 4*sizeof(double), cudaMemcpyHostToDevice);
int slab2_range[] = {6,8,6,8};
ops_dat_set_data_slab_memspace(dat, 0, (char*)slab2_d, slab2_range, OPS_DEVICE);
int disp[OPS_MAX_DIM];
int size[OPS_MAX_DIM];
ops_dat_get_extents(dat, 0, disp, size);
size_t bytes = sizeof(double) * size[0] * size[1];
double *data_h = (double*)ops_malloc(bytes);
double *data_d;
cudaMalloc((void**)&data_d, bytes);
ops_dat_fetch_data_memspace(dat, 0, (char*)data_d, OPS_DEVICE);
cudaMemcpy(data_h, data_d, bytes, cudaMemcpyDeviceToHost);
printf("Fetched data:\n");
for (int j = 0; j < size[1]; j++) {
for (int i = 0; i < size[0]; i++) {
printf("%.1lf ", data_h[j*size[0]+i]);
}
printf("\n");
}
cudaFree(data_d);
ops_free(data_h);
double *slab_h = (double*)malloc(4*sizeof(double));
double *slab_d;
cudaMalloc((void**)&slab_d, 4*sizeof(double));
int slab_range[] = {10,12,10,12};
ops_dat_fetch_data_slab_memspace(dat, 0, (char*)slab_d, slab_range, OPS_DEVICE);
cudaMemcpy(slab_h, slab_d, 4*sizeof(double), cudaMemcpyDeviceToHost);
ops_printf("2D slab extracted on DEVICE:\n%g %g\n%g %g\n", slab_h[0], slab_h[1], slab_h[2], slab_h[3]);
free(slab_h);
cudaFree(slab_d);
}
|
85610725df041c11da644e9eec84d498e411b282.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "stretch_sway_flip_weights_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
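// Auto-generated timing harness: for each of the first argv[1] entries of matrices_ and every
// block shape in blocks_, launch stretch_sway_flip_weights_kernel once and synchronize, run 10
// warm-up launches, then time 1000 back-to-back launches (no synchronization inside the timed
// loop) and print [elapsed_us,(block),(matrix)].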
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src_weight_gpu = NULL;
hipMalloc(&src_weight_gpu, XSIZE*YSIZE);
float *weight_deform_gpu = NULL;
hipMalloc(&weight_deform_gpu, XSIZE*YSIZE);
int nweights = 1;
int n = XSIZE*YSIZE;
int kernel_size = XSIZE*YSIZE;
float angle = 1;
int reverse = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
stretch_sway_flip_weights_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src_weight_gpu,weight_deform_gpu,nweights,n,kernel_size,angle,reverse);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
stretch_sway_flip_weights_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src_weight_gpu,weight_deform_gpu,nweights,n,kernel_size,angle,reverse);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
stretch_sway_flip_weights_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src_weight_gpu,weight_deform_gpu,nweights,n,kernel_size,angle,reverse);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 85610725df041c11da644e9eec84d498e411b282.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "stretch_sway_flip_weights_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
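// Auto-generated timing harness: for each of the first argv[1] entries of matrices_ and every
// block shape in blocks_, launch stretch_sway_flip_weights_kernel once and synchronize, run 10
// warm-up launches, then time 1000 back-to-back launches (no synchronization inside the timed
// loop) and print [elapsed_us,(block),(matrix)].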
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src_weight_gpu = NULL;
cudaMalloc(&src_weight_gpu, XSIZE*YSIZE);
float *weight_deform_gpu = NULL;
cudaMalloc(&weight_deform_gpu, XSIZE*YSIZE);
int nweights = 1;
int n = XSIZE*YSIZE;
int kernel_size = XSIZE*YSIZE;
float angle = 1;
int reverse = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
stretch_sway_flip_weights_kernel<<<gridBlock,threadBlock>>>(src_weight_gpu,weight_deform_gpu,nweights,n,kernel_size,angle,reverse);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
stretch_sway_flip_weights_kernel<<<gridBlock,threadBlock>>>(src_weight_gpu,weight_deform_gpu,nweights,n,kernel_size,angle,reverse);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
stretch_sway_flip_weights_kernel<<<gridBlock,threadBlock>>>(src_weight_gpu,weight_deform_gpu,nweights,n,kernel_size,angle,reverse);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
77ffeca3d582f5d5b4081ce94b95043703a93268.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for ray-voxel intersection based projection
*
 * This file has the necessary functions to perform X-ray CBCT projection
 * operation given a geometry, angles and image. It uses the so-called
* Jacobs algorithm to compute efficiently the length of the x-rays over
* voxel space.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "Siddon_projection.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("Ax:Siddon_projection",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 9
#define PIXEL_SIZE_BLOCK 9
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTexture(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool alloc);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
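// The constant buffer packs 4 Point3D entries per projection -- uvOrigin, deltaU, deltaV and
// source -- for each of the PROJ_PER_BLOCK projections handled by one kernel launch. The host
// fills projParamsArrayHost and copies it here with hipMemcpyToSymbolAsync before every launch.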
__global__ void vecAddInPlace(float *a, float *b, unsigned long n)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (idx < n)
a[idx] = a[idx] + b[idx];
}
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
const int currProjSetNumber,
const int totalNoOfProjections,
hipTextureObject_t tex){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long projNumber=threadIdx.z;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV)| (projNumber>=PROJ_PER_BLOCK))
return;
size_t idx = (size_t)(x * geo.nDetecV + y)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ;
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array (for a given GPU)
if(indAlpha>=totalNoOfProjections)
return;
    Point3D uvOrigin = projParamsArrayDev[4*projNumber];  // 4*projNumber because we store 4 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
Point3D pixel1D;
pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
///////
// Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516
//////
// Also called Jacobs algorithms
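    // The ray is parametrized as p(a) = source + a*ray, with a=0 at the source and a=1 at the
    // detector pixel. am/aM computed below bound the sub-interval of 'a' where the ray lies inside
    // the voxel grid, and ax/ay/az track the parameter of the next x-, y- and z-plane crossing so
    // the loop can walk the ray one voxel boundary at a time.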
Point3D ray;
// vector of Xray
ray.x=pixel1D.x-source.x;
ray.y=pixel1D.y-source.y;
ray.z=pixel1D.z-source.z;
    // These variables are omitted because
// bx,by,bz ={0,0,0}
// dx,dy,dz ={1,1,1}
// compute parameter values for x-ray parametric equation. eq(3-10)
float axm,aym,azm;
float axM,ayM,azM;
// In the paper Nx= number of X planes-> Nvoxel+1
axm=fminf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x));
aym=fminf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y));
azm=fminf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z));
axM=fmaxf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x));
ayM=fmaxf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y));
azM=fmaxf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z));
float am=fmaxf(fmaxf(axm,aym),azm);
float aM=fminf(fminf(axM,ayM),azM);
// line intersects voxel space -> am<aM
if (am>=aM)
detector[idx]=0;
// Compute max/min image INDEX for intersection eq(11-19)
// Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version
float imin,imax,jmin,jmax,kmin,kmax;
// for X
if( source.x<pixel1D.x){
imin=(am==axm)? 1.0f : ceilf (source.x+am*ray.x);
imax=(aM==axM)? geo.nVoxelX : floorf(source.x+aM*ray.x);
}else{
imax=(am==axm)? geo.nVoxelX-1.0f : floorf(source.x+am*ray.x);
imin=(aM==axM)? 0.0f : ceilf (source.x+aM*ray.x);
}
// for Y
if( source.y<pixel1D.y){
jmin=(am==aym)? 1.0f : ceilf (source.y+am*ray.y);
jmax=(aM==ayM)? geo.nVoxelY : floorf(source.y+aM*ray.y);
}else{
jmax=(am==aym)? geo.nVoxelY-1.0f : floorf(source.y+am*ray.y);
jmin=(aM==ayM)? 0.0f : ceilf (source.y+aM*ray.y);
}
// for Z
if( source.z<pixel1D.z){
kmin=(am==azm)? 1.0f : ceilf (source.z+am*ray.z);
kmax=(aM==azM)? geo.nVoxelZ : floorf(source.z+aM*ray.z);
}else{
kmax=(am==azm)? geo.nVoxelZ-1.0f : floorf(source.z+am*ray.z);
kmin=(aM==azM)? 0.0f : ceilf (source.z+aM*ray.z);
}
// get intersection point N1. eq(20-21) [(also eq 9-10)]
float ax,ay,az;
ax=(source.x<pixel1D.x)? __fdividef(imin-source.x,ray.x+0.000000000001f) : __fdividef(imax-source.x,ray.x+0.000000000001f);
ay=(source.y<pixel1D.y)? __fdividef(jmin-source.y,ray.y+0.000000000001f) : __fdividef(jmax-source.y,ray.y+0.000000000001f);
az=(source.z<pixel1D.z)? __fdividef(kmin-source.z,ray.z+0.000000000001f) : __fdividef(kmax-source.z,ray.z+0.000000000001f);
// get index of first intersection. eq (26) and (19)
int i,j,k;
float aminc=fminf(fminf(ax,ay),az);
i=(int)floorf(source.x+ (aminc+am)*0.5f*ray.x);
j=(int)floorf(source.y+ (aminc+am)*0.5f*ray.y);
k=(int)floorf(source.z+ (aminc+am)*0.5f*ray.z);
// Initialize
float ac=am;
    //eq (28), unit angles
float axu,ayu,azu;
axu=__frcp_rd(fabsf(ray.x));
ayu=__frcp_rd(fabsf(ray.y));
azu=__frcp_rd(fabsf(ray.z));
// eq(29), direction of update
float iu,ju,ku;
iu=(source.x< pixel1D.x)? 1.0f : -1.0f;
ju=(source.y< pixel1D.y)? 1.0f : -1.0f;
ku=(source.z< pixel1D.z)? 1.0f : -1.0f;
float maxlength=__fsqrt_rd(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ);
float sum=0.0f;
unsigned int Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections
// Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed
i+=0.5f;
j+=0.5f;
k+=0.5f;
for (unsigned int ii=0;ii<Np;ii++){
if (ax==aminc){
sum+=(ax-ac)*tex3D<float>(tex, i, j, k);
i=i+iu;
ac=ax;
ax+=axu;
}else if(ay==aminc){
sum+=(ay-ac)*tex3D<float>(tex, i, j, k);
j=j+ju;
ac=ay;
ay+=ayu;
}else if(az==aminc){
sum+=(az-ac)*tex3D<float>(tex, i, j, k);
k=k+ku;
ac=az;
az+=azu;
}
aminc=fminf(fminf(ax,ay),az);
}
detector[idx]=sum*maxlength;
}
int siddon_ray_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
char * devicenames;
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
devicenames=deviceProp.name;
}
// Check free memory
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPUs?
bool fits_in_memory=false;
unsigned int splits=1;
Geometry * geoArray;
if (mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global){// yes it does
fits_in_memory=true;
geoArray=(Geometry*)malloc(sizeof(Geometry));
geoArray[0]=geo;
}
else{// Nope nope.
fits_in_memory=false; // Oh dear.
// approx free memory we have. We already have left some extra 5% free for internal stuff
// we need a second projection memory to combine multi-GPU stuff.
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj;
splits=mem_image/mem_free+1;// Ceil of the truncation
geoArray=(Geometry*)malloc(splits*sizeof(Geometry));
splitImage(splits,geo,geoArray,nangles);
}
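    // Illustrative numbers (not computed by this code): with ~7.6GB usable after the 5% reserve,
    // a 512^3 float volume (~0.5GB) fits in one piece, while a 2048^3 volume (~32GB) would give
    // splits = mem_image/mem_free+1 = 5, i.e. five Z-slabs, each projected over all angles.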
    // Allocate auxiliary memory for projections on the GPU to accumulate partial results
float ** dProjection_accum;
size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float);
if (!fits_in_memory){
dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj);
hipMemset(dProjection_accum[dev*2+i],0,num_bytes_proj);
cudaCheckErrors("cudaMallocauxiliarty projections fail");
}
}
}
    // This happens regardless of whether the image fits in memory
float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection[dev*2+i], num_bytes_proj);
hipMemset(dProjection[dev*2+i] ,0,num_bytes_proj);
cudaCheckErrors("hipMalloc projections fail");
}
}
    //Page-lock (pin) the host memory so the copies can overlap with compute.
    // Let's try to make the host memory pinned:
    // We already queried the GPUs and assumed they are the same, so they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
    // Empirical testing shows that when the image needs few (or no) splits (which also implies the image is not very big), the time to
    // pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & (splits>1 |deviceCount>1)){
hipHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// auxiliary variables
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost;
hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
cudaCheckErrors("Error allocating auxiliary constant memory");
// Create Streams for overlapping memcopy and compute
int nStreams=deviceCount*2;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < 2; ++i){
hipStreamCreate(&stream[i+dev*2]);
}
}
cudaCheckErrors("Stream creation fail");
int nangles_device=(nangles+deviceCount-1)/deviceCount;
int nangles_last_device=(nangles-(deviceCount-1)*nangles_device);
unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int last_device_blocks= (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management.
int projection_this_block;
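    // Illustrative example: nangles=100 on 2 GPUs gives nangles_device=50, nangles_last_device=50
    // and noOfKernelCalls=ceil(50/PROJ_PER_BLOCK)=6 launches per GPU, the last launch covering only
    // the 5 remaining projections.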
hipTextureObject_t *texImg = new hipTextureObject_t[deviceCount];
hipArray **d_cuArrTex = new hipArray*[deviceCount];
for (unsigned int sp=0;sp<splits;sp++){
// Create texture objects for all GPUs
size_t linear_idx_start;
//First one shoudl always be the same size as all the rest but the last
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ;
CreateTexture(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp);
cudaCheckErrors("Texture object creation fail");
// Prepare kernel lauch variables
int divU,divV;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
unsigned int i;
// Now that we have prepared the image (piece of image) and parameters for kernels
// we project for all angles.
for ( i=0; i<noOfKernelCalls; i++){
for (dev=0;dev<deviceCount;dev++){
hipSetDevice(dev);
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
if (proj_global>=nangles)
break;
if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
break;
geoArray[sp].alpha=angles[proj_global*3];
geoArray[sp].theta=angles[proj_global*3+1];
geoArray[sp].psi =angles[proj_global*3+2];
//precomute distances for faster execution
//Precompute per angle constant stuff for speed
computeDeltas_Siddon(geoArray[sp],proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
                projParamsArrayHost[4*j]=uvOrigin;   // 4*j because we store 4 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
}
hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*2]);
hipStreamSynchronize(stream[dev*2]);
cudaCheckErrors("kernel fail");
hipLaunchKernelGGL(( kernelPixelDetector), dim3(grid),dim3(block),0,stream[dev*2], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
        // Now that the computation is happening, we prepare the memory for
        // combining the projections (splits>1) and start copying out previous results.
// If our image does not fit in memory then we need to make sure we accumulate previous results too.
// This is done in 2 steps:
// 1)copy previous results back into GPU
// 2)accumulate with current results
// The code to take them out is the same as when there are no splits needed
if( !fits_in_memory&&sp>0)
{
// 1) grab previous results and put them in the auxiliary variable dProjection_accum
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
hipMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyHostToDevice,stream[dev*2+1]);
}
// 2) take the results from current compute call and add it to the code in execution.
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
hipStreamSynchronize(stream[dev*2+1]); // wait until copy is finished
hipLaunchKernelGGL(( vecAddInPlace), dim3((geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS),dim3(MAXTREADS),0,stream[dev*2], dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block);
}
} // end accumulation case, where the image needs to be split
// Now, lets get out the projections from the previous execution of the kernels.
if (i>0){
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on previous set on this GPU
proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device;
//Unless it is the last (handled separately later), all blocks are full.
projection_this_block=PROJ_PER_BLOCK;
hipMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]);
}
}
// Make sure Computation on kernels has finished before we launch the next batch.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*2]);
}
}
// We still have the last set of projections to get out of all GPUs
//Note: noOfKernelCalls==i
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// How many projections are left here?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
            hipDeviceSynchronize(); //Not really necessary, but just in case; we lose nothing.
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)");
hipMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]);
}
        // Make sure everyone has done their business before the next image split:
hipDeviceSynchronize();
} // End image split loop.
cudaCheckErrors("Main loop fail");
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDestroyTextureObject(texImg[dev]);
hipFreeArray(d_cuArrTex[dev]);
}
delete[] texImg; texImg = 0;
delete[] d_cuArrTex; d_cuArrTex = 0;
// Freeing Stage
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipFree(dProjection[dev*2]);
hipFree(dProjection[dev*2+1]);
}
free(dProjection);
if(!fits_in_memory){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipFree(dProjection_accum[dev*2]);
hipFree(dProjection_accum[dev*2+1]);
}
free(dProjection_accum);
}
freeGeoArray(splits,geoArray);
hipHostFree(projParamsArrayHost);
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & (splits>1 |deviceCount>1)){
hipHostUnregister(img);
}
cudaCheckErrors("hipFree fail");
hipDeviceReset();
return 0;
}
void CreateTexture(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool alloc)
{
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
if(alloc){
for (unsigned int i = 0; i < num_devices; i++){
hipSetDevice(i);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent);
}
}
for (unsigned int i = 0; i < num_devices; i++){
hipSetDevice(i);
hipMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[i];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
        hipMemcpy3DAsync(&copyParams);
}
for (unsigned int i = 0; i < num_devices; i++){
hipSetDevice(i);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[i];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModePoint;
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL);
}
for (unsigned int i = 0; i < num_devices; i++){
hipSetDevice(i);
hipDeviceSynchronize();
}
cudaCheckErrors("Texture object creation fail");
}
/* This code generates the geometries needed to split the image properly in
* cases where the entire image does not fit in the memory of the GPU
**/
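// Illustrative example: nVoxelZ=512 split 3 ways gives splitsize=ceil(512/3)=171 and sub-volumes of
// 171, 171 and 170 slices; offOrigZ is recomputed per split so every slab keeps its true position
// along Z for each projection angle.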
void splitImage(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){
unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible
for(unsigned int sp=0;sp<splits;sp++){
geoArray[sp]=geo;
        // All of them are splitsize slices, except possibly the last one
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp;
geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;
// We need to redefine the offsets, as now each subimage is not aligned in the origin.
geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
for (unsigned int i=0;i<nangles;i++){
geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
}
}
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
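// On return, uvorigin is detector pixel (0,0) expressed in voxel-index coordinates, deltaU and
// deltaV are the steps corresponding to one detector index in U and V, and source is the X-ray
// source position in the same frame, so the kernel can rebuild any pixel position with two fused
// multiply-adds per coordinate.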
void computeDeltas_Siddon(Geometry geo,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
    // Now we have the real world (OXYZ) coordinates of the bottom corner and its two neighbours.
    // The objective is to get a position of the detector in a coordinate system where:
    // 1-units are voxel size (which can be different in each direction)
    // 2-The image has its first voxel at (0,0,0)
    // 3-The image never rotates
    // To do that, we need to compute the "deltas" of the detector, or "by how much
    // (in new xyz) do the voxels change when an index is added". To do that,
    // several geometric steps need to be applied
    //1.Roll,pitch,yaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
    // To roll, pitch and yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
    //Now let's translate the points to where they should be:
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
//1: Offset detector
    //S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
    //2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2;
S.x =S.x+geo.sVoxelX/2; S.y =S.y+geo.sVoxelY/2; S.z =S.z +geo.sVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
    //5. Apply COR. Wherever everything was, now it's offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
#ifndef PROJECTION_HPP
float maxDistanceCubeXY(Geometry geo, float alpha,int i){
///////////
// Compute initial "t" so we access safely as less as out of bounds as possible.
//////////
float maxCubX,maxCubY;
// Forgetting Z, compute max distance: diagonal+offset
maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
return geo.DSO[i]/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
}
void rollPitchYaw(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
+cos(geo.dPitch[1])*sin(geo.dYaw[i])*auxPoint.y
+cos(geo.dPitch[1])*cos(geo.dYaw[i])*auxPoint.z;
}
void eulerZYZ(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+
cos(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+
sin(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+
sin(geo.theta)*sin(geo.psi)*auxPoint.y+
cos(geo.theta)*auxPoint.z;
}
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
for(unsigned int sp=0;sp<splits;sp++){
free(geoArray[sp].offOrigZ);
}
free(geoArray);
}
//______________________________________________________________________________
//
// Function: checkFreeMemory
//
// Description: check available memory on devices
//______________________________________________________________________________
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUmemory","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif
| 77ffeca3d582f5d5b4081ce94b95043703a93268.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for ray-voxel intersection based projection
*
 * This file has the necessary functions to perform X-ray CBCT projection
 * operation given a geometry, angles and image. It uses the so-called
* Jacobs algorithm to compute efficiently the length of the x-rays over
* voxel space.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "Siddon_projection.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("Ax:Siddon_projection",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 9
#define PIXEL_SIZE_BLOCK 9
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTexture(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool alloc);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
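// The constant buffer packs 4 Point3D entries per projection -- uvOrigin, deltaU, deltaV and
// source -- for each of the PROJ_PER_BLOCK projections handled by one kernel launch. The host
// fills projParamsArrayHost and copies it here with cudaMemcpyToSymbolAsync before every launch.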
__global__ void vecAddInPlace(float *a, float *b, unsigned long n)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (idx < n)
a[idx] = a[idx] + b[idx];
}
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
const int currProjSetNumber,
const int totalNoOfProjections,
cudaTextureObject_t tex){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long projNumber=threadIdx.z;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV)| (projNumber>=PROJ_PER_BLOCK))
return;
size_t idx = (size_t)(x * geo.nDetecV + y)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ;
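    // Linear index into the projection buffer: V (y) runs fastest within a detector column,
    // and each projection handled by this block occupies a full nDetecU*nDetecV slab.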
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array (for a given GPU)
if(indAlpha>=totalNoOfProjections)
return;
    Point3D uvOrigin = projParamsArrayDev[4*projNumber];  // 4*projNumber because we have 4 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
Point3D pixel1D;
pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
///////
// Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516
//////
// Also called Jacobs algorithms
Point3D ray;
// vector of Xray
ray.x=pixel1D.x-source.x;
ray.y=pixel1D.y-source.y;
ray.z=pixel1D.z-source.z;
    // These variables are omitted because
// bx,by,bz ={0,0,0}
// dx,dy,dz ={1,1,1}
// compute parameter values for x-ray parametric equation. eq(3-10)
float axm,aym,azm;
float axM,ayM,azM;
// In the paper Nx= number of X planes-> Nvoxel+1
axm=fminf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x));
aym=fminf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y));
azm=fminf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z));
axM=fmaxf(__fdividef(-source.x,ray.x),__fdividef(geo.nVoxelX-source.x,ray.x));
ayM=fmaxf(__fdividef(-source.y,ray.y),__fdividef(geo.nVoxelY-source.y,ray.y));
azM=fmaxf(__fdividef(-source.z,ray.z),__fdividef(geo.nVoxelZ-source.z,ray.z));
float am=fmaxf(fmaxf(axm,aym),azm);
float aM=fminf(fminf(axM,ayM),azM);
// line intersects voxel space -> am<aM
if (am>=aM)
detector[idx]=0;
// Compute max/min image INDEX for intersection eq(11-19)
// Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version
float imin,imax,jmin,jmax,kmin,kmax;
// for X
if( source.x<pixel1D.x){
imin=(am==axm)? 1.0f : ceilf (source.x+am*ray.x);
imax=(aM==axM)? geo.nVoxelX : floorf(source.x+aM*ray.x);
}else{
imax=(am==axm)? geo.nVoxelX-1.0f : floorf(source.x+am*ray.x);
imin=(aM==axM)? 0.0f : ceilf (source.x+aM*ray.x);
}
// for Y
if( source.y<pixel1D.y){
jmin=(am==aym)? 1.0f : ceilf (source.y+am*ray.y);
jmax=(aM==ayM)? geo.nVoxelY : floorf(source.y+aM*ray.y);
}else{
jmax=(am==aym)? geo.nVoxelY-1.0f : floorf(source.y+am*ray.y);
jmin=(aM==ayM)? 0.0f : ceilf (source.y+aM*ray.y);
}
// for Z
if( source.z<pixel1D.z){
kmin=(am==azm)? 1.0f : ceilf (source.z+am*ray.z);
kmax=(aM==azM)? geo.nVoxelZ : floorf(source.z+aM*ray.z);
}else{
kmax=(am==azm)? geo.nVoxelZ-1.0f : floorf(source.z+am*ray.z);
kmin=(aM==azM)? 0.0f : ceilf (source.z+aM*ray.z);
}
// get intersection point N1. eq(20-21) [(also eq 9-10)]
float ax,ay,az;
ax=(source.x<pixel1D.x)? __fdividef(imin-source.x,ray.x+0.000000000001f) : __fdividef(imax-source.x,ray.x+0.000000000001f);
ay=(source.y<pixel1D.y)? __fdividef(jmin-source.y,ray.y+0.000000000001f) : __fdividef(jmax-source.y,ray.y+0.000000000001f);
az=(source.z<pixel1D.z)? __fdividef(kmin-source.z,ray.z+0.000000000001f) : __fdividef(kmax-source.z,ray.z+0.000000000001f);
// get index of first intersection. eq (26) and (19)
    float i,j,k;   // float so that the half-voxel offset applied below (i+=0.5f) is not truncated
float aminc=fminf(fminf(ax,ay),az);
i=(int)floorf(source.x+ (aminc+am)*0.5f*ray.x);
j=(int)floorf(source.y+ (aminc+am)*0.5f*ray.y);
k=(int)floorf(source.z+ (aminc+am)*0.5f*ray.z);
// Initialize
float ac=am;
    //eq (28), unit angles
float axu,ayu,azu;
axu=__frcp_rd(fabsf(ray.x));
ayu=__frcp_rd(fabsf(ray.y));
azu=__frcp_rd(fabsf(ray.z));
// eq(29), direction of update
float iu,ju,ku;
iu=(source.x< pixel1D.x)? 1.0f : -1.0f;
ju=(source.y< pixel1D.y)? 1.0f : -1.0f;
ku=(source.z< pixel1D.z)? 1.0f : -1.0f;
float maxlength=__fsqrt_rd(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ);
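    // maxlength is the source-to-pixel distance in world units (the ray is stored in voxel
    // units, hence the per-axis dVoxel scaling); the parametric sums below are fractions of
    // it, so sum*maxlength yields the line integral in physical units.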
float sum=0.0f;
unsigned int Np=(imax-imin+1)+(jmax-jmin+1)+(kmax-kmin+1); // Number of intersections
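    // Np bounds the number of plane crossings; each iteration below steps the parametric
    // coordinate to the next X, Y or Z plane and accumulates value*step for that segment.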
// Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed
i+=0.5f;
j+=0.5f;
k+=0.5f;
for (unsigned int ii=0;ii<Np;ii++){
if (ax==aminc){
sum+=(ax-ac)*tex3D<float>(tex, i, j, k);
i=i+iu;
ac=ax;
ax+=axu;
}else if(ay==aminc){
sum+=(ay-ac)*tex3D<float>(tex, i, j, k);
j=j+ju;
ac=ay;
ay+=ayu;
}else if(az==aminc){
sum+=(az-ac)*tex3D<float>(tex, i, j, k);
k=k+ku;
ac=az;
az+=azu;
}
aminc=fminf(fminf(ax,ay),az);
}
detector[idx]=sum*maxlength;
}
int siddon_ray_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
char * devicenames;
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
devicenames=deviceProp.name;
}
// Check free memory
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPUs?
bool fits_in_memory=false;
unsigned int splits=1;
Geometry * geoArray;
if (mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global){// yes it does
fits_in_memory=true;
geoArray=(Geometry*)malloc(sizeof(Geometry));
geoArray[0]=geo;
}
else{// Nope nope.
fits_in_memory=false; // Oh dear.
// approx free memory we have. We already have left some extra 5% free for internal stuff
// we need a second projection memory to combine multi-GPU stuff.
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj;
splits=mem_image/mem_free+1;// Ceil of the truncation
geoArray=(Geometry*)malloc(splits*sizeof(Geometry));
splitImage(splits,geo,geoArray,nangles);
}
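    // From here on geoArray[sp] describes the sp-th Z-slab of the volume (see splitImage);
    // each slab is projected separately and the partial projections are accumulated.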
    // Allocate auxiliary memory for projections on the GPU to accumulate partial results
float ** dProjection_accum;
size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float);
if (!fits_in_memory){
dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
for (int i = 0; i < 2; ++i){
cudaMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj);
cudaMemset(dProjection_accum[dev*2+i],0,num_bytes_proj);
                cudaCheckErrors("cudaMalloc auxiliary projections fail");
}
}
}
    // This happens regardless of whether the image fits in memory
float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < 2; ++i){
cudaMalloc((void**)&dProjection[dev*2+i], num_bytes_proj);
cudaMemset(dProjection[dev*2+i] ,0,num_bytes_proj);
cudaCheckErrors("cudaMalloc projections fail");
}
}
    //Page-lock memory for asynchronous copy.
    // Lets try to make the host memory pinned:
    // We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
    // Empirical testing shows that when the image is not split (which also implies the image is not very big), the time to
    // pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & (splits>1 |deviceCount>1)){
cudaHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// auxiliary variables
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost;
cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
cudaCheckErrors("Error allocating auxiliary constant memory");
// Create Streams for overlapping memcopy and compute
int nStreams=deviceCount*2;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < 2; ++i){
cudaStreamCreate(&stream[i+dev*2]);
}
}
cudaCheckErrors("Stream creation fail");
int nangles_device=(nangles+deviceCount-1)/deviceCount;
int nangles_last_device=(nangles-(deviceCount-1)*nangles_device);
unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int last_device_blocks= (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management.
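    // Angles are split contiguously across GPUs: every device gets nangles_device of them
    // (the last device possibly fewer) and processes them PROJ_PER_BLOCK at a time.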
int projection_this_block;
cudaTextureObject_t *texImg = new cudaTextureObject_t[deviceCount];
cudaArray **d_cuArrTex = new cudaArray*[deviceCount];
for (unsigned int sp=0;sp<splits;sp++){
// Create texture objects for all GPUs
size_t linear_idx_start;
        //First one should always be the same size as all the rest but the last
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ;
CreateTexture(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp);
cudaCheckErrors("Texture object creation fail");
// Prepare kernel lauch variables
int divU,divV;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
unsigned int i;
// Now that we have prepared the image (piece of image) and parameters for kernels
// we project for all angles.
for ( i=0; i<noOfKernelCalls; i++){
for (dev=0;dev<deviceCount;dev++){
cudaSetDevice(dev);
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
if (proj_global>=nangles)
break;
if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
break;
geoArray[sp].alpha=angles[proj_global*3];
geoArray[sp].theta=angles[proj_global*3+1];
geoArray[sp].psi =angles[proj_global*3+2];
                    //Precompute distances for faster execution
//Precompute per angle constant stuff for speed
computeDeltas_Siddon(geoArray[sp],proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
                    projParamsArrayHost[4*j]=uvOrigin;   // 4*j because we have 4 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
}
cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*2]);
cudaStreamSynchronize(stream[dev*2]);
cudaCheckErrors("kernel fail");
kernelPixelDetector<<<grid,block,0,stream[dev*2]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
            // Now that the computation is happening, we prepare the memory for
            // combining the projections (when splits>1) and start copying out previous results.
// If our image does not fit in memory then we need to make sure we accumulate previous results too.
// This is done in 2 steps:
// 1)copy previous results back into GPU
// 2)accumulate with current results
// The code to take them out is the same as when there are no splits needed
if( !fits_in_memory&&sp>0)
{
// 1) grab previous results and put them in the auxiliary variable dProjection_accum
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
                // Unless it's the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
cudaMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyHostToDevice,stream[dev*2+1]);
}
            // 2) take the results from the current compute call and add them to the previously accumulated ones.
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
                // Unless it's the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
cudaStreamSynchronize(stream[dev*2+1]); // wait until copy is finished
vecAddInPlace<<<(geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS,MAXTREADS,0,stream[dev*2]>>>(dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block);
}
} // end accumulation case, where the image needs to be split
// Now, lets get out the projections from the previous execution of the kernels.
if (i>0){
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on previous set on this GPU
proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device;
//Unless it is the last (handled separately later), all blocks are full.
projection_this_block=PROJ_PER_BLOCK;
cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]);
}
}
// Make sure Computation on kernels has finished before we launch the next batch.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*2]);
}
}
// We still have the last set of projections to get out of all GPUs
//Note: noOfKernelCalls==i
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// How many projections are left here?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
        cudaDeviceSynchronize(); //Not really necessary, but just in case, we lose nothing.
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)");
cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]);
}
        // Make sure everyone has done their business before the next image split:
cudaDeviceSynchronize();
} // End image split loop.
cudaCheckErrors("Main loop fail");
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDestroyTextureObject(texImg[dev]);
cudaFreeArray(d_cuArrTex[dev]);
}
delete[] texImg; texImg = 0;
delete[] d_cuArrTex; d_cuArrTex = 0;
// Freeing Stage
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaFree(dProjection[dev*2]);
cudaFree(dProjection[dev*2+1]);
}
free(dProjection);
if(!fits_in_memory){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaFree(dProjection_accum[dev*2]);
cudaFree(dProjection_accum[dev*2+1]);
}
free(dProjection_accum);
}
freeGeoArray(splits,geoArray);
cudaFreeHost(projParamsArrayHost);
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & (splits>1 |deviceCount>1)){
cudaHostUnregister(img);
}
cudaCheckErrors("cudaFree fail");
cudaDeviceReset();
return 0;
}
void CreateTexture(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool alloc)
{
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
if(alloc){
for (unsigned int i = 0; i < num_devices; i++){
cudaSetDevice(i);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent);
}
}
for (unsigned int i = 0; i < num_devices; i++){
cudaSetDevice(i);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[i];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params);
}
for (unsigned int i = 0; i < num_devices; i++){
cudaSetDevice(i);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[i];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModePoint;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL);
}
for (unsigned int i = 0; i < num_devices; i++){
cudaSetDevice(i);
cudaDeviceSynchronize();
}
cudaCheckErrors("Texture object creation fail");
}
/* This code generates the geometries needed to split the image properly in
* cases where the entire image does not fit in the memory of the GPU
**/
void splitImage(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){
unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible
for(unsigned int sp=0;sp<splits;sp++){
geoArray[sp]=geo;
        // All of them are splitsize, except possibly the last one
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp;
geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;
// We need to redefine the offsets, as now each subimage is not aligned in the origin.
geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
for (unsigned int i=0;i<nangles;i++){
geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
}
}
}
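/* Example with hypothetical numbers: nVoxelZ=512 and splits=3 give splitsize=171, so the
 * sub-volumes have 171, 171 and 170 slices, and offOrigZ is shifted per slab so each one
 * keeps its original position in world space. */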
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * to compute the locations of the x-rays. While it seems verbose and overly optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_Siddon(Geometry geo,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
    // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours.
    // The objective is to get a position of the detector in a coordinate system where:
    // 1-units are voxel size (which can be different in each direction)
    // 2-The image has its first voxel at (0,0,0)
    // 3-The image never rotates
    // To do that, we need to compute the "deltas" of the detector, or "by how much
    // (in new xyz) do the voxels change when an index is added". To do that,
    // several geometric steps need to be applied.
//1.Roll,pitch,jaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
// To roll pitch jaw, the detector has to be in centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
    //Now lets translate the points to where they should be:
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
//1: Offset detector
    //S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2;
S.x =S.x+geo.sVoxelX/2; S.y =S.y+geo.sVoxelY/2; S.z =S.z +geo.sVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
    //5. apply COR. Wherever everything was, now it is offset by a bit.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
#ifndef PROJECTION_HPP
float maxDistanceCubeXY(Geometry geo, float alpha,int i){
///////////
    // Compute initial "t" so that we access as little out of bounds as possible, safely.
//////////
float maxCubX,maxCubY;
// Forgetting Z, compute max distance: diagonal+offset
maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
return geo.DSO[i]/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
}
void rollPitchYaw(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
            +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
            +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void eulerZYZ(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+
cos(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+
sin(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+
sin(geo.theta)*sin(geo.psi)*auxPoint.y+
cos(geo.theta)*auxPoint.z;
}
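// eulerZYZ applies the Z(alpha)-Y(theta)-Z(psi) Euler rotation to a point; together with
// rollPitchYaw it supports the arbitrary (non-circular) source/detector trajectories.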
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
for(unsigned int sp=0;sp<splits;sp++){
free(geoArray[sp].offOrigZ);
}
free(geoArray);
}
//______________________________________________________________________________
//
// Function: checkFreeMemory
//
// Description: check available memory on devices
//______________________________________________________________________________
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("Ax:Siddon_projection:GPUmemory","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif
|
633b39862002268540cb2b8bb9d30c5ccd5dd266.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
static const size_t N = 10000;
void init(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
p[i] = i;
}
}
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
__global__
void vecAdd1(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] + r[idx];
}
}
__device__
int __attribute__ ((noinline)) add(int l, int r) {
return l + r;
}
__global__
void vecAdd2(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = add(l[idx], r[idx]);
}
}
__global__
void vecAdd3(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] - r[idx];
}
}
__global__
void vecAdd4(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] * r[idx];
}
}
__global__
void vecAdd5(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] / r[idx];
}
}
void test1(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
// C2
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
}
}
void test2(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
// C2
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 2)));
}
}
void test3(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd2), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
// C2
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd2), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 2)));
}
}
void test4(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
// C2
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd2), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 2)));
if (i % 3 == 0) {
// C3
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd3), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 4)));
} else if (i % 3 == 1) {
// C4
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd4), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 8)));
} else {
// C5
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd5), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 8)));
}
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
}
}
void test5(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 10; ++i) {
// C1
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
// C2
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd2), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 2)));
// C3
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd3), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 4)));
}
}
void test6(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t j = 0; j < 2; ++j) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 10; ++i) {
// C1
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
// C2
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd2), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 2)));
// C3
if (j == 1 && i > (1 << 5)) {
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd4), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 4)));
} else {
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd3), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N / 4)));
}
}
hipLaunchKernelGGL(( GPU_TEST_FOR((vecAdd1), dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
}
}
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cuda_init_device(device_id);
int mode = 1;
if (argc > 2) {
mode = atoi(argv[2]);
}
#pragma omp parallel
{
int l[N], r[N], p[N];
int *dl, *dr, *dp;
init(l, N);
init(r, N);
RUNTIME_API_CALL(hipMalloc(&dl, N * sizeof(int)));
RUNTIME_API_CALL(hipMalloc(&dr, N * sizeof(int)));
RUNTIME_API_CALL(hipMalloc(&dp, N * sizeof(int)));
RUNTIME_API_CALL(hipMemcpy(dl, l, N * sizeof(int), hipMemcpyHostToDevice));
RUNTIME_API_CALL(hipMemcpy(dr, r, N * sizeof(int), hipMemcpyHostToDevice));
size_t threads = 256;
size_t blocks = (N - 1) / threads + 1;
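    // Ceil-divide: enough 256-thread blocks to cover all N elements; the kernels guard with idx < N.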
if (mode == 1) {
// Test case 1
      // C2 should get the same number of samples as C1
test1(dl, dr, dp, threads, blocks);
} else if (mode == 2) {
// Test case 2
// C2 should get half the samples of C1
// The equal range mode should fail in this case
test2(dl, dr, dp, threads, blocks);
} else if (mode == 3) {
// Test case 3
// C2's add should be half of C1's add
// The equal range mode should fail in this case
test3(dl, dr, dp, threads, blocks);
} else if (mode == 4) {
// Test case 4
// Test range ids
test4(dl, dr, dp, threads, blocks);
} else if (mode == 5) {
// Test case 5
// Test compress rate
test5(dl, dr, dp, threads, blocks);
} else if (mode == 6) {
      // Test case 6
// Test split function
test6(dl, dr, dp, threads, blocks);
}
RUNTIME_API_CALL(hipMemcpy(p, dp, N * sizeof(int), hipMemcpyDeviceToHost));
RUNTIME_API_CALL(hipFree(dl));
RUNTIME_API_CALL(hipFree(dr));
RUNTIME_API_CALL(hipFree(dp));
#ifdef OUTPUT
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
#endif
}
hipDeviceSynchronize();
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
| 633b39862002268540cb2b8bb9d30c5ccd5dd266.cu | #include <cstdio>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
static const size_t N = 10000;
void init(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
p[i] = i;
}
}
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
__global__
void vecAdd1(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] + r[idx];
}
}
__device__
int __attribute__ ((noinline)) add(int l, int r) {
return l + r;
}
__global__
void vecAdd2(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = add(l[idx], r[idx]);
}
}
__global__
void vecAdd3(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] - r[idx];
}
}
__global__
void vecAdd4(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] * r[idx];
}
}
__global__
void vecAdd5(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N) {
p[idx] = l[idx] / r[idx];
}
}
void test1(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
// C2
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
}
}
void test2(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
// C2
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N / 2)));
}
}
void test3(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
GPU_TEST_FOR((vecAdd2<<<blocks, threads>>>(dl, dr, dp, N)));
// C2
GPU_TEST_FOR((vecAdd2<<<blocks, threads>>>(dl, dr, dp, N / 2)));
}
}
void test4(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 12; ++i) {
// C1
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
// C2
GPU_TEST_FOR((vecAdd2<<<blocks, threads>>>(dl, dr, dp, N / 2)));
if (i % 3 == 0) {
// C3
GPU_TEST_FOR((vecAdd3<<<blocks, threads>>>(dl, dr, dp, N / 4)));
} else if (i % 3 == 1) {
// C4
GPU_TEST_FOR((vecAdd4<<<blocks, threads>>>(dl, dr, dp, N / 8)));
} else {
// C5
GPU_TEST_FOR((vecAdd5<<<blocks, threads>>>(dl, dr, dp, N / 8)));
}
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
}
}
void test5(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 10; ++i) {
// C1
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
// C2
GPU_TEST_FOR((vecAdd2<<<blocks, threads>>>(dl, dr, dp, N / 2)));
// C3
GPU_TEST_FOR((vecAdd3<<<blocks, threads>>>(dl, dr, dp, N / 4)));
}
}
void test6(int *dl, int *dr, int *dp, int threads, int blocks) {
#pragma loop nounroll
for (size_t j = 0; j < 2; ++j) {
#pragma loop nounroll
for (size_t i = 0; i < 1 << 10; ++i) {
// C1
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
// C2
GPU_TEST_FOR((vecAdd2<<<blocks, threads>>>(dl, dr, dp, N / 2)));
// C3
if (j == 1 && i > (1 << 5)) {
GPU_TEST_FOR((vecAdd4<<<blocks, threads>>>(dl, dr, dp, N / 4)));
} else {
GPU_TEST_FOR((vecAdd3<<<blocks, threads>>>(dl, dr, dp, N / 4)));
}
}
GPU_TEST_FOR((vecAdd1<<<blocks, threads>>>(dl, dr, dp, N)));
}
}
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cuda_init_device(device_id);
int mode = 1;
if (argc > 2) {
mode = atoi(argv[2]);
}
#pragma omp parallel
{
int l[N], r[N], p[N];
int *dl, *dr, *dp;
init(l, N);
init(r, N);
RUNTIME_API_CALL(cudaMalloc(&dl, N * sizeof(int)));
RUNTIME_API_CALL(cudaMalloc(&dr, N * sizeof(int)));
RUNTIME_API_CALL(cudaMalloc(&dp, N * sizeof(int)));
RUNTIME_API_CALL(cudaMemcpy(dl, l, N * sizeof(int), cudaMemcpyHostToDevice));
RUNTIME_API_CALL(cudaMemcpy(dr, r, N * sizeof(int), cudaMemcpyHostToDevice));
size_t threads = 256;
size_t blocks = (N - 1) / threads + 1;
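    // Ceil-divide: enough 256-thread blocks to cover all N elements; the kernels guard with idx < N.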
if (mode == 1) {
// Test case 1
      // C2 should get the same number of samples as C1
test1(dl, dr, dp, threads, blocks);
} else if (mode == 2) {
// Test case 2
// C2 should get half the samples of C1
// The equal range mode should fail in this case
test2(dl, dr, dp, threads, blocks);
} else if (mode == 3) {
// Test case 3
// C2's add should be half of C1's add
// The equal range mode should fail in this case
test3(dl, dr, dp, threads, blocks);
} else if (mode == 4) {
// Test case 4
// Test range ids
test4(dl, dr, dp, threads, blocks);
} else if (mode == 5) {
// Test case 5
// Test compress rate
test5(dl, dr, dp, threads, blocks);
} else if (mode == 6) {
      // Test case 6
// Test split function
test6(dl, dr, dp, threads, blocks);
}
RUNTIME_API_CALL(cudaMemcpy(p, dp, N * sizeof(int), cudaMemcpyDeviceToHost));
RUNTIME_API_CALL(cudaFree(dl));
RUNTIME_API_CALL(cudaFree(dr));
RUNTIME_API_CALL(cudaFree(dp));
#ifdef OUTPUT
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
#endif
}
cudaDeviceSynchronize();
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
|
d77203381a2441c936fc720149c7502f6e1bfadc.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPairwise.hip"
#else
#include <ATen/NamedTensorUtils.h>
static int THCTensor_(equalImpl)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
// Both tensors are empty
if(THTensor_(nElement)(self_) == 0) return true;
THCudaByteTensor *buf = at::empty_like(THTensor_wrap(self_), at::kByte).unsafeReleaseTensorImpl();
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THTensor_wrap(buf).min().item<unsigned char>();
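  // min()==1 only if every element compared equal; any mismatch writes a 0 that dominates the reduction.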
THCudaByteTensor_free(state, buf);
return min != 0;
}
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_) {
if (!at::namedinference::are_names_equal(self_, src_)) {
return 0;
}
at::NoNamesGuard guard;
return THCTensor_(equalImpl)(state, self_, src_);
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
#endif
#endif
| d77203381a2441c936fc720149c7502f6e1bfadc.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPairwise.cu"
#else
#include <ATen/NamedTensorUtils.h>
static int THCTensor_(equalImpl)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
// Both tensors are empty
if(THTensor_(nElement)(self_) == 0) return true;
THCudaByteTensor *buf = at::empty_like(THTensor_wrap(self_), at::kByte).unsafeReleaseTensorImpl();
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THTensor_wrap(buf).min().item<unsigned char>();
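  // min()==1 only if every element compared equal; any mismatch writes a 0 that dominates the reduction.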
THCudaByteTensor_free(state, buf);
return min != 0;
}
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_) {
if (!at::namedinference::are_names_equal(self_, src_)) {
return 0;
}
at::NoNamesGuard guard;
return THCTensor_(equalImpl)(state, self_, src_);
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
#endif
|
ea4a52e5cc794dae1a1e33ecacaefa38d4ec0326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Symmetrize1D_kernel.cu"
#define THREADSPERBLOCK 512
extern "C" void TestSymmetrize1D( const size_t blocks, const size_t totalSize, float* output, float* blockHessian, float* blockSizes, int* startDof ) {
float *gpuBlockHessian;
hipMalloc( ( void ** ) &gpuBlockHessian, totalSize * sizeof( float ) );
hipMemcpy( gpuBlockHessian, blockHessian, totalSize * sizeof( float ), hipMemcpyHostToDevice );
int *gpuBlockSizes;
hipMalloc( ( void ** ) &gpuBlockSizes, blocks * sizeof( int ) );
hipMemcpy( gpuBlockSizes, blockSizes, blocks * sizeof( int ), hipMemcpyHostToDevice );
int *gpuBlockPositions;
hipMalloc( ( void ** ) &gpuBlockPositions, blocks * sizeof( int ) );
hipMemcpy( gpuBlockPositions, startDof, blocks * sizeof( int ), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( symmetrize1D) , dim3(blocks / (THREADSPERBLOCK + 1)), dim3(THREADSPERBLOCK), 0, 0, gpuBlockHessian, gpuBlockPositions, gpuBlockSizes, blocks );
float outBlockHessian[totalSize];
hipMemcpy( outBlockHessian, gpuBlockHessian, totalSize * sizeof( float ), hipMemcpyDeviceToHost );
hipFree( gpuBlockHessian );
hipFree( gpuBlockSizes );
hipFree( gpuBlockPositions );
}
| ea4a52e5cc794dae1a1e33ecacaefa38d4ec0326.cu | #include "Symmetrize1D_kernel.cu"
#define THREADSPERBLOCK 512
extern "C" void TestSymmetrize1D( const size_t blocks, const size_t totalSize, float* output, float* blockHessian, float* blockSizes, int* startDof ) {
float *gpuBlockHessian;
cudaMalloc( ( void ** ) &gpuBlockHessian, totalSize * sizeof( float ) );
cudaMemcpy( gpuBlockHessian, blockHessian, totalSize * sizeof( float ), cudaMemcpyHostToDevice );
int *gpuBlockSizes;
cudaMalloc( ( void ** ) &gpuBlockSizes, blocks * sizeof( int ) );
cudaMemcpy( gpuBlockSizes, blockSizes, blocks * sizeof( int ), cudaMemcpyHostToDevice );
int *gpuBlockPositions;
cudaMalloc( ( void ** ) &gpuBlockPositions, blocks * sizeof( int ) );
cudaMemcpy( gpuBlockPositions, startDof, blocks * sizeof( int ), cudaMemcpyHostToDevice );
symmetrize1D <<< blocks / (THREADSPERBLOCK + 1), THREADSPERBLOCK>>>( gpuBlockHessian, gpuBlockPositions, gpuBlockSizes, blocks );
float outBlockHessian[totalSize];
cudaMemcpy( outBlockHessian, gpuBlockHessian, totalSize * sizeof( float ), cudaMemcpyDeviceToHost );
cudaFree( gpuBlockHessian );
cudaFree( gpuBlockSizes );
cudaFree( gpuBlockPositions );
}
|
4b4b7e756db1596b128dfa518da1328c32aebb3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
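    // grid holds normalized coordinates in [-1,1]; map them to input pixel space and blend
    // the four neighbouring texels with bilinear weights (out-of-range neighbours contribute 0).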
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
template<typename DType>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
const DType* grid_src,
DType* grad_grid) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
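    // Input gradients are scattered with atomicAdd since several output pixels can map to the
    // same input location; the grid gradient is accumulated over all channels below.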
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
top_left_v = *(data + data_index);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w
* (1.0 - top_left_x_w));
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index) * (1.0 - top_left_y_w)
* top_left_x_w);
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w)
* (1.0 - top_left_x_w));
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_y_w);
}
// calc grad of grid
*(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2;
*(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2;
}
}
} // namespace cuda
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 4, DType> &grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
hipStream_t stream = Stream<gpu>::GetStream(output.stream_);
cuda::BilinearSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
// post kernel check
hipError_t err = hipPeekAtLastError();
CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 4, DType> &ggrid,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data,
const Tensor<gpu, 4, DType> &grid) {
DType *g_input = input_grad.dptr_;
DType *grad_grid = ggrid.dptr_;
const DType *grid_src = grid.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
cuda::BilinearSamplerBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
// post kernel check
hipError_t err = hipPeekAtLastError();
CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNBilinearSamplerOp<DType>(param);
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BilinearSamplerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
| 4b4b7e756db1596b128dfa518da1328c32aebb3d.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
template<typename DType>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
const DType* grid_src,
DType* grad_grid) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < o_c; ++c) {
index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
top_left_v = *(data + data_index);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w
* (1.0 - top_left_x_w));
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index) * (1.0 - top_left_y_w)
* top_left_x_w);
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
atomicAdd(&g_input[data_index+ i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w)
* (1.0 - top_left_x_w));
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_y_w);
}
// calc grad of grid
*(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2;
*(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2;
}
}
} // namespace cuda
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 4, DType> &grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
cudaStream_t stream = Stream<gpu>::GetStream(output.stream_);
cuda::BilinearSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
// post kernel check
cudaError err = cudaPeekAtLastError();
CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 4, DType> &ggrid,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data,
const Tensor<gpu, 4, DType> &grid) {
DType *g_input = input_grad.dptr_;
DType *grad_grid = ggrid.dptr_;
const DType *grid_src = grid.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
cuda::BilinearSamplerBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
// post kernel check
cudaError err = cudaPeekAtLastError();
CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNBilinearSamplerOp<DType>(param);
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BilinearSamplerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
|
fada6a2de4ca3fcf2356d47f064b5722002c9858.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "particule.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <sys/types.h>
#include <unistd.h>
#define NBITER 100
#define BLOCKSIZE 16
#define GRIDDIM 64
static int SEEDED =0;
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
unsigned long mix(unsigned long a, unsigned long b, unsigned long c)
{
a=a-b; a=a-c; a=a^(c >> 13);
b=b-c; b=b-a; b=b^(a << 8);
c=c-a; c=c-b; c=c^(b >> 13);
a=a-b; a=a-c; a=a^(c >> 12);
b=b-c; b=b-a; b=b^(a << 16);
c=c-a; c=c-b; c=c^(b >> 5);
a=a-b; a=a-c; a=a^(c >> 3);
b=b-c; b=b-a; b=b^(a << 10);
c=c-a; c=c-b; c=c^(b >> 15);
return c;
}
pset * pset_alloc(int nb_par){
pset * set = (pset *)malloc(sizeof(pset));
if(set == NULL)
{
fprintf(stderr, "Can't allocate memory for the set creation.\n");
exit(EXIT_FAILURE);
}
set->nb = nb_par;
set->m = (double*)malloc(nb_par * sizeof(double));
set->pos = (double*)malloc(3* nb_par * sizeof(double));
set->spd = (double*)malloc(3* nb_par * sizeof(double));
set->acc = (double*)malloc(3* nb_par * sizeof(double));
set->force = (double*)malloc(3* nb_par * sizeof(double));
return set;
}
void pset_free(pset * set){
free(set->pos);
free(set->spd);
free(set->acc);
free(set->m);
free(set->force);
free(set);
}
void pset_copy(pset * origin, pset * dest){
int nb = origin-> nb;
int sd = sizeof(double);
dest->nb = origin->nb;
memcpy(dest->m, origin->m , nb*sd);
memcpy(dest->acc, origin->acc, 3* nb*sd);
memcpy(dest->spd, origin->spd, 3* nb*sd);
memcpy(dest->pos, origin->pos, 3* nb*sd);
}
void pset_print(pset * set)
{
int i;
int size = set->nb;
for (i = 0; i < size; ++i)
{
printf("#Particule numro : %d, de masse %g\n", i, set->m[i]);
printf("\tx:%g y:%g z:%g\n", set->pos[i], set->pos[i+ size], set->pos[i+ 2*size] );
printf("\tvx:%g vy:%g vz:%g\n",set->spd[i], set->spd[i+ size], set->spd[i+ 2*size]);
printf("\tax:%g ay:%g az:%g\n",set->acc[i], set->acc[i+ size], set->acc[i+ 2*size]);
}
}
void seed()
{
if(!SEEDED)
{
unsigned long seed = mix(clock(), time(NULL), getpid());
srand(seed);
SEEDED++;
}
}
void pset_init_rand(pset * s)
{
seed();
int i;
int size = s->nb;
for (i = 0; i < size; i++)
{
s->m[i] = 1.0e10;
s->pos[i] = MIN_RAND + rand()%(MAX_RAND-MIN_RAND);
s->pos[i+size] = MIN_RAND + rand()%(MAX_RAND-MIN_RAND);
s->pos[i+2*size] = MIN_RAND + rand()%(MAX_RAND-MIN_RAND);
s->spd[i] = 0;
s->spd[i+size] = 0;
s->spd[i+2*size] = 0;
s->acc[i] = 0;
s->acc[i+size] = 0;
s->acc[i+2*size] = 0;
}
}
/* Computes the orbital (circular) velocity for a given central mass and distance */
double v_orbit(double mass, double distance)
{
return sqrt(CONST_GRAV*mass/distance);
}
void pset_init_orbit(pset *s)
{
seed();
double dmin= 200, distance;
int size = s->nb;
s->pos[0 ] = 0;
s->pos[0 +size] = 0;
s->pos[0 +2*size] = 0;
s->spd[0 ] = 0;
s->spd[0 +size] = 0;
s->spd[0 +2*size] = 0;
s->acc[0 ] = 0;
s->acc[0 + size] = 0;
s->acc[0 +2*size] = 0;
s->m[0] = 1e10;
for (int i = 1; i < size; ++i)
{
distance = dmin*i + rand()% 50;
s->m[i] = s->m[0] /20000;
s->pos[i] = s->pos[0] -distance;
s->pos[i+size] = 0;
s->pos[i+2*size] = 0;
s->spd[i] = 0;
s->spd[i+size]= v_orbit(s->m[0], distance);
s->spd[i+2*size]= 0;
s->acc[i] = 0;
s->acc[i+size] = 0;
s->acc[i+2*size] = 0;
}
}
/** Takes as arguments the masses of two particles and the distance
 * between those particles, and returns the intensity of the
 * gravitational force between the two particles.
 */
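/* Note: intensity() below returns G*m/d^3 rather than the full force G*m1*m2/d^2;
 * multiplied by the displacement vector (p_j - p_i) in the nbody kernel it directly
 * gives particle i's acceleration contribution G*m_j*(p_j - p_i)/d^3. */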
__device__ void distance(double x1, double y1, double z1,
double x2, double y2, double z2,
double *res)
{
*res = sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1) );
}
__device__ void intensity(double m, double d, double * res)
{
*res = (CONST_GRAV * m / (d*d*d));
}
__device__ int getGlobalIdx_2D_2D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
__global__ void nbody(int* n, double* acc, double* spd, double* pos, double* m)
{
unsigned int idx = blockDim.x* blockIdx.x + threadIdx.x;
double d, inten1;
int j;
int size = *n;
double dt = 100.0;
if(idx >= size)
return;
acc[idx] = 0;
acc[idx+size] = 0;
acc[idx+2*size] = 0;
/* ATTENTION !! Doing it this way does not actually work. */
/* Indeed: we touch acc[j], while acc[j] may already have been
updated at the position stage (and there is actually a good chance
that this is the case, since the position update
has fewer operations to perform.)*/
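    /* Editor's note (a possible fix, not part of the original code): the race described
       above can be avoided by double-buffering the positions (read pos_old[], write
       pos_new[]) or by splitting force accumulation and integration into two kernel
       launches, so no thread reads a position another thread is updating. */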
for (j = 0; j < size; ++j)
{
if(j != idx)
{
distance(pos[idx], pos[idx+ size], pos[idx+ 2*size], pos[j],
pos[j+ size], pos[j+ 2*size], &d);
intensity(m[j], d, &inten1);
acc[idx]+= inten1 *(pos[j] - pos[idx]);
acc[idx+size]+= inten1 *(pos[j+size] - pos[idx+size]);
acc[idx+2*size]+= inten1 *(pos[j+2*size] - pos[idx+2*size]);
}
}
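    /* Integration step: advance the position with p += v*dt + a*dt^2/2, then the
       velocity with v += a*dt, using the acceleration just accumulated above. */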
pos[idx]+= dt* spd[idx] + dt*dt/2 * acc[idx];
pos[idx + size]+= dt* spd[idx+ size] + dt*dt/2 * acc[idx+size];
pos[idx + 2*size]+= dt* spd[idx+ 2*size] + dt*dt/2 * acc[idx+2*size];
spd[idx]+= dt* acc[idx];
spd[idx + size]+= dt* acc[idx+ size];
spd[idx + 2*size]+= dt* acc[idx+ 2*size];
}
int main(int argc, char ** argv)
{
if(argc != 2){
fprintf(stderr, "Enter the number of particles\n");
exit(EXIT_FAILURE);
}
int NBPAR = atoi(argv[1]);
pset *s = pset_alloc(NBPAR);
pset_init_orbit(s);
/*pset_print(s);*/
int* nb;
double* acc, *spd, *pos, *m;
hipMalloc((void**)&nb, 1*sizeof(int));
hipMalloc((void**)&acc, 3*NBPAR*sizeof(double));
hipMalloc((void**)&spd, 3*NBPAR*sizeof(double));
hipMalloc((void**)&pos, 3*NBPAR*sizeof(double));
hipMalloc((void**)&m, NBPAR*sizeof(double));
hipMemcpy(nb, &s->nb, 1*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(acc, s->acc, 3*NBPAR*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(spd, s->spd, 3*NBPAR*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(pos, s->pos, 3*NBPAR*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(m, s->m, NBPAR*sizeof(double), hipMemcpyHostToDevice);
dim3 dimBlock(BLOCKSIZE);
dim3 dimGrid(GRIDDIM);
FILE * fichier =fopen("datafile", "w+");
fprintf(fichier, "#particule X Y Z\n");
hipLaunchKernelGGL(( nbody), dim3(dimGrid), dim3(dimBlock) , 0, 0, nb, acc, spd, pos, m);
hipMemcpy(s->pos, pos, 3*NBPAR*sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < NBITER ; ++i)
{
hipLaunchKernelGGL(( nbody), dim3(dimGrid), dim3(dimBlock) , 0, 0, nb, acc, spd, pos, m);
hipMemcpy(s->pos, pos, 3*NBPAR*sizeof(double), hipMemcpyDeviceToHost);
for (int j = 0; j < NBPAR; ++j)
{
fprintf(fichier,
"%d %g %g %g\n",
j, s->pos[j], s->pos[j+NBPAR], s->pos[j+2*NBPAR]);
}
if(i!= NBITER -1)
fprintf(fichier, "\n\n");
}
/*pset_print(s);*/
fclose(fichier);
pset_free(s);
hipFree(nb);
hipFree(acc);
hipFree(spd);
hipFree(pos);
hipFree(m);
return 0;
} | fada6a2de4ca3fcf2356d47f064b5722002c9858.cu | #include "particule.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <sys/types.h>
#include <unistd.h>
#define NBITER 100
#define BLOCKSIZE 16
#define GRIDDIM 64
static int SEEDED =0;
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
unsigned long mix(unsigned long a, unsigned long b, unsigned long c)
{
a=a-b; a=a-c; a=a^(c >> 13);
b=b-c; b=b-a; b=b^(a << 8);
c=c-a; c=c-b; c=c^(b >> 13);
a=a-b; a=a-c; a=a^(c >> 12);
b=b-c; b=b-a; b=b^(a << 16);
c=c-a; c=c-b; c=c^(b >> 5);
a=a-b; a=a-c; a=a^(c >> 3);
b=b-c; b=b-a; b=b^(a << 10);
c=c-a; c=c-b; c=c^(b >> 15);
return c;
}
pset * pset_alloc(int nb_par){
pset * set = (pset *)malloc(sizeof(pset));
if(set == NULL)
{
fprintf(stderr, "Can't allocate memory for the set creation.\n");
exit(EXIT_FAILURE);
}
set->nb = nb_par;
set->m = (double*)malloc(nb_par * sizeof(double));
set->pos = (double*)malloc(3* nb_par * sizeof(double));
set->spd = (double*)malloc(3* nb_par * sizeof(double));
set->acc = (double*)malloc(3* nb_par * sizeof(double));
set->force = (double*)malloc(3* nb_par * sizeof(double));
return set;
}
void pset_free(pset * set){
free(set->pos);
free(set->spd);
free(set->acc);
free(set->m);
free(set->force);
free(set);
}
void pset_copy(pset * origin, pset * dest){
int nb = origin-> nb;
int sd = sizeof(double);
dest->nb = origin->nb;
memcpy(dest->m, origin->m , nb*sd);
memcpy(dest->acc, origin->acc, 3* nb*sd);
memcpy(dest->spd, origin->spd, 3* nb*sd);
memcpy(dest->pos, origin->pos, 3* nb*sd);
}
void pset_print(pset * set)
{
int i;
int size = set->nb;
for (i = 0; i < size; ++i)
{
printf("#Particule numéro : %d, de masse %g\n", i, set->m[i]);
printf("\tx:%g y:%g z:%g\n", set->pos[i], set->pos[i+ size], set->pos[i+ 2*size] );
printf("\tvx:%g vy:%g vz:%g\n",set->spd[i], set->spd[i+ size], set->spd[i+ 2*size]);
printf("\tax:%g ay:%g az:%g\n",set->acc[i], set->acc[i+ size], set->acc[i+ 2*size]);
}
}
void seed()
{
if(!SEEDED)
{
unsigned long seed = mix(clock(), time(NULL), getpid());
srand(seed);
SEEDED++;
}
}
void pset_init_rand(pset * s)
{
seed();
int i;
int size = s->nb;
for (i = 0; i < size; i++)
{
s->m[i] = 1.0e10;
s->pos[i] = MIN_RAND + rand()%(MAX_RAND-MIN_RAND);
s->pos[i+size] = MIN_RAND + rand()%(MAX_RAND-MIN_RAND);
s->pos[i+2*size] = MIN_RAND + rand()%(MAX_RAND-MIN_RAND);
s->spd[i] = 0;
s->spd[i+size] = 0;
s->spd[i+2*size] = 0;
s->acc[i] = 0;
s->acc[i+size] = 0;
s->acc[i+2*size] = 0;
}
}
/* Computes the orbital (circular) velocity for a given central mass and distance */
double v_orbit(double mass, double distance)
{
return sqrt(CONST_GRAV*mass/distance);
}
void pset_init_orbit(pset *s)
{
seed();
double dmin= 200, distance;
int size = s->nb;
s->pos[0 ] = 0;
s->pos[0 +size] = 0;
s->pos[0 +2*size] = 0;
s->spd[0 ] = 0;
s->spd[0 +size] = 0;
s->spd[0 +2*size] = 0;
s->acc[0 ] = 0;
s->acc[0 + size] = 0;
s->acc[0 +2*size] = 0;
s->m[0] = 1e10;
for (int i = 1; i < size; ++i)
{
distance = dmin*i + rand()% 50;
s->m[i] = s->m[0] /20000;
s->pos[i] = s->pos[0] -distance;
s->pos[i+size] = 0;
s->pos[i+2*size] = 0;
s->spd[i] = 0;
s->spd[i+size]= v_orbit(s->m[0], distance);
s->spd[i+2*size]= 0;
s->acc[i] = 0;
s->acc[i+size] = 0;
s->acc[i+2*size] = 0;
}
}
/** Takes as arguments the masses of two particles and the distance
 * between those particles, and returns the intensity of the
 * gravitational force between the two particles.
 */
__device__ void distance(double x1, double y1, double z1,
double x2, double y2, double z2,
double *res)
{
*res = sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1) );
}
__device__ void intensity(double m, double d, double * res)
{
*res = (CONST_GRAV * m / (d*d*d));
}
__device__ int getGlobalIdx_2D_2D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
__global__ void nbody(int* n, double* acc, double* spd, double* pos, double* m)
{
unsigned int idx = blockDim.x* blockIdx.x + threadIdx.x;
double d, inten1;
int j;
int size = *n;
double dt = 100.0;
if(idx >= size)
return;
acc[idx] = 0;
acc[idx+size] = 0;
acc[idx+2*size] = 0;
/* ATTENTION !! Doing it this way does not actually work. */
/* Indeed: we touch acc[j], while acc[j] may already have been
updated at the position stage (and there is actually a good chance
that this is the case, since the position update
has fewer operations to perform.)*/
for (j = 0; j < size; ++j)
{
if(j != idx)
{
distance(pos[idx], pos[idx+ size], pos[idx+ 2*size], pos[j],
pos[j+ size], pos[j+ 2*size], &d);
intensity(m[j], d, &inten1);
acc[idx]+= inten1 *(pos[j] - pos[idx]);
acc[idx+size]+= inten1 *(pos[j+size] - pos[idx+size]);
acc[idx+2*size]+= inten1 *(pos[j+2*size] - pos[idx+2*size]);
}
}
pos[idx]+= dt* spd[idx] + dt*dt/2 * acc[idx];
pos[idx + size]+= dt* spd[idx+ size] + dt*dt/2 * acc[idx+size];
pos[idx + 2*size]+= dt* spd[idx+ 2*size] + dt*dt/2 * acc[idx+2*size];
spd[idx]+= dt* acc[idx];
spd[idx + size]+= dt* acc[idx+ size];
spd[idx + 2*size]+= dt* acc[idx+ 2*size];
}
int main(int argc, char ** argv)
{
if(argc != 2){
fprintf(stderr, "Enter the number of particles\n");
exit(EXIT_FAILURE);
}
int NBPAR = atoi(argv[1]);
pset *s = pset_alloc(NBPAR);
pset_init_orbit(s);
/*pset_print(s);*/
int* nb;
double* acc, *spd, *pos, *m;
cudaMalloc((void**)&nb, 1*sizeof(int));
cudaMalloc((void**)&acc, 3*NBPAR*sizeof(double));
cudaMalloc((void**)&spd, 3*NBPAR*sizeof(double));
cudaMalloc((void**)&pos, 3*NBPAR*sizeof(double));
cudaMalloc((void**)&m, NBPAR*sizeof(double));
cudaMemcpy(nb, &s->nb, 1*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(acc, s->acc, 3*NBPAR*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(spd, s->spd, 3*NBPAR*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pos, s->pos, 3*NBPAR*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(m, s->m, NBPAR*sizeof(double), cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCKSIZE);
dim3 dimGrid(GRIDDIM);
FILE * fichier =fopen("datafile", "w+");
fprintf(fichier, "#particule X Y Z\n");
nbody<<< dimGrid, dimBlock >>>(nb, acc, spd, pos, m);
cudaMemcpy(s->pos, pos, 3*NBPAR*sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < NBITER ; ++i)
{
nbody<<< dimGrid, dimBlock >>>(nb, acc, spd, pos, m);
cudaMemcpy(s->pos, pos, 3*NBPAR*sizeof(double), cudaMemcpyDeviceToHost);
for (int j = 0; j < NBPAR; ++j)
{
fprintf(fichier,
"%d %g %g %g\n",
j, s->pos[j], s->pos[j+NBPAR], s->pos[j+2*NBPAR]);
}
if(i!= NBITER -1)
fprintf(fichier, "\n\n");
}
/*pset_print(s);*/
fclose(fichier);
pset_free(s);
cudaFree(nb);
cudaFree(acc);
cudaFree(spd);
cudaFree(pos);
cudaFree(m);
return 0;
} |
e5259453cda48f9677fa0959583b00680691d25d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Asher Elmquist
// =============================================================================
//
// RT kernels for sphere geometries
//
// =============================================================================
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#endif
#include "chrono_sensor/optix/shaders/device_utils.h"
extern "C" __global__ void __intersection__sphere_intersect() {
const float3 ray_orig = optixGetObjectRayOrigin();
const float3 ray_dir = optixGetObjectRayDirection();
const float ray_tmin = optixGetRayTmin();
const float ray_tmax = optixGetRayTmax();
    // calculate the three components of the quadratic equation that defines the intersection
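    // In object space the sphere is the unit sphere, so a hit satisfies |O + t*D|^2 = 1,
    // i.e. (D.D)*t^2 + 2*(O.D)*t + (O.O - 1) = 0; a, b and c below are those coefficients.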
float a = Dot(ray_dir, ray_dir);
float b = 2 * Dot(ray_dir, ray_orig);
float c = Dot(ray_orig, ray_orig) - 1;
float det = b * b - 4 * a * c;
if (det > 0) {
det = sqrtf(det);
float dist_near = (-b - det) / (2 * a);
float dist_far = (-b + det) / (2 * a);
if (dist_near <= dist_far) {
if (dist_near > ray_tmin && dist_near < ray_tmax) {
float3 p = ray_orig + ray_dir * dist_near;
float3 tangent_vector = make_float3(p.y, -p.x, 0);
float2 texcoord = make_float2(atan2(p.x, p.y) / (2 * CUDART_PI_F) + 0.5, p.z * 0.5 + 0.5);
optixReportIntersection(dist_near, //
0, //
reinterpret_cast<unsigned int&>(p.x), //
reinterpret_cast<unsigned int&>(p.y), //
reinterpret_cast<unsigned int&>(p.z), //
reinterpret_cast<unsigned int&>(texcoord.x),
reinterpret_cast<unsigned int&>(texcoord.y),
reinterpret_cast<unsigned int&>(tangent_vector.x),
reinterpret_cast<unsigned int&>(tangent_vector.y),
reinterpret_cast<unsigned int&>(tangent_vector.z));
} else if (dist_far > ray_tmin && dist_far < ray_tmax) {
float3 p = ray_orig + ray_dir * dist_far;
float3 tangent_vector = make_float3(p.y, -p.x, 0);
float2 texcoord = make_float2(atan2(p.x, p.y) / (2 * CUDART_PI_F) + 0.5, p.z * 0.5 + 0.5);
optixReportIntersection(dist_far, //
0, //
reinterpret_cast<unsigned int&>(p.x), reinterpret_cast<unsigned int&>(p.y),
reinterpret_cast<unsigned int&>(p.z), //
reinterpret_cast<unsigned int&>(texcoord.x),
reinterpret_cast<unsigned int&>(texcoord.y),
reinterpret_cast<unsigned int&>(tangent_vector.x),
reinterpret_cast<unsigned int&>(tangent_vector.y),
reinterpret_cast<unsigned int&>(tangent_vector.z));
}
}
}
}
| e5259453cda48f9677fa0959583b00680691d25d.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Asher Elmquist
// =============================================================================
//
// RT kernels for sphere geometries
//
// =============================================================================
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#endif
#include "chrono_sensor/optix/shaders/device_utils.h"
extern "C" __global__ void __intersection__sphere_intersect() {
const float3 ray_orig = optixGetObjectRayOrigin();
const float3 ray_dir = optixGetObjectRayDirection();
const float ray_tmin = optixGetRayTmin();
const float ray_tmax = optixGetRayTmax();
    // calculate the three components of the quadratic equation that defines the intersection
float a = Dot(ray_dir, ray_dir);
float b = 2 * Dot(ray_dir, ray_orig);
float c = Dot(ray_orig, ray_orig) - 1;
float det = b * b - 4 * a * c;
if (det > 0) {
det = sqrtf(det);
float dist_near = (-b - det) / (2 * a);
float dist_far = (-b + det) / (2 * a);
if (dist_near <= dist_far) {
if (dist_near > ray_tmin && dist_near < ray_tmax) {
float3 p = ray_orig + ray_dir * dist_near;
float3 tangent_vector = make_float3(p.y, -p.x, 0);
float2 texcoord = make_float2(atan2(p.x, p.y) / (2 * CUDART_PI_F) + 0.5, p.z * 0.5 + 0.5);
optixReportIntersection(dist_near, //
0, //
reinterpret_cast<unsigned int&>(p.x), //
reinterpret_cast<unsigned int&>(p.y), //
reinterpret_cast<unsigned int&>(p.z), //
reinterpret_cast<unsigned int&>(texcoord.x),
reinterpret_cast<unsigned int&>(texcoord.y),
reinterpret_cast<unsigned int&>(tangent_vector.x),
reinterpret_cast<unsigned int&>(tangent_vector.y),
reinterpret_cast<unsigned int&>(tangent_vector.z));
} else if (dist_far > ray_tmin && dist_far < ray_tmax) {
float3 p = ray_orig + ray_dir * dist_far;
float3 tangent_vector = make_float3(p.y, -p.x, 0);
float2 texcoord = make_float2(atan2(p.x, p.y) / (2 * CUDART_PI_F) + 0.5, p.z * 0.5 + 0.5);
optixReportIntersection(dist_far, //
0, //
reinterpret_cast<unsigned int&>(p.x), reinterpret_cast<unsigned int&>(p.y),
reinterpret_cast<unsigned int&>(p.z), //
reinterpret_cast<unsigned int&>(texcoord.x),
reinterpret_cast<unsigned int&>(texcoord.y),
reinterpret_cast<unsigned int&>(tangent_vector.x),
reinterpret_cast<unsigned int&>(tangent_vector.y),
reinterpret_cast<unsigned int&>(tangent_vector.z));
}
}
}
}
|
992b8b9f413d737227469cbbe676c144f2d69de3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define width 10000
#define height 10000
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void Reduce_noise(float *devPtr, size_t pitch )
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    // pitch is in bytes, so step to row j through a char* before indexing floats
    float *row = (float*)((char*)devPtr + j * pitch);
    // average of the left and right neighbours of element (j, i)
    float x = 0.0f;
    if (i > 0 && i < width - 1)
        x = (row[i - 1] + row[i + 1]) / 2;
    //row[i] = x;
    // border columns/rows would still need dedicated handling here
    if (i == 0) {
    }
    if (i == width - 1) {
    }
    if (j == 0) {
    }
    if (j == height - 1) {
    }
}
int main()
{
    // host buffer on the heap: width*height floats (~400 MB) would not fit on the stack
    float *h_A = new float[(size_t)width * height];
    size_t pitch;
    float *devPtr;
    hipMallocPitch ((void**)&devPtr, &pitch, width * sizeof(float), height);
    hipMemcpy2D(devPtr, pitch, h_A, width * sizeof(float), width * sizeof(float), height, hipMemcpyHostToDevice);
    dim3 threadsPerBlock(16, 16);
    dim3 blocksPerGrid((width + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (height + threadsPerBlock.y - 1) / threadsPerBlock.y);
    hipLaunchKernelGGL(( Reduce_noise), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devPtr, pitch);
    hipMemcpy2D(h_A, width * sizeof(float), devPtr, pitch, width * sizeof(float), height, hipMemcpyDeviceToHost);
    hipFree(devPtr);
    delete[] h_A;
/*
hipError_t hipMemcpy2D ( void * dst,
size_t dpitch,
const void * src,
size_t spitch,
size_t width,
size_t height,
enum hipMemcpyKind kind
)
dst - Destination memory address
dpitch - Pitch of destination memory
src - Source memory address
spitch - Pitch of source memory
width - Width of matrix transfer (columns in bytes)
height - Height of matrix transfer (rows)
kind - Type of transfer
hipError_t hipMallocPitch ( void ** devPtr,
size_t * pitch,
size_t width,
size_t height
)
devPtr - Pointer to allocated pitched device memory
pitch - Pitch for allocation
width - Requested pitched allocation width (in bytes)
height - Requested pitched allocation height
*/
    return 0;
}
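/* Editor's note (illustrative, not from the original author): the width arguments of
 * hipMallocPitch and hipMemcpy2D are in BYTES while height counts rows, and the pitch
 * returned by hipMallocPitch is also in bytes, so row j of a pitched allocation starts at
 *   float *row = (float*)((char*)d_img + j * pitch);
 */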
| 992b8b9f413d737227469cbbe676c144f2d69de3.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define width 10000
#define height 10000
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void Reduce_noise(float *devPtr, size_t pitch )
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    // pitch is in bytes, so step to row j through a char* before indexing floats
    float *row = (float*)((char*)devPtr + j * pitch);
    // average of the left and right neighbours of element (j, i)
    float x = 0.0f;
    if (i > 0 && i < width - 1)
        x = (row[i - 1] + row[i + 1]) / 2;
    //row[i] = x;
    // border columns/rows would still need dedicated handling here
    if (i == 0) {
    }
    if (i == width - 1) {
    }
    if (j == 0) {
    }
    if (j == height - 1) {
    }
}
int main()
{
    // host buffer on the heap: width*height floats (~400 MB) would not fit on the stack
    float *h_A = new float[(size_t)width * height];
    size_t pitch;
    float *devPtr;
    cudaMallocPitch ((void**)&devPtr, &pitch, width * sizeof(float), height);
    cudaMemcpy2D(devPtr, pitch, h_A, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(16, 16);
    dim3 blocksPerGrid((width + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (height + threadsPerBlock.y - 1) / threadsPerBlock.y);
    Reduce_noise<<<blocksPerGrid, threadsPerBlock>>>(devPtr, pitch);
    cudaMemcpy2D(h_A, width * sizeof(float), devPtr, pitch, width * sizeof(float), height, cudaMemcpyDeviceToHost);
    cudaFree(devPtr);
    delete[] h_A;
/*
cudaError_t cudaMemcpy2D ( void * dst,
size_t dpitch,
const void * src,
size_t spitch,
size_t width,
size_t height,
enum cudaMemcpyKind kind
)
dst - Destination memory address
dpitch - Pitch of destination memory
src - Source memory address
spitch - Pitch of source memory
width - Width of matrix transfer (columns in bytes)
height - Height of matrix transfer (rows)
kind - Type of transfer
cudaError_t cudaMallocPitch ( void ** devPtr,
size_t * pitch,
size_t width,
size_t height
)
devPtr - Pointer to allocated pitched device memory
pitch - Pitch for allocation
width - Requested pitched allocation width (in bytes)
height - Requested pitched allocation height
*/
|
85d2762b83fc3549a9f4b21e603ea2f367491225.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GF100-optimized variant of the "Speculative while-while"
kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact;
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 4; // 4*32 = 128 threads, optimal for GTX480
}
//------------------------------------------------------------------------
TRACE_FUNC_BVH
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
float hitU, hitV; // Barycentric coordinates
// Initialize.
{
// Pick ray index.
rayidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y));
if (rayidx >= numRays)
return;
// Fetch ray.
float4 o = rays[rayidx * 2 + 0];
float4 d = rays[rayidx * 2 + 1];
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
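        // Precomputing idir = 1/dir and ood = orig/dir turns each slab test below into a single
        // multiply-add: t = (plane - orig) / dir = plane * idir - ood.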
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodesA + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
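            // Woop's scheme stores each triangle as three float4 rows (v00, v11, v22 below) of an
            // affine transform into a "unit triangle" space, so t, u and v each come from one dot
            // product plus a scale instead of an explicit edge-function test.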
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitU = u;
hitV = v;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if(nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
STORE_RESULT(rayidx, hitIndex, hitT, hitU, hitV);
}
//------------------------------------------------------------------------
| 85d2762b83fc3549a9f4b21e603ea2f367491225.cu | /*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GF100-optimized variant of the "Speculative while-while"
kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact;
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 4; // 4*32 = 128 threads, optimal for GTX480
}
//------------------------------------------------------------------------
TRACE_FUNC_BVH
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
float hitU, hitV; // Barycentric coordinates
// Initialize.
{
// Pick ray index.
rayidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y));
if (rayidx >= numRays)
return;
// Fetch ray.
float4 o = rays[rayidx * 2 + 0];
float4 d = rays[rayidx * 2 + 1];
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodesA + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitU = u;
hitV = v;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if(nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
STORE_RESULT(rayidx, hitIndex, hitT, hitU, hitV);
}
//------------------------------------------------------------------------
|
409b135978ce5528787a7e12a8cf7de1a450a918.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define N 10
using namespace std;
__global__ void add(int *a, int *b, int *c) {
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main(void) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **) &dev_a, N * sizeof(int));
hipMalloc((void **) &dev_b, N * sizeof(int));
hipMalloc((void **) &dev_c, N * sizeof(int));
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i * 1;
}
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyDeviceToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToDevice);
//
for (int i = 0; i < N; i++) {
cout << "a[i]+b[i]+c[i]=" << a[i] + b[i] + c[i] << endl;
}
// todo
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 409b135978ce5528787a7e12a8cf7de1a450a918.cu | #include <iostream>
#define N 10
using namespace std;
__global__ void add(int *a, int *b, int *c) {
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main(void) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **) &dev_a, N * sizeof(int));
cudaMalloc((void **) &dev_b, N * sizeof(int));
cudaMalloc((void **) &dev_c, N * sizeof(int));
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i * 1;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyDeviceToDevice);
add<<<N, 1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToDevice);
// 显示结果
for (int i = 0; i < N; i++) {
cout << "a[i]+b[i]+c[i]=" << a[i] + b[i] + c[i] << endl;
}
// 释放内存(todo:不释放会如何)
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
71049d1a04f607fe8f9386b97834839fac98ff12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void BFS_kernel_one_block( volatile unsigned int *frontier, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, unsigned int num_p_per_mp, unsigned int w_q_size) { ; } | 71049d1a04f607fe8f9386b97834839fac98ff12.cu | #include "includes.h"
__global__ void BFS_kernel_one_block( volatile unsigned int *frontier, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, unsigned int num_p_per_mp, unsigned int w_q_size) { ; } |
a1e46bf069950757f6d7fffc64389c00189f832c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_bcnn_pow_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float a = 2;
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(_bcnn_pow_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, a, y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL(_bcnn_pow_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, a, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL(_bcnn_pow_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, a, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a1e46bf069950757f6d7fffc64389c00189f832c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_bcnn_pow_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float a = 2;
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_bcnn_pow_kernel<<<gridBlock,threadBlock>>>(n,x,a,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_bcnn_pow_kernel<<<gridBlock,threadBlock>>>(n,x,a,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_bcnn_pow_kernel<<<gridBlock,threadBlock>>>(n,x,a,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
05e385d530b48141f5ce36d8b5938ed6f3f16fa9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifdef VCUDA
#define globalifcuda __device__ __host__
#include "./tiny-AES-c/aes.cu"
#else
#define globalifcuda
#include <aes.h>
#endif
typedef int64_t i64;
typedef union aeskey {
char bytes[32];
i64 ints[4];
} AESkey_t;
void print_key(const AESkey_t* key);
// Creates a mask to capture the first n bits (starts from lsb)
AESkey_t make_mask(i64 n) {
AESkey_t mask;
// Accept all bits initially
for (int i = 0; i < sizeof(AESkey_t) / sizeof(i64); i++) mask.ints[i] = -1LL;
i64 index = 0;
// Ignore whole bytes
while (n >= 8) {
mask.bytes[index++] = 0;
n -= 8;
}
// Ignore 1 to 7 bits, if needed
if (n)
mask.bytes[index] = ~(( 1 << n ) - 1);
return mask;
}
globalifcuda
void apply_mask(const AESkey_t *mask, AESkey_t *key) {
for (int i = 0; i < 4; i++) key->ints[i] &= mask->ints[i];
}
globalifcuda
void apply_bits_to_key(const AESkey_t *mask, AESkey_t *key, i64 bits, i64 nbits) {
apply_mask(mask, key);
bits &= (1L << nbits) - 1L;
key->ints[0] |= bits;
}
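/* Example (editor's note): with nbits = 8, make_mask() zeroes the key's lowest byte, so
 * apply_bits_to_key(&mask, &key, bits, 8) keeps the rest of the key and substitutes the
 * candidate value `bits` (0..255) into that byte -- this is exactly what the brute-force
 * loops below enumerate over the unknown low bits. */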
void print_key(const AESkey_t *mask) {
for (int i = 0; i < 4; i += 1)
printf("%lx ", mask->ints[i]);
printf("\n");
}
globalifcuda
int bytes_eq(const char* a, const char* b, i64 n) {
for (int i = 0; i < n ; i ++)
if (a[i] != b[i])
return 0;
return 1;
}
#ifdef VOMP
int atomic_done = 0;
// `new` is a C++ keyword, so the flag setter takes its argument as `value`.
void set_atomic_done(int value) {
#pragma omp atomic write
    atomic_done = value;
}
int get_atomic_done() {
    int done;
#pragma omp atomic read
done = atomic_done;
return done;
}
void crack(const AESkey_t* partial_key, i64 nbits, char* iv, const char* plaintext, const char* true_ciphertext, i64 len) {
AESkey_t mask = make_mask(nbits);
#pragma omp parallel
#pragma omp for
for (i64 bits = 0; bits < (1L << nbits) - 1; bits++) {
if (get_atomic_done()) {
// break
#pragma omp cancel for
}
struct AES_ctx ctx;
AESkey_t local_partial_key = *partial_key;
char ciphertext[512];
strcpy(ciphertext, plaintext);
// printf("Trying key with bits %lx\nKey: ", bits);
apply_bits_to_key(&mask, &local_partial_key, bits, nbits);
// Reset the AES context
AES_init_ctx_iv(&ctx, (const uint8_t*) &local_partial_key, iv);
// Encrypt the ciphertext (modifies ciphertext in place)
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) ciphertext, len);
if (bytes_eq(ciphertext, true_ciphertext, len - 1)) {
printf("Key found:\n");
print_key(&local_partial_key);
set_atomic_done(1);
}
}
if (get_atomic_done() == 0) {
printf("Failed to find key.\n");
}
}
#endif
#ifdef VCUDA
__global__ void crack( i64 n, const AESkey_t* mask, const AESkey_t* partial_key, i64 nbits, i64 bits_per_thread,
const uint8_t* iv, const char* plaintext, const char* true_ciphertext, i64 len,
int* done, AESkey_t *true_key, char* debug_ciphertext) {
if (*done == 0) {
for (i64 i = 0; i < (1 << bits_per_thread); i++) {
i64 bits = ((i64) blockIdx.x) * ((i64) blockDim.x) + ((i64) threadIdx.x);
if (bits >= n) return;
// Not masking out the lower bits of bits nor the upper bits of i, should be okay though.
bits <<= bits_per_thread;
bits |= i;
struct AES_ctx ctx;
AESkey_t local_partial_key = *partial_key;
char ciphertext[128];
for (int i = 0; i < (int) len; i += 1)
ciphertext[i] = plaintext[i];
// printf("Trying key with bits %lx\nKey: ", bits);
apply_bits_to_key(mask, &local_partial_key, bits, nbits + bits_per_thread);
// Reset the AES context
AES_init_ctx_iv(&ctx, (const uint8_t*) &local_partial_key, iv);
// Encrypt the ciphertext (modifies ciphertext in place)
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) ciphertext, len);
if (bytes_eq(ciphertext, true_ciphertext, len - 1)) {
// print_key(&local_partial_key);
*done = -1;
*true_key = local_partial_key;
break;
}
}
}
}
#else
#ifndef VOMP
// Sequential version
void crack(const AESkey_t* partial_key, i64 nbits, char* iv, const char* plaintext, const char* true_ciphertext, i64 len) {
AESkey_t mask = make_mask(nbits);
struct AES_ctx ctx;
char ciphertext[256];
  for (i64 bits = 0; bits < (1L << nbits); bits++) { // try all 2^nbits candidate suffixes
AESkey_t local_partial_key = *partial_key;
for (int i = 0; i < (int) len; i += 1)
ciphertext[i] = plaintext[i];
// printf("Trying key with bits %lx\nKey: ", bits);
apply_bits_to_key(&mask, &local_partial_key, bits, nbits);
// Reset the AES context
AES_init_ctx_iv(&ctx, (const uint8_t*) &local_partial_key, iv);
// Encrypt the ciphertext (modifies ciphertext in place)
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) ciphertext, len);
if (bytes_eq(ciphertext, true_ciphertext, len - 1)) {
printf("Key found:\n");
print_key(&local_partial_key);
return;
}
}
printf("Failed to find key.\n");
}
#endif
#endif
int main() {
char plaintext[] = "TESTTESTTESTTES\0";
char *true_ciphertext = (char *) malloc(512);
AESkey_t key;
for (int i = 0; i < sizeof(AESkey_t); i += 1) key.bytes[i] = rand() & 0xFF;
i64 nbits = 32;
i64 true_bits = 0xDEADBEEFCAFEBABE;
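  // Demo setup: the key is random except for its low nbits, which are overwritten below
  // with the low nbits of true_bits, so the brute-force search space is 2^nbits suffixes.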
AESkey_t mask = make_mask(nbits);
printf("Mask: \n"); print_key(&mask);
apply_bits_to_key(&mask, &key, true_bits, nbits);
printf("true key: \n");
print_key(&key);
srand(0x12574123);
uint8_t iv[256];
for (int i = 0 ; i < 32; i += 1)
iv[i] = rand() & 0xFF;
struct AES_ctx ctx;
AES_init_ctx_iv(&ctx, (const uint8_t*) &key, iv);
strcpy(true_ciphertext, plaintext);
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) true_ciphertext, sizeof(plaintext));
#ifdef VCUDA
hipError_t code = hipPeekAtLastError();
#define check_for_cuda_err(line) \
if ((code=hipPeekAtLastError()) != hipSuccess) { \
printf("Encountered cuda error on line %d: \n %s\n", line, hipGetErrorString(code)); \
exit(-1); \
}
apply_bits_to_key(&mask, &key, true_bits, nbits);
int *done;
hipMallocManaged(&done, sizeof(int));
*done = 0;
AESkey_t *true_key;
hipMallocManaged(&true_key, sizeof(AESkey_t));
*true_key = AESkey_t { ints: { 0L,0L,0L,0L} };
char *plaintext_d;
hipMallocManaged(&plaintext_d, 256);
strcpy(plaintext_d, plaintext);
char *true_ciphertext_d;
hipMallocManaged(&true_ciphertext_d, 256);
strcpy(true_ciphertext_d, true_ciphertext);
char *debug_ciphertext;
hipMallocManaged(&debug_ciphertext, 256);
strcpy(debug_ciphertext, true_ciphertext);
uint8_t *iv_d;
hipMallocManaged(&iv_d, 32);
memcpy(iv_d, iv, 32);
AESkey_t *key_d;
AESkey_t *mask_d;
hipMallocManaged(&key_d, sizeof(AESkey_t));
hipMallocManaged(&mask_d, sizeof(AESkey_t));
*key_d = key;
*mask_d = mask;
if (nbits > 4) {
i64 nbits_used = nbits - 4L;
i64 nblocks = 1L << nbits_used;
printf("nblocks = %d\n", (1024L + nblocks) / 1024);
hipLaunchKernelGGL(( crack), dim3((1024L + nblocks)/1024L), dim3(1024L), 0, 0, 1 << (nbits_used), mask_d, key_d, nbits_used, 4,
iv_d, plaintext_d, true_ciphertext_d,
sizeof(plaintext), done, true_key, debug_ciphertext);
} else {
    hipLaunchKernelGGL(( crack), dim3((1024L + (1L << nbits)) / 1024L), dim3(1024L), 0, 0, 1 << nbits, mask_d, key_d, nbits, 0,
iv_d, plaintext_d, true_ciphertext_d,
sizeof(plaintext), done, true_key, debug_ciphertext);
}
check_for_cuda_err(__LINE__);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
check_for_cuda_err(__LINE__);
printf("true key bits: %lx\n", true_key->ints[0]);
if (*done < 0) {
printf("Calculated true key:\n");
print_key(true_key);
} else {
printf("Failed to find true key.\n");
}
#else
crack(&key, nbits, iv, plaintext, true_ciphertext, sizeof(plaintext));
#endif
free(true_ciphertext);
}
| 05e385d530b48141f5ce36d8b5938ed6f3f16fa9.cu | #include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifdef VCUDA
#define globalifcuda __device__ __host__
#include "./tiny-AES-c/aes.cu"
#else
#define globalifcuda
#include <aes.h>
#endif
typedef int64_t i64;
typedef union aeskey {
char bytes[32];
i64 ints[4];
} AESkey_t;
void print_key(const AESkey_t* key);
// Creates a mask to capture the first n bits (starts from lsb)
AESkey_t make_mask(i64 n) {
AESkey_t mask;
// Accept all bits initially
for (int i = 0; i < sizeof(AESkey_t) / sizeof(i64); i++) mask.ints[i] = -1LL;
i64 index = 0;
// Ignore whole bytes
while (n >= 8) {
mask.bytes[index++] = 0;
n -= 8;
}
// Ignore 1 to 7 bits, if needed
if (n)
mask.bytes[index] = ~(( 1 << n ) - 1);
return mask;
}
globalifcuda
void apply_mask(const AESkey_t *mask, AESkey_t *key) {
for (int i = 0; i < 4; i++) key->ints[i] &= mask->ints[i];
}
globalifcuda
void apply_bits_to_key(const AESkey_t *mask, AESkey_t *key, i64 bits, i64 nbits) {
apply_mask(mask, key);
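  // Note: only ints[0] (the first 64 key bits) is patched, so the number of brute-forced
  // bits (nbits, plus bits_per_thread in the GPU path) is assumed to stay below 64.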
bits &= (1L << nbits) - 1L;
key->ints[0] |= bits;
}
void print_key(const AESkey_t *mask) {
for (int i = 0; i < 4; i += 1)
printf("%lx ", mask->ints[i]);
printf("\n");
}
globalifcuda
int bytes_eq(const char* a, const char* b, i64 n) {
for (int i = 0; i < n ; i ++)
if (a[i] != b[i])
return 0;
return 1;
}
#ifdef VOMP
int atomic_done = 0;
void set_atomic_done(int value) {
#pragma omp atomic write
  atomic_done = value;
}
int get_atomic_done() {
int done;
#pragma omp atomic read
done = atomic_done;
return done;
}
void crack(const AESkey_t* partial_key, i64 nbits, char* iv, const char* plaintext, const char* true_ciphertext, i64 len) {
AESkey_t mask = make_mask(nbits);
#pragma omp parallel
#pragma omp for
  for (i64 bits = 0; bits < (1L << nbits); bits++) { // try all 2^nbits candidate suffixes
if (get_atomic_done()) {
// break
#pragma omp cancel for
}
struct AES_ctx ctx;
AESkey_t local_partial_key = *partial_key;
char ciphertext[512];
strcpy(ciphertext, plaintext);
// printf("Trying key with bits %lx\nKey: ", bits);
apply_bits_to_key(&mask, &local_partial_key, bits, nbits);
// Reset the AES context
AES_init_ctx_iv(&ctx, (const uint8_t*) &local_partial_key, iv);
// Encrypt the ciphertext (modifies ciphertext in place)
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) ciphertext, len);
if (bytes_eq(ciphertext, true_ciphertext, len - 1)) {
printf("Key found:\n");
print_key(&local_partial_key);
set_atomic_done(1);
}
}
if (get_atomic_done() == 0) {
printf("Failed to find key.\n");
}
}
#endif
#ifdef VCUDA
__global__ void crack( i64 n, const AESkey_t* mask, const AESkey_t* partial_key, i64 nbits, i64 bits_per_thread,
const uint8_t* iv, const char* plaintext, const char* true_ciphertext, i64 len,
int* done, AESkey_t *true_key, char* debug_ciphertext) {
if (*done == 0) {
for (i64 i = 0; i < (1 << bits_per_thread); i++) {
i64 bits = ((i64) blockIdx.x) * ((i64) blockDim.x) + ((i64) threadIdx.x);
if (bits >= n) return;
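      // Key-space split: the global thread index supplies the high bits of the candidate
      // suffix and the loop counter i supplies the low bits_per_thread bits, so each
      // thread tests 2^bits_per_thread consecutive candidates.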
// Not masking out the lower bits of bits nor the upper bits of i, should be okay though.
bits <<= bits_per_thread;
bits |= i;
struct AES_ctx ctx;
AESkey_t local_partial_key = *partial_key;
char ciphertext[128];
for (int i = 0; i < (int) len; i += 1)
ciphertext[i] = plaintext[i];
// printf("Trying key with bits %lx\nKey: ", bits);
apply_bits_to_key(mask, &local_partial_key, bits, nbits + bits_per_thread);
// Reset the AES context
AES_init_ctx_iv(&ctx, (const uint8_t*) &local_partial_key, iv);
// Encrypt the ciphertext (modifies ciphertext in place)
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) ciphertext, len);
if (bytes_eq(ciphertext, true_ciphertext, len - 1)) {
// print_key(&local_partial_key);
*done = -1;
*true_key = local_partial_key;
break;
}
}
}
}
#else
#ifndef VOMP
// Sequential version
void crack(const AESkey_t* partial_key, i64 nbits, char* iv, const char* plaintext, const char* true_ciphertext, i64 len) {
AESkey_t mask = make_mask(nbits);
struct AES_ctx ctx;
char ciphertext[256];
  for (i64 bits = 0; bits < (1L << nbits); bits++) { // try all 2^nbits candidate suffixes
AESkey_t local_partial_key = *partial_key;
for (int i = 0; i < (int) len; i += 1)
ciphertext[i] = plaintext[i];
// printf("Trying key with bits %lx\nKey: ", bits);
apply_bits_to_key(&mask, &local_partial_key, bits, nbits);
// Reset the AES context
AES_init_ctx_iv(&ctx, (const uint8_t*) &local_partial_key, iv);
// Encrypt the ciphertext (modifies ciphertext in place)
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) ciphertext, len);
if (bytes_eq(ciphertext, true_ciphertext, len - 1)) {
printf("Key found:\n");
print_key(&local_partial_key);
return;
}
}
printf("Failed to find key.\n");
}
#endif
#endif
int main() {
char plaintext[] = "TESTTESTTESTTES\0";
char *true_ciphertext = (char *) malloc(512);
AESkey_t key;
for (int i = 0; i < sizeof(AESkey_t); i += 1) key.bytes[i] = rand() & 0xFF;
i64 nbits = 32;
i64 true_bits = 0xDEADBEEFCAFEBABE;
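  // Demo setup: the key is random except for its low nbits, which are overwritten below
  // with the low nbits of true_bits, so the brute-force search space is 2^nbits suffixes.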
AESkey_t mask = make_mask(nbits);
printf("Mask: \n"); print_key(&mask);
apply_bits_to_key(&mask, &key, true_bits, nbits);
printf("true key: \n");
print_key(&key);
srand(0x12574123);
uint8_t iv[256];
for (int i = 0 ; i < 32; i += 1)
iv[i] = rand() & 0xFF;
struct AES_ctx ctx;
AES_init_ctx_iv(&ctx, (const uint8_t*) &key, iv);
strcpy(true_ciphertext, plaintext);
AES_CBC_encrypt_buffer(&ctx, (uint8_t *) true_ciphertext, sizeof(plaintext));
#ifdef VCUDA
cudaError_t code = cudaPeekAtLastError();
#define check_for_cuda_err(line) \
if ((code=cudaPeekAtLastError()) != cudaSuccess) { \
printf("Encountered cuda error on line %d: \n %s\n", line, cudaGetErrorString(code)); \
exit(-1); \
}
apply_bits_to_key(&mask, &key, true_bits, nbits);
int *done;
cudaMallocManaged(&done, sizeof(int));
*done = 0;
AESkey_t *true_key;
cudaMallocManaged(&true_key, sizeof(AESkey_t));
*true_key = AESkey_t { ints: { 0L,0L,0L,0L} };
char *plaintext_d;
cudaMallocManaged(&plaintext_d, 256);
strcpy(plaintext_d, plaintext);
char *true_ciphertext_d;
cudaMallocManaged(&true_ciphertext_d, 256);
strcpy(true_ciphertext_d, true_ciphertext);
char *debug_ciphertext;
cudaMallocManaged(&debug_ciphertext, 256);
strcpy(debug_ciphertext, true_ciphertext);
uint8_t *iv_d;
cudaMallocManaged(&iv_d, 32);
memcpy(iv_d, iv, 32);
AESkey_t *key_d;
AESkey_t *mask_d;
cudaMallocManaged(&key_d, sizeof(AESkey_t));
cudaMallocManaged(&mask_d, sizeof(AESkey_t));
*key_d = key;
*mask_d = mask;
if (nbits > 4) {
i64 nbits_used = nbits - 4L;
i64 nblocks = 1L << nbits_used;
printf("nblocks = %d\n", (1024L + nblocks) / 1024);
crack<<<(1024L + nblocks)/1024L, 1024L>>>(1 << (nbits_used), mask_d, key_d, nbits_used, 4,
iv_d, plaintext_d, true_ciphertext_d,
sizeof(plaintext), done, true_key, debug_ciphertext);
} else {
crack<<<(1024L + (1L << nbits)) / 1024L, 1024L>>>(1 << nbits, mask_d, key_d, nbits, 0,
iv_d, plaintext_d, true_ciphertext_d,
sizeof(plaintext), done, true_key, debug_ciphertext);
}
check_for_cuda_err(__LINE__);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
check_for_cuda_err(__LINE__);
printf("true key bits: %lx\n", true_key->ints[0]);
if (*done < 0) {
printf("Calculated true key:\n");
print_key(true_key);
} else {
printf("Failed to find true key.\n");
}
#else
crack(&key, nbits, iv, plaintext, true_ciphertext, sizeof(plaintext));
#endif
free(true_ciphertext);
}
|
88d5459b9a914e40033193618b3f57feb5abc754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "j3d27pt-64x16-3-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 54
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
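      // AN5D-generated time tiling: __side0Len is the number of time steps fused into one
      // kernel launch, __side1..3Len are the spatial tile sizes, and each tile is widened
      // by __OlLen = halo * __side0Len cells per side (the *LenOl sizes) to supply the
      // overlap the fused time steps consume.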
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
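      // Time steps left over after the 3-step-fused launches are finished off below with
      // the 2-step (kernel0_2) and 1-step (kernel0_1) variants.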
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
(1.500f*A[t%2][i-1][j][k] +
0.500f*A[t%2][i-1][j-1][k-1] +
0.700f*A[t%2][i-1][j-1][k] +
0.900f*A[t%2][i-1][j-1][k+1] +
1.200f*A[t%2][i-1][j][k-1] +
1.201f*A[t%2][i-1][j][k+1] +
0.901f*A[t%2][i-1][j+1][k-1] +
0.701f*A[t%2][i-1][j+1][k] +
0.501f*A[t%2][i-1][j+1][k+1] +
1.510f*A[t%2][i][j][k] +
0.510f*A[t%2][i][j-1][k-1] +
0.710f*A[t%2][i][j-1][k] +
0.910f*A[t%2][i][j-1][k+1] +
1.210f*A[t%2][i][j][k-1] +
1.211f*A[t%2][i][j][k+1] +
0.911f*A[t%2][i][j+1][k-1] +
0.711f*A[t%2][i][j+1][k] +
0.511f*A[t%2][i][j+1][k+1] +
1.520f*A[t%2][i+1][j][k] +
0.520f*A[t%2][i+1][j-1][k-1] +
0.720f*A[t%2][i+1][j-1][k] +
0.920f*A[t%2][i+1][j-1][k+1] +
1.220f*A[t%2][i+1][j][k-1] +
1.221f*A[t%2][i+1][j][k+1] +
0.921f*A[t%2][i+1][j+1][k-1] +
0.721f*A[t%2][i+1][j+1][k] +
0.521f*A[t%2][i+1][j+1][k+1]) / 159;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 88d5459b9a914e40033193618b3f57feb5abc754.cu | #include <assert.h>
#include <stdio.h>
#include "j3d27pt-64x16-3-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 54
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
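      // AN5D-generated time tiling: __side0Len is the number of time steps fused into one
      // kernel launch, __side1..3Len are the spatial tile sizes, and each tile is widened
      // by __OlLen = halo * __side0Len cells per side (the *LenOl sizes) to supply the
      // overlap the fused time steps consume.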
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
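      // Time steps left over after the 3-step-fused launches are finished off below with
      // the 2-step (kernel0_2) and 1-step (kernel0_1) variants.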
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
(1.500f*A[t%2][i-1][j][k] +
0.500f*A[t%2][i-1][j-1][k-1] +
0.700f*A[t%2][i-1][j-1][k] +
0.900f*A[t%2][i-1][j-1][k+1] +
1.200f*A[t%2][i-1][j][k-1] +
1.201f*A[t%2][i-1][j][k+1] +
0.901f*A[t%2][i-1][j+1][k-1] +
0.701f*A[t%2][i-1][j+1][k] +
0.501f*A[t%2][i-1][j+1][k+1] +
1.510f*A[t%2][i][j][k] +
0.510f*A[t%2][i][j-1][k-1] +
0.710f*A[t%2][i][j-1][k] +
0.910f*A[t%2][i][j-1][k+1] +
1.210f*A[t%2][i][j][k-1] +
1.211f*A[t%2][i][j][k+1] +
0.911f*A[t%2][i][j+1][k-1] +
0.711f*A[t%2][i][j+1][k] +
0.511f*A[t%2][i][j+1][k+1] +
1.520f*A[t%2][i+1][j][k] +
0.520f*A[t%2][i+1][j-1][k-1] +
0.720f*A[t%2][i+1][j-1][k] +
0.920f*A[t%2][i+1][j-1][k+1] +
1.220f*A[t%2][i+1][j][k-1] +
1.221f*A[t%2][i+1][j][k+1] +
0.921f*A[t%2][i+1][j+1][k-1] +
0.721f*A[t%2][i+1][j+1][k] +
0.521f*A[t%2][i+1][j+1][k+1]) / 159;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
0f82478b8d8c18eb371e23e1ce4140064f65d441.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compact.h"
#include "scatter.h"
#include "mp3-util.h"
#include "math.h"
__global__
void create_bit_vector(
unsigned int* d_bit_vector, const real* d_call, const real* d_put,
size_t* num_compacted, const size_t n, const real min_call_threshold,
const real min_put_threshold
) {
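  // Marks each option whose first-round call and put prices meet both thresholds with a 1
  // in d_bit_vector and counts the survivors atomically; the 0/1 vector feeds the prefix
  // sum that yields scatter destinations for the compacted arrays.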
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
if (d_call[i] >= min_call_threshold && d_put[i] >= min_put_threshold)
{
d_bit_vector[i] = 1;
      atomicAdd((unsigned long long *)num_compacted, 1ULL); // no size_t overload of atomicAdd, so count as unsigned long long
}
//printf("%d, %d\n", d_call[i], d_put[i]);
}
}
__global__
void test_bit_vector(
unsigned int* d_bit_vector, const unsigned int *d_input,
unsigned int *num_compacted, const size_t n
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
if (d_input[i] % 2 == 0)
{
d_bit_vector[i] = 1;
atomicAdd(num_compacted, 1);
}
}
}
// compact_options copies the input options whose call and put
// results from the first round meet or exceed the given call & put
// thresholds to a compacted output in three result arrays.
size_t compact_options(
const real min_call_threshold, const real min_put_threshold,
const real *d_call, const real *d_put, const real *d_stock_price_input,
const real *d_option_strike_input, const real *d_option_years_input,
const size_t n, real *d_stock_price_result, real *d_option_strike_result,
real *d_option_years_result
) {
// Keeps track of how many options are compacted
size_t h_num_compacted = 0;
size_t *d_num_compacted = 0;
hipMalloc((void**)&d_num_compacted, sizeof(size_t));
hipMemset(d_num_compacted, 0, sizeof(size_t));
check_cuda_error("Malloc (d_num_compacted)", __FILE__, __LINE__);
// Holds bit vector of options that meet call/put minimum thresholds
unsigned int *h_bit_vector = 0;
unsigned int *d_bit_vector = 0;
h_bit_vector = (unsigned int*)malloc(n * sizeof(unsigned int));
hipMalloc((void**)&d_bit_vector, n * sizeof(unsigned int));
hipMemset(d_bit_vector, 0, n * sizeof(unsigned int));
check_cuda_error("Malloc (d_bit_vector)", __FILE__, __LINE__);
// Holds the sums of each section of the bit vector
unsigned int *d_block_sums = 0;
  hipMalloc((void**)&d_block_sums, ((n + BLOCK_SIZE - 1) / BLOCK_SIZE) * sizeof(unsigned int));
check_cuda_error("Malloc (d_block_sums)", __FILE__, __LINE__);
// Holds the final prefix sum calculated using the Hillis and Steele algorithm
unsigned int *d_prefix_sum = 0;
hipMalloc((void**)&d_prefix_sum, n * sizeof(unsigned int));
check_cuda_error("Malloc (d_prefix_sum)", __FILE__, __LINE__);
  // Calculate number of threadblocks, rounding up so a partial final block covers any tail elements
  size_t num_blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( create_bit_vector), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0,
d_bit_vector, d_call, d_put, d_num_compacted, n, min_call_threshold, min_put_threshold
);
check_cuda_error("bit_vector", __FILE__, __LINE__);
hipLaunchKernelGGL(( scan), dim3(num_blocks), dim3(BLOCK_SIZE), 2 * BLOCK_SIZE * sizeof(unsigned int), 0,
d_bit_vector, d_prefix_sum, n, d_block_sums
);
check_cuda_error("scan", __FILE__, __LINE__);
hipLaunchKernelGGL(( scan_update), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, d_bit_vector, d_prefix_sum, n, d_block_sums);
check_cuda_error("scan_update", __FILE__, __LINE__);
hipLaunchKernelGGL(( scatter_options_kernel), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0,
min_call_threshold, min_put_threshold, d_call,
d_put, d_stock_price_input, d_option_strike_input,
d_option_years_input, d_prefix_sum, n, d_stock_price_result,
d_option_strike_result, d_option_years_result
);
check_cuda_error("scan_update", __FILE__, __LINE__);
hipMemcpy(
    &h_num_compacted, d_num_compacted, sizeof(size_t), hipMemcpyDeviceToHost
);
hipFree(d_num_compacted);
hipFree(d_bit_vector);
hipFree(d_block_sums);
hipFree(d_prefix_sum);
free(h_bit_vector);
return h_num_compacted;
}
| 0f82478b8d8c18eb371e23e1ce4140064f65d441.cu | #include "compact.h"
#include "scatter.h"
#include "mp3-util.h"
#include "math.h"
__global__
void create_bit_vector(
unsigned int* d_bit_vector, const real* d_call, const real* d_put,
size_t* num_compacted, const size_t n, const real min_call_threshold,
const real min_put_threshold
) {
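  // Marks each option whose first-round call and put prices meet both thresholds with a 1
  // in d_bit_vector and counts the survivors atomically; the 0/1 vector feeds the prefix
  // sum that yields scatter destinations for the compacted arrays.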
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
if (d_call[i] >= min_call_threshold && d_put[i] >= min_put_threshold)
{
d_bit_vector[i] = 1;
      atomicAdd((unsigned long long *)num_compacted, 1ULL); // no size_t overload of atomicAdd, so count as unsigned long long
}
//printf("%d, %d\n", d_call[i], d_put[i]);
}
}
__global__
void test_bit_vector(
unsigned int* d_bit_vector, const unsigned int *d_input,
unsigned int *num_compacted, const size_t n
) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
if (d_input[i] % 2 == 0)
{
d_bit_vector[i] = 1;
atomicAdd(num_compacted, 1);
}
}
}
// compact_options copies the input options whose call and put
// results from the first round meet or exceed the given call & put
// thresholds to a compacted output in three result arrays.
size_t compact_options(
const real min_call_threshold, const real min_put_threshold,
const real *d_call, const real *d_put, const real *d_stock_price_input,
const real *d_option_strike_input, const real *d_option_years_input,
const size_t n, real *d_stock_price_result, real *d_option_strike_result,
real *d_option_years_result
) {
// Keeps track of how many options are compacted
size_t h_num_compacted = 0;
size_t *d_num_compacted = 0;
cudaMalloc((void**)&d_num_compacted, sizeof(size_t));
cudaMemset(d_num_compacted, 0, sizeof(size_t));
check_cuda_error("Malloc (d_num_compacted)", __FILE__, __LINE__);
// Holds bit vector of options that meet call/put minimum thresholds
unsigned int *h_bit_vector = 0;
unsigned int *d_bit_vector = 0;
h_bit_vector = (unsigned int*)malloc(n * sizeof(unsigned int));
cudaMalloc((void**)&d_bit_vector, n * sizeof(unsigned int));
cudaMemset(d_bit_vector, 0, n * sizeof(unsigned int));
check_cuda_error("Malloc (d_bit_vector)", __FILE__, __LINE__);
// Holds the sums of each section of the bit vector
unsigned int *d_block_sums = 0;
  cudaMalloc((void**)&d_block_sums, ((n + BLOCK_SIZE - 1) / BLOCK_SIZE) * sizeof(unsigned int));
check_cuda_error("Malloc (d_block_sums)", __FILE__, __LINE__);
// Holds the final prefix sum calculated using the Hillis and Steele algorithm
unsigned int *d_prefix_sum = 0;
cudaMalloc((void**)&d_prefix_sum, n * sizeof(unsigned int));
check_cuda_error("Malloc (d_prefix_sum)", __FILE__, __LINE__);
  // Calculate number of threadblocks, rounding up so a partial final block covers any tail elements
  size_t num_blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
create_bit_vector<<<num_blocks, BLOCK_SIZE>>>(
d_bit_vector, d_call, d_put, d_num_compacted, n, min_call_threshold, min_put_threshold
);
check_cuda_error("bit_vector", __FILE__, __LINE__);
scan<<<num_blocks, BLOCK_SIZE, 2 * BLOCK_SIZE * sizeof(unsigned int)>>>(
d_bit_vector, d_prefix_sum, n, d_block_sums
);
check_cuda_error("scan", __FILE__, __LINE__);
scan_update<<<num_blocks, BLOCK_SIZE>>>(d_bit_vector, d_prefix_sum, n, d_block_sums);
check_cuda_error("scan_update", __FILE__, __LINE__);
scatter_options_kernel<<<num_blocks, BLOCK_SIZE>>>(
min_call_threshold, min_put_threshold, d_call,
d_put, d_stock_price_input, d_option_strike_input,
d_option_years_input, d_prefix_sum, n, d_stock_price_result,
d_option_strike_result, d_option_years_result
);
check_cuda_error("scan_update", __FILE__, __LINE__);
cudaMemcpy(
    &h_num_compacted, d_num_compacted, sizeof(size_t), cudaMemcpyDeviceToHost
);
cudaFree(d_num_compacted);
cudaFree(d_bit_vector);
cudaFree(d_block_sums);
cudaFree(d_prefix_sum);
free(h_bit_vector);
return h_num_compacted;
}
|
6d56f130a81cd895a74b5a2be345f535b92b3ef2.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <ctime>
#include <hip/hip_runtime_api.h>
using namespace std;
struct pixel //to store RGB values
{
unsigned char r;
unsigned char g;
unsigned char b;
};
static void HandleError( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
cout<<hipGetErrorString(err)<<" in "<< file <<" at line "<< line<<endl;
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
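// Border handling: taps that fall outside the image return a zero pixel, i.e. the
// convolution below uses zero padding at the edges.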
__device__ pixel padding(pixel* Pixel_val, int x_coord, int y_coord, int img_width, int img_height)
{ pixel Px;
Px.r=0; Px.g=0; Px.b=0;
if(x_coord< img_width && y_coord <img_height && x_coord>=0 && y_coord>=0)
{
Px=Pixel_val[y_coord*img_width+x_coord];
}
return Px;
}
__global__ void vertical_conv(pixel* Pixel_in_v, pixel* Pixel_out_v,int img_wd_v, int img_ht_v, float* kernel_v, int k_v)
{
float tmp_r, tmp_g, tmp_b;
//int pix_idx_v=blockIdx.x*blockDim.x + threadIdx.x;
//int row=(int)(pix_idx_v/img_wd_v);
//int col=pix_idx_v%img_wd_v;
size_t col=blockIdx.x*blockDim.x + threadIdx.x;
size_t row=blockIdx.y*blockDim.y + threadIdx.y;
size_t pix_idx_v=row*img_wd_v+col;
tmp_r=0, tmp_g=0, tmp_b=0;
if(row<img_ht_v && col<img_wd_v){
for(int l=0;l<k_v;l++)
{//doing by 1 D arrays
pixel pix_val=padding(Pixel_in_v, col, (row+l-(k_v-1)/2), img_wd_v, img_ht_v);
tmp_r+=pix_val.r * kernel_v[l];
tmp_b+=pix_val.b * kernel_v[l];
tmp_g+=pix_val.g * kernel_v[l];
}
Pixel_out_v[pix_idx_v].r=tmp_r;
Pixel_out_v[pix_idx_v].g=tmp_g;
Pixel_out_v[pix_idx_v].b=tmp_b;
}
}
__global__ void horizontal_conv(pixel* Pixel_in, pixel* Pixel_out, int img_wd, int img_ht, float* kernel, int k)
{
float tmp_r, tmp_b, tmp_g;
//horizontal convolution
//int pix_idx=blockIdx.x*blockDim.x + threadIdx.x;
//int row=(int)(pix_idx/img_wd);
//int col=pix_idx%img_wd;
size_t col=blockIdx.x*blockDim.x + threadIdx.x;
size_t row=blockIdx.y*blockDim.y + threadIdx.y;
size_t pix_idx=row*img_wd+col;
tmp_r=0, tmp_g=0, tmp_b=0;
if(row<img_ht && col<img_wd)
{
for(int l=0; l<k;l++)
{
pixel pix_val=padding(Pixel_in, col+ l-(k-1)/2, row, img_wd, img_ht);
tmp_r+=pix_val.r * kernel[l];
tmp_g+=pix_val.g * kernel[l];
tmp_b+=pix_val.b * kernel[l];
}
Pixel_out[pix_idx].r=tmp_r;
Pixel_out[pix_idx].g=tmp_g;
Pixel_out[pix_idx].b=tmp_b;
}
}
int main(int argc, char* argv[])
{
int nDevices;
HANDLE_ERROR(hipGetDeviceCount(&nDevices));
cout<<"number of devices="<<nDevices<<endl;
for(int i=0;i<nDevices;i++){
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
/*printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
cout<<" Total global memory :"<<prop.totalGlobalMem<<endl;
cout<<" Shared memory per block :"<<prop.sharedMemPerBlock<<endl;
cout<<" Regs per block :"<<prop.regsPerBlock<<endl;
cout<<" Warp size :"<<prop.warpSize<<endl;
cout<<" Max threads per block :"<<prop.maxThreadsPerBlock<<endl;
cout<<" Max threads dimension :"<<prop.maxThreadsDim[0]<<" "<<prop.maxThreadsDim[1]<<" "<<prop.maxThreadsDim[2]<<endl;
cout<<" Max grid size: "<<prop.maxGridSize[0]<<" "<<prop.maxThreadsDim[1]<<" "<<prop.maxThreadsDim[2]<<endl;
*/
}
time_t start_of_code=time(NULL);
if(argc != 3) //there should be three arguments
return 1; //exit and return an error
float sigma = atof(argv[2]); //standard deviation for the gaussian
//Getting the kernel
int k=floor(6*sigma);//sigma might have fractional part
if(k%2==0) k++; //to make the size odd
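  // The Gaussian is separable, so a single 1-D kernel of width k ~ 6*sigma (roughly +/-3
  // sigma of support) is built once and applied twice, first along y and then along x.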
float *kernel0 = (float *)malloc(k * sizeof(float)); //y based gaussian
float *kernel1 = (float *)malloc(k * sizeof(float)); //x based gaussian
float constant1=sqrt(2*M_PI*sigma*sigma);//constants needed to define the kernel
float constant2=2*sigma*sigma;
int mid=floor(k/2);
kernel0[mid]=1/constant1;
kernel1[mid]=1/constant1;
for(int i=0;i<floor(k/2);i++) //using symmetry from center, to generate the separable kernels
{
kernel0[i]=((exp(-(floor(k/2)-i)*(floor(k/2)-i)/constant2)))/constant1;
kernel1[i]=kernel0[i];
kernel0[k-1-i]=kernel0[i];
kernel1[k-1-i]=kernel1[i];
}
time_t kernel_generation=time(NULL); //find time taken for kernel generation
cout<<" Kernel generation time:"<<double(kernel_generation - start_of_code)<<" sec"<<endl;
//reading the PPM file line by line
ifstream infile;
infile.open(argv[1]);
string line;
int img_wd, img_ht;
int max_val;
int line_count=0;
//line one contains P6, line 2 mentions about gimp version, line 3 stores the height and width
getline(infile, line);
istringstream iss1(line);
//reading first line to check format
int word;
string str1;
iss1>>str1;
if(str1.compare("P6")!=0) //comparing magic number
{
cout<<"wrong file format"<<endl;
return 1;
}
getline(infile,line); //this line has version related comment, hence ignoring
getline(infile,line); //this stores image dims
istringstream iss2(line);
iss2>>word;// this will be image width
img_wd=word;
iss2>>word;// this will be image height
img_ht=word;
//cout<<"wd="<<img_wd<<", ht="<<img_ht<<endl;
size_t num_pixels=img_wd*img_ht;
pixel *Pixel_out=(pixel*)malloc(num_pixels*sizeof(pixel));
//storing the pixels as lexicographically
pixel *Pixel = (pixel*)malloc(num_pixels*sizeof(pixel));
int pix_cnt=0, cnt=0;
getline(infile,line); //this stores max value
istringstream iss3(line);
iss3>>word;
max_val=word;//max pixel value
unsigned int val;
while (getline(infile, line))
{
istringstream iss4(line);
for (int i=0; i<=line.length();i++)
{
if(pix_cnt<num_pixels)
{
val =((int)line[i]);
if(cnt%3==0)
{
Pixel[pix_cnt].r=val;
}
else if(cnt%3==1)
{
Pixel[pix_cnt].g=val;
}
else
{
Pixel[pix_cnt].b=val;
pix_cnt++;
}
cnt++;
}
}
line_count++;
}
time_t reading_file=time(NULL);
cout<<" File reading time:"<<double(reading_file - kernel_generation)<<" sec"<<endl;
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
float thread_block=sqrt(prop.maxThreadsPerBlock);
dim3 DimGrid(ceil(img_wd/thread_block),ceil(img_ht/thread_block),1);
dim3 DimBlock(thread_block,thread_block,1);
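  // Square thread blocks of sqrt(maxThreadsPerBlock) x sqrt(maxThreadsPerBlock) threads,
  // one thread per output pixel; the grid is rounded up to cover the whole image.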
cout<<"grid="<<DimGrid.x<<" "<<DimGrid.y<<" "<<DimGrid.z<<endl;
cout<<"block="<<DimBlock.x<<" "<<DimBlock.y<<" "<<DimBlock.z<<endl;
//allocating gpu memory
pixel *Pixel_tmp_gpu, *Pixel_gpu, *Pixel_gpu_res;
HANDLE_ERROR(hipMalloc(&Pixel_gpu_res,num_pixels*sizeof(pixel))); //allocate space to store convolution result
HANDLE_ERROR(hipMemset(Pixel_gpu_res,128,num_pixels*sizeof(pixel)));
HANDLE_ERROR(hipMalloc(&Pixel_tmp_gpu,num_pixels*sizeof(pixel))); //allocate space to store convolution temporary
HANDLE_ERROR(hipMalloc(&Pixel_gpu,num_pixels*sizeof(pixel))); //allocate space to copy image to GPU memory
float *kernel0_gpu, *kernel1_gpu;
HANDLE_ERROR(hipMalloc(&kernel0_gpu, k*sizeof(float)));//allocate memory for kernel0
HANDLE_ERROR(hipMalloc(&kernel1_gpu, k*sizeof(float)));//allocate memory for kernel1
cout<<"memory allocated"<<endl;
//copying needed data
HANDLE_ERROR(hipMemcpy(Pixel_gpu, Pixel, num_pixels*sizeof(pixel),hipMemcpyHostToDevice));//copy input image from global to gpu
HANDLE_ERROR(hipMemcpy(kernel0_gpu, kernel0,k*sizeof(float),hipMemcpyHostToDevice));//copy the kernel0 host to device
HANDLE_ERROR(hipMemcpy(kernel1_gpu,kernel1,k*sizeof(float),hipMemcpyHostToDevice));//copy kernel1 host to device
cout<<"memory transfers done"<<endl;
hipLaunchKernelGGL(( vertical_conv), dim3(DimGrid),dim3(DimBlock), 0, 0, Pixel_gpu, Pixel_tmp_gpu,img_wd, img_ht,kernel0_gpu,k);
cout<<img_wd<<" "<<img_ht<<endl;
time_t vertical_convolution=time(NULL);
cout<<" vertical_convolution time: "<<double(vertical_convolution - reading_file)<<"sec"<<endl;
hipLaunchKernelGGL(( horizontal_conv), dim3(DimGrid),dim3(DimBlock), 0, 0, Pixel_tmp_gpu, Pixel_gpu_res, img_wd, img_ht, kernel1_gpu, k);
time_t horizontal_convolution=time(NULL);
HANDLE_ERROR(hipMemcpy(Pixel_out,Pixel_gpu_res, num_pixels*sizeof(pixel),hipMemcpyDeviceToHost));
cout<<" horizontal convolution time:" <<double(horizontal_convolution-vertical_convolution)<<" sec"<<endl;
//writing this to PPM file
ofstream ofs;
ofs.open("output_gpu.ppm", ofstream::out);
ofs<<"P6\n"<<img_wd<<" "<<img_ht<<"\n"<<max_val<<"\n";
for(int i=0; i <num_pixels;i++)
{
    ofs<<Pixel_out[i].r<<Pixel_out[i].g<<Pixel_out[i].b; //write raw bytes (P6 pixel data is binary)
}
ofs.close();
time_t end=time(NULL);
//cout<<" Saving the result:"<<double(end-horizontal_convolution)<<" sec"<<endl;
//display time taken for different processes
cout<<" Total execution time: "<<double(end-start_of_code)<<" sec"<<endl;
return 0;
}
| 6d56f130a81cd895a74b5a2be345f535b92b3ef2.cu | #include <fstream>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <ctime>
#include <cuda_runtime_api.h>
using namespace std;
struct pixel //to store RGB values
{
unsigned char r;
unsigned char g;
unsigned char b;
};
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
cout<<cudaGetErrorString(err)<<" in "<< file <<" at line "<< line<<endl;
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
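// Border handling: taps that fall outside the image return a zero pixel, i.e. the
// convolution below uses zero padding at the edges.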
__device__ pixel padding(pixel* Pixel_val, int x_coord, int y_coord, int img_width, int img_height)
{ pixel Px;
Px.r=0; Px.g=0; Px.b=0;
if(x_coord< img_width && y_coord <img_height && x_coord>=0 && y_coord>=0)
{
Px=Pixel_val[y_coord*img_width+x_coord];
}
return Px;
}
__global__ void vertical_conv(pixel* Pixel_in_v, pixel* Pixel_out_v,int img_wd_v, int img_ht_v, float* kernel_v, int k_v)
{
float tmp_r, tmp_g, tmp_b;
//int pix_idx_v=blockIdx.x*blockDim.x + threadIdx.x;
//int row=(int)(pix_idx_v/img_wd_v);
//int col=pix_idx_v%img_wd_v;
size_t col=blockIdx.x*blockDim.x + threadIdx.x;
size_t row=blockIdx.y*blockDim.y + threadIdx.y;
size_t pix_idx_v=row*img_wd_v+col;
tmp_r=0, tmp_g=0, tmp_b=0;
if(row<img_ht_v && col<img_wd_v){
for(int l=0;l<k_v;l++)
{//doing by 1 D arrays
pixel pix_val=padding(Pixel_in_v, col, (row+l-(k_v-1)/2), img_wd_v, img_ht_v);
tmp_r+=pix_val.r * kernel_v[l];
tmp_b+=pix_val.b * kernel_v[l];
tmp_g+=pix_val.g * kernel_v[l];
}
Pixel_out_v[pix_idx_v].r=tmp_r;
Pixel_out_v[pix_idx_v].g=tmp_g;
Pixel_out_v[pix_idx_v].b=tmp_b;
}
}
__global__ void horizontal_conv(pixel* Pixel_in, pixel* Pixel_out, int img_wd, int img_ht, float* kernel, int k)
{
float tmp_r, tmp_b, tmp_g;
//horizontal convolution
//int pix_idx=blockIdx.x*blockDim.x + threadIdx.x;
//int row=(int)(pix_idx/img_wd);
//int col=pix_idx%img_wd;
size_t col=blockIdx.x*blockDim.x + threadIdx.x;
size_t row=blockIdx.y*blockDim.y + threadIdx.y;
size_t pix_idx=row*img_wd+col;
tmp_r=0, tmp_g=0, tmp_b=0;
if(row<img_ht && col<img_wd)
{
for(int l=0; l<k;l++)
{
pixel pix_val=padding(Pixel_in, col+ l-(k-1)/2, row, img_wd, img_ht);
tmp_r+=pix_val.r * kernel[l];
tmp_g+=pix_val.g * kernel[l];
tmp_b+=pix_val.b * kernel[l];
}
Pixel_out[pix_idx].r=tmp_r;
Pixel_out[pix_idx].g=tmp_g;
Pixel_out[pix_idx].b=tmp_b;
}
}
int main(int argc, char* argv[])
{
int nDevices;
HANDLE_ERROR(cudaGetDeviceCount(&nDevices));
cout<<"number of devices="<<nDevices<<endl;
for(int i=0;i<nDevices;i++){
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
/*printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
cout<<" Total global memory :"<<prop.totalGlobalMem<<endl;
cout<<" Shared memory per block :"<<prop.sharedMemPerBlock<<endl;
cout<<" Regs per block :"<<prop.regsPerBlock<<endl;
cout<<" Warp size :"<<prop.warpSize<<endl;
cout<<" Max threads per block :"<<prop.maxThreadsPerBlock<<endl;
cout<<" Max threads dimension :"<<prop.maxThreadsDim[0]<<" "<<prop.maxThreadsDim[1]<<" "<<prop.maxThreadsDim[2]<<endl;
cout<<" Max grid size: "<<prop.maxGridSize[0]<<" "<<prop.maxThreadsDim[1]<<" "<<prop.maxThreadsDim[2]<<endl;
*/
}
time_t start_of_code=time(NULL);
if(argc != 3) //there should be three arguments
return 1; //exit and return an error
float sigma = atof(argv[2]); //standard deviation for the gaussian
//Getting the kernel
int k=floor(6*sigma);//sigma might have fractional part
if(k%2==0) k++; //to make the size odd
float *kernel0 = (float *)malloc(k * sizeof(float)); //y based gaussian
float *kernel1 = (float *)malloc(k * sizeof(float)); //x based gaussian
float constant1=sqrt(2*M_PI*sigma*sigma);//constants needed to define the kernel
float constant2=2*sigma*sigma;
int mid=floor(k/2);
kernel0[mid]=1/constant1;
kernel1[mid]=1/constant1;
for(int i=0;i<floor(k/2);i++) //using symmetry from center, to generate the separable kernels
{
kernel0[i]=((exp(-(floor(k/2)-i)*(floor(k/2)-i)/constant2)))/constant1;
kernel1[i]=kernel0[i];
kernel0[k-1-i]=kernel0[i];
kernel1[k-1-i]=kernel1[i];
}
time_t kernel_generation=time(NULL); //find time taken for kernel generation
cout<<" Kernel generation time:"<<double(kernel_generation - start_of_code)<<" sec"<<endl;
//reading the PPM file line by line
ifstream infile;
infile.open(argv[1]);
string line;
int img_wd, img_ht;
int max_val;
int line_count=0;
//line one contains P6, line 2 mentions about gimp version, line 3 stores the height and width
getline(infile, line);
istringstream iss1(line);
//reading first line to check format
int word;
string str1;
iss1>>str1;
if(str1.compare("P6")!=0) //comparing magic number
{
cout<<"wrong file format"<<endl;
return 1;
}
getline(infile,line); //this line has version related comment, hence ignoring
getline(infile,line); //this stores image dims
istringstream iss2(line);
iss2>>word;// this will be image width
img_wd=word;
iss2>>word;// this will be image height
img_ht=word;
//cout<<"wd="<<img_wd<<", ht="<<img_ht<<endl;
size_t num_pixels=img_wd*img_ht;
pixel *Pixel_out=(pixel*)malloc(num_pixels*sizeof(pixel));
//storing the pixels as lexicographically
pixel *Pixel = (pixel*)malloc(num_pixels*sizeof(pixel));
int pix_cnt=0, cnt=0;
getline(infile,line); //this stores max value
istringstream iss3(line);
iss3>>word;
max_val=word;//max pixel value
unsigned int val;
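//note: the P6 payload is binary but is read line by line with getline; every
//getline call strips one 0x0A delimiter byte, which the loop below restores
//via the extra i==line.length() iteration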
while (getline(infile, line))
{
istringstream iss4(line);
for (int i=0; i<=line.length();i++)
{
if(pix_cnt<num_pixels)
{
val = (i<line.length()) ? (unsigned char)line[i] : '\n'; //restore the 0x0A byte that getline stripped as a delimiter
if(cnt%3==0)
{
Pixel[pix_cnt].r=val;
}
else if(cnt%3==1)
{
Pixel[pix_cnt].g=val;
}
else
{
Pixel[pix_cnt].b=val;
pix_cnt++;
}
cnt++;
}
}
line_count++;
}
time_t reading_file=time(NULL);
cout<<" File reading time:"<<double(reading_file - kernel_generation)<<" sec"<<endl;
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
float thread_block=sqrt(prop.maxThreadsPerBlock);
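//launch geometry: square thread blocks of thread_block x thread_block threads
//(e.g. 32x32 when maxThreadsPerBlock is 1024), with enough blocks to tile the whole image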
dim3 DimGrid(ceil(img_wd/thread_block),ceil(img_ht/thread_block),1);
dim3 DimBlock(thread_block,thread_block,1);
cout<<"grid="<<DimGrid.x<<" "<<DimGrid.y<<" "<<DimGrid.z<<endl;
cout<<"block="<<DimBlock.x<<" "<<DimBlock.y<<" "<<DimBlock.z<<endl;
//allocating gpu memory
pixel *Pixel_tmp_gpu, *Pixel_gpu, *Pixel_gpu_res;
HANDLE_ERROR(cudaMalloc(&Pixel_gpu_res,num_pixels*sizeof(pixel))); //allocate space to store convolution result
HANDLE_ERROR(cudaMemset(Pixel_gpu_res,128,num_pixels*sizeof(pixel)));
HANDLE_ERROR(cudaMalloc(&Pixel_tmp_gpu,num_pixels*sizeof(pixel))); //allocate space to store convolution temporary
HANDLE_ERROR(cudaMalloc(&Pixel_gpu,num_pixels*sizeof(pixel))); //allocate space to copy image to GPU memory
float *kernel0_gpu, *kernel1_gpu;
HANDLE_ERROR(cudaMalloc(&kernel0_gpu, k*sizeof(float)));//allocate memory for kernel0
HANDLE_ERROR(cudaMalloc(&kernel1_gpu, k*sizeof(float)));//allocate memory for kernel1
cout<<"memory allocated"<<endl;
//copying needed data
HANDLE_ERROR(cudaMemcpy(Pixel_gpu, Pixel, num_pixels*sizeof(pixel),cudaMemcpyHostToDevice));//copy input image from global to gpu
HANDLE_ERROR(cudaMemcpy(kernel0_gpu, kernel0,k*sizeof(float),cudaMemcpyHostToDevice));//copy the kernel0 host to device
HANDLE_ERROR(cudaMemcpy(kernel1_gpu,kernel1,k*sizeof(float),cudaMemcpyHostToDevice));//copy kernel1 host to device
cout<<"memory transfers done"<<endl;
vertical_conv<<<DimGrid,DimBlock>>>(Pixel_gpu, Pixel_tmp_gpu,img_wd, img_ht,kernel0_gpu,k);
cout<<img_wd<<" "<<img_ht<<endl;
time_t vertical_convolution=time(NULL);
cout<<" vertical_convolution time: "<<double(vertical_convolution - reading_file)<<"sec"<<endl;
horizontal_conv<<<DimGrid,DimBlock>>>(Pixel_tmp_gpu, Pixel_gpu_res, img_wd, img_ht, kernel1_gpu, k);
time_t horizontal_convolution=time(NULL);
HANDLE_ERROR(cudaMemcpy(Pixel_out,Pixel_gpu_res, num_pixels*sizeof(pixel),cudaMemcpyDeviceToHost));
cout<<" horizontal convolution time:" <<double(horizontal_convolution-vertical_convolution)<<" sec"<<endl;
//writing this to PPM file
ofstream ofs;
ofs.open("output_gpu.ppm", ofstream::out);
ofs<<"P6\n"<<img_wd<<" "<<img_ht<<"\n"<<max_val<<"\n";
for(int i=0; i <num_pixels;i++)
{
ofs<<Pixel_out[i].r<<Pixel_out[i].g<<Pixel_out[i].b; //write raw bytes (P6 pixel data is binary, not ASCII)
}
ofs.close();
time_t end=time(NULL);
//cout<<" Saving the result:"<<double(end-horizontal_convolution)<<" sec"<<endl;
//display time taken for different processes
cout<<" Total execution time: "<<double(end-start_of_code)<<" sec"<<endl;
return 0;
}
|
f4cf8677d2f2359b42992bf331a2be8d181b6252.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_LOG_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/activations/log_softmax.hpp"
#include "lbann/utils/cuda.hpp"
namespace lbann {
namespace {
/** @brief Max functor */
template <class T>
struct max_op {
__device__ __forceinline__
DataType operator()(const T& x1, const T& x2) const {
return cuda::max(x1, x2);
}
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ max_values) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Find largest value for each thread
TensorDataType thread_max_val{-cuda::infinity<DataType>()};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& val = values[row+col*values_ldim];
thread_max_val = cuda::max(thread_max_val, val);
}
// Find largest value for each block
const TensorDataType block_max_val
= cuda::block_reduce<bsize,1,1,DataType,max_op<DataType>>(thread_max_val);
if (tid == 0) {
max_values[bidx+col*nblocksx] = block_max_val;
}
}
}
/** @brief Kernel for matrix column sums
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums On input, array of zeros. On output, sum(x) for each
* column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_sum_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ sums) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
thread_sum += values[row+col*values_ldim];
}
// Compute sum for each block
const TensorDataType block_sum = cuda::block_reduce<bsize,1,1>(thread_sum);
if (tid == 0) {
cuda::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute sum(exp(x-shift)) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums On input, array of zeros. On output,
* sum(exp(x-shift)) for each column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_sumexp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ shifts,
TensorDataType* __restrict__ sums) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
const auto& shift = shifts[col];
// Exponentiate inputs and compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row+col*input_ldim];
thread_sum += cuda::exp(x-shift);
}
// Compute sum for each block
const TensorDataType block_sum = cuda::block_reduce<bsize,1,1>(thread_sum);
if (tid == 0) {
cuda::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute layer output
*
* y = x - shift - log(sum(x-shift))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ shifts,
const TensorDataType* __restrict__ sums) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& shift = shifts[col];
const TensorDataType log_sum_exp = cuda::log(sums[col]);
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row+col*input_ldim];
auto& y = output[row+col*output_ldim];
y = x - shift - log_sum_exp;
}
}
}
/** @brief Compute gradient w.r.t. input
*
* dx = dy - softmax(x) * sum(dy)
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums Column sums of the gradient w.r.t. output
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
const TensorDataType* __restrict__ sums,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& sum = sums[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row+col*output_ldim];
const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim];
dx = dy - cuda::exp(y) * sum;
}
}
}
} // namespace
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
const TensorDataType zero = 0;
const TensorDataType one = 1;
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
if (!local_input.IsEmpty()) {
CHECK_CUDNN(cudnnSoftmaxForward(cudnn::get_handle(),
CUDNN_SOFTMAX_LOG,
CUDNN_SOFTMAX_MODE_INSTANCE,
&one,
l.m_tensors_cudnn_desc.get_prev_activations(),
local_input.LockedBuffer(),
&zero,
l.m_tensors_cudnn_desc.get_activations(),
local_output.Buffer()));
}
}
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
const TensorDataType zero = 0;
const TensorDataType one = 1;
const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
if (!local_output.IsEmpty()) {
CHECK_CUDNN(cudnnSoftmaxBackward(cudnn::get_handle(),
CUDNN_SOFTMAX_LOG,
CUDNN_SOFTMAX_MODE_INSTANCE,
&one,
l.m_tensors_cudnn_desc.get_activations(),
local_output.LockedBuffer(),
l.m_tensors_cudnn_desc.get_prev_error_signals(),
local_gradient_wrt_output.LockedBuffer(),
&zero,
l.m_tensors_cudnn_desc.get_error_signals(),
local_gradient_wrt_input.Buffer()));
}
}
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Local matrices
const auto& local_input = dynamic_cast<const GPUMatType&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<GPUMatType&>(l.get_local_activations());
auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
const auto& local_height = local_input.Height();
const auto& local_width = local_input.Width();
// GPU objects
auto&& stream = El::GPUManager::Stream();
auto&& event = El::GPUManager::Event();
El::SyncInfo<El::Device::GPU> sync_info{stream, event};
// Find max value in each column
cuda::thrust::vector<TensorDataType> max_vals;
if (local_input.IsEmpty()) {
max_vals.resize(local_width,
-std::numeric_limits<DataType>::infinity());
}
else {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
max_vals.resize(grid_dims.x * local_width);
hipLaunchKernelGGL(( reduce_max_kernel<block_size>), dim3(grid_dims), dim3(block_dims), 0, stream,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
max_vals.data().get());
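    // Fold the per-block maxima repeatedly until a single maximum per column remains.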
while (grid_dims.x > 1) {
const size_t prev_height = grid_dims.x;
grid_dims.x = (prev_height + block_size - 1) / block_size;
cuda::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
max_vals.resize(grid_dims.x * local_width);
hipLaunchKernelGGL(( reduce_max_kernel<block_size>), dim3(grid_dims), dim3(block_dims), 0, stream,
prev_height, local_width,
prev_vals.data().get(), prev_height,
max_vals.data().get());
}
}
El::mpi::AllReduce(max_vals.data().get(), max_vals.size(),
El::mpi::MAX, l.m_workspace->RedundantComm(),
sync_info);
// Compute sum(exp(x-max_val)) for each column
El::Zero(*l.m_workspace);
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hipLaunchKernelGGL(( fp_sumexp_kernel<block_size>), dim3(grid_dims), dim3(block_dims), 0, stream,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
max_vals.data().get(),
local_workspace.Buffer());
}
l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute output
// Note: y = x - max_val - log(sum(exp(x-max_val)))
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hipLaunchKernelGGL(( fp_output_kernel), dim3(grid_dims), dim3(block_dims), 0, stream,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
max_vals.data().get(),
local_workspace.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Local matrices
const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
const auto& local_height = local_output.Height();
const auto& local_width = local_output.Width();
// GPU objects
auto&& stream = El::GPUManager::Stream();
auto&& event = El::GPUManager::Event();
El::SyncInfo<El::Device::GPU> sync_info{stream, event};
// Compute sum of entries in gradient w.r.t. output
El::Zero(local_workspace);
if (!local_gradient_wrt_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hipLaunchKernelGGL(( reduce_sum_kernel<block_size>)
, dim3(grid_dims), dim3(block_dims), 0, stream,
local_height, local_width,
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer());
}
l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute gradient w.r.t. input
if (!local_gradient_wrt_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
hipLaunchKernelGGL(( bp_kernel), dim3(grid_dims), dim3(block_dims), 0, stream,
local_height, local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.LockedBuffer(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim());
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::fp_compute() {
fp_compute_impl(*this);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::bp_compute() {
bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class log_softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class log_softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>;
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| f4cf8677d2f2359b42992bf331a2be8d181b6252.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_LOG_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/activations/log_softmax.hpp"
#include "lbann/utils/cuda.hpp"
namespace lbann {
namespace {
/** @brief Max functor */
template <class T>
struct max_op {
__device__ __forceinline__
DataType operator()(const T& x1, const T& x2) const {
return cuda::max(x1, x2);
}
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ max_values) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidx = blockIdx.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksx = gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Find largest value for each thread
TensorDataType thread_max_val{-cuda::infinity<DataType>()};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& val = values[row+col*values_ldim];
thread_max_val = cuda::max(thread_max_val, val);
}
// Find largest value for each block
const TensorDataType block_max_val
= cuda::block_reduce<bsize,1,1,DataType,max_op<DataType>>(thread_max_val);
if (tid == 0) {
max_values[bidx+col*nblocksx] = block_max_val;
}
}
}
/** @brief Kernel for matrix column sums
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums On input, array of zeros. On output, sum(x) for each
* column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_sum_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ values,
size_t values_ldim,
TensorDataType* __restrict__ sums) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
// Compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
thread_sum += values[row+col*values_ldim];
}
// Compute sum for each block
const TensorDataType block_sum = cuda::block_reduce<bsize,1,1>(thread_sum);
if (tid == 0) {
cuda::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute sum(exp(x-shift)) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums On input, array of zeros. On output,
* sum(exp(x-shift)) for each column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_sumexp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ shifts,
TensorDataType* __restrict__ sums) {
// Indices
const size_t tid = threadIdx.x;
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t bidy = blockIdx.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nblocksy = gridDim.y;
for (size_t col = bidy; col < width; col += nblocksy) {
const auto& shift = shifts[col];
// Exponentiate inputs and compute sum for each thread
TensorDataType thread_sum{0};
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row+col*input_ldim];
thread_sum += cuda::exp(x-shift);
}
// Compute sum for each block
const TensorDataType block_sum = cuda::block_reduce<bsize,1,1>(thread_sum);
if (tid == 0) {
cuda::atomic_add(&sums[col], block_sum);
}
}
}
/** @brief Compute layer output
*
* y = x - shift - log(sum(x-shift))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ shifts,
const TensorDataType* __restrict__ sums) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& shift = shifts[col];
const TensorDataType log_sum_exp = cuda::log(sums[col]);
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& x = input[row+col*input_ldim];
auto& y = output[row+col*output_ldim];
y = x - shift - log_sum_exp;
}
}
}
/** @brief Compute gradient w.r.t. input
*
* dx = dy - softmax(x) * sum(dy)
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums Column sums of the gradient w.r.t. output
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
const TensorDataType* __restrict__ sums,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& sum = sums[col];
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& y = output[row+col*output_ldim];
const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim];
dx = dy - cuda::exp(y) * sum;
}
}
}
} // namespace
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
const TensorDataType zero = 0;
const TensorDataType one = 1;
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
if (!local_input.IsEmpty()) {
CHECK_CUDNN(cudnnSoftmaxForward(cudnn::get_handle(),
CUDNN_SOFTMAX_LOG,
CUDNN_SOFTMAX_MODE_INSTANCE,
&one,
l.m_tensors_cudnn_desc.get_prev_activations(),
local_input.LockedBuffer(),
&zero,
l.m_tensors_cudnn_desc.get_activations(),
local_output.Buffer()));
}
}
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
const TensorDataType zero = 0;
const TensorDataType one = 1;
const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
if (!local_output.IsEmpty()) {
CHECK_CUDNN(cudnnSoftmaxBackward(cudnn::get_handle(),
CUDNN_SOFTMAX_LOG,
CUDNN_SOFTMAX_MODE_INSTANCE,
&one,
l.m_tensors_cudnn_desc.get_activations(),
local_output.LockedBuffer(),
l.m_tensors_cudnn_desc.get_prev_error_signals(),
local_gradient_wrt_output.LockedBuffer(),
&zero,
l.m_tensors_cudnn_desc.get_error_signals(),
local_gradient_wrt_input.Buffer()));
}
}
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Local matrices
const auto& local_input = dynamic_cast<const GPUMatType&>(l.get_local_prev_activations());
auto& local_output = dynamic_cast<GPUMatType&>(l.get_local_activations());
auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
const auto& local_height = local_input.Height();
const auto& local_width = local_input.Width();
// GPU objects
auto&& stream = El::GPUManager::Stream();
auto&& event = El::GPUManager::Event();
El::SyncInfo<El::Device::GPU> sync_info{stream, event};
// Find max value in each column
cuda::thrust::vector<TensorDataType> max_vals;
if (local_input.IsEmpty()) {
max_vals.resize(local_width,
-std::numeric_limits<DataType>::infinity());
}
else {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
max_vals.resize(grid_dims.x * local_width);
reduce_max_kernel<block_size><<<grid_dims, block_dims, 0, stream>>>(
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
max_vals.data().get());
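    // Fold the per-block maxima repeatedly until a single maximum per column remains.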
while (grid_dims.x > 1) {
const size_t prev_height = grid_dims.x;
grid_dims.x = (prev_height + block_size - 1) / block_size;
cuda::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
max_vals.resize(grid_dims.x * local_width);
reduce_max_kernel<block_size><<<grid_dims, block_dims, 0, stream>>>(
prev_height, local_width,
prev_vals.data().get(), prev_height,
max_vals.data().get());
}
}
El::mpi::AllReduce(max_vals.data().get(), max_vals.size(),
El::mpi::MAX, l.m_workspace->RedundantComm(),
sync_info);
// Compute sum(exp(x-max_val)) for each column
El::Zero(*l.m_workspace);
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
fp_sumexp_kernel<block_size><<<grid_dims, block_dims, 0, stream>>>(
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
max_vals.data().get(),
local_workspace.Buffer());
}
l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute output
// Note: y = x - max_val - log(sum(exp(x-max_val)))
if (!local_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
fp_output_kernel<<<grid_dims, block_dims, 0, stream>>>(
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
max_vals.data().get(),
local_workspace.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
// Local matrices
const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
const auto& local_height = local_output.Height();
const auto& local_width = local_output.Width();
// GPU objects
auto&& stream = El::GPUManager::Stream();
auto&& event = El::GPUManager::Event();
El::SyncInfo<El::Device::GPU> sync_info{stream, event};
// Compute sum of entries in gradient w.r.t. output
El::Zero(local_workspace);
if (!local_gradient_wrt_output.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
reduce_sum_kernel<block_size>
<<<grid_dims, block_dims, 0, stream>>>(
local_height, local_width,
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.Buffer());
}
l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());
// Compute gradient w.r.t. input
if (!local_gradient_wrt_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
bp_kernel<<<grid_dims, block_dims, 0, stream>>>(
local_height, local_width,
local_output.LockedBuffer(),
local_output.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_workspace.LockedBuffer(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim());
}
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::fp_compute() {
fp_compute_impl(*this);
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::bp_compute() {
bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class log_softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class log_softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>;
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
8887bc992e14b11f23d8b88bc3680dd46e03839e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
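// caffe_gpu_memcpy relies on hipMemcpyDefault, which infers the copy direction
// from the pointer addresses (unified virtual addressing), so it handles
// host-to-device, device-to-host, and device-to-device copies alike.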
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
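// The element-wise kernels below share one pattern: CUDA_KERNEL_LOOP(i, n) is
// Caffe's grid-stride loop, roughly
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
// and each wrapper launches CAFFE_GET_BLOCKS(n) blocks of CAFFE_CUDA_NUM_THREADS threads.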
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
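// trace_kernel sums the diagonal of a contiguous n x n matrix (stride n + 1
// between diagonal entries) in a single thread; presumably intended for small
// matrices where a parallel reduction is not worth the effort.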
template <typename Dtype>
__global__ void trace_kernel(const int n, const Dtype* A, Dtype* out) {
CUDA_KERNEL_LOOP(index, 1) {
Dtype val = 0;
for (int i = 0; i < n; ++ i) {
val += A[i * (n + 1)];
}
out[0] = val;
}
}
template <>
void caffe_gpu_trace<float>(const int n, const float* A, float* x) {
Blob<float> temp;
temp.Reshape(1, 1, 1, 1);
hipLaunchKernelGGL(( trace_kernel<float>), dim3(CAFFE_GET_BLOCKS(1)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, A, temp.mutable_gpu_data());
*x = temp.cpu_data()[0];
}
template <>
void caffe_gpu_trace<double>(const int n, const double* A, double* x) {
Blob<double> temp;
temp.Reshape(1, 1, 1, 1);
hipLaunchKernelGGL(( trace_kernel<double>), dim3(CAFFE_GET_BLOCKS(1)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, A, temp.mutable_gpu_data());
*x = temp.cpu_data()[0];
}
} // namespace caffe
| 8887bc992e14b11f23d8b88bc3680dd46e03839e.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
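// caffe_gpu_memcpy relies on cudaMemcpyDefault, which infers the copy direction
// from the pointer addresses (unified virtual addressing), so it handles
// host-to-device, device-to-host, and device-to-device copies alike.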
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
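// The element-wise kernels below share one pattern: CUDA_KERNEL_LOOP(i, n) is
// Caffe's grid-stride loop, roughly
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
// and each wrapper launches CAFFE_GET_BLOCKS(n) blocks of CAFFE_CUDA_NUM_THREADS threads.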
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
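// The specializations below rescale cuRAND's unit-interval uniform output to
// the requested range by scaling with (b - a) and shifting by a; the scale and
// shift steps are skipped when they would be no-ops.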
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
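// trace_kernel sums the diagonal entries A[i * (n + 1)] of an n x n matrix in
// a single effective worker thread; caffe_gpu_trace launches it once and
// copies the scalar result back to the host through a temporary Blob.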
template <typename Dtype>
__global__ void trace_kernel(const int n, const Dtype* A, Dtype* out) {
CUDA_KERNEL_LOOP(index, 1) {
Dtype val = 0;
for (int i = 0; i < n; ++ i) {
val += A[i * (n + 1)];
}
out[0] = val;
}
}
template <>
void caffe_gpu_trace<float>(const int n, const float* A, float* x) {
Blob<float> temp;
temp.Reshape(1, 1, 1, 1);
trace_kernel<float><<<CAFFE_GET_BLOCKS(1),
CAFFE_CUDA_NUM_THREADS>>>(n, A, temp.mutable_gpu_data());
*x = temp.cpu_data()[0];
}
template <>
void caffe_gpu_trace<double>(const int n, const double* A, double* x) {
Blob<double> temp;
temp.Reshape(1, 1, 1, 1);
trace_kernel<double><<<CAFFE_GET_BLOCKS(1),
CAFFE_CUDA_NUM_THREADS>>>(n, A, temp.mutable_gpu_data());
*x = temp.cpu_data()[0];
}
} // namespace caffe
|
55d20e3ee2d42e505a90e3f545ff9750d2cc377c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* File: smoothingomp.cc
* Assignment: 5
* Students: Teun Mathijssen, David Puroja
* Student email: [email protected], [email protected]
* Studentnumber: 11320788, 10469036
*
* Description: Applies the smoothing filter on the image using CUDA.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip> // for std::setprecision used when printing timings
#include "timer.h"
#include "cuda_helper.h"
using namespace std;
/* Kernel dimension values. */
#define KERNEL_WIDTH 5
#define KERNEL_SIZE KERNEL_WIDTH * KERNEL_WIDTH
#define KERNEL_OFFSET KERNEL_WIDTH / 2
#define KERNEL_MULTIPLIER (1.0 / 81.0)
/* Triangular smoothing kernel. */
const int kernel_1D[KERNEL_SIZE] = {1, 2, 3, 2, 1,
2, 4, 6, 4, 2,
3, 6, 9, 6, 3,
2, 4, 6, 4, 2,
1, 2, 3, 2, 1};
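/* The triangular weights above sum to 81, which is what the 1/81
* KERNEL_MULTIPLIER normalization corresponds to. */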
/* Smoothing filter kernel. */
__global__ void smoothing_kernel(unsigned char* image_data,
unsigned char* temp_image_data, int* kernel,
int num_pixels, int width, int height) {
/* Calculate thread index. */
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
int col = i % width;
int row = i / width;
/* Boundary check for the current kernel center. */
if (i < num_pixels && !(row < KERNEL_OFFSET || col < KERNEL_OFFSET ||
row >= (height - KERNEL_OFFSET) || col >= (width - KERNEL_OFFSET))) {
int accumulator = 0;
for (int kernel_index = 0; kernel_index < KERNEL_SIZE; kernel_index++) {
int kx = (kernel_index % KERNEL_WIDTH);
int ky = (kernel_index / KERNEL_WIDTH);
int index = i + (kx - KERNEL_OFFSET) + width * (ky - KERNEL_OFFSET);
accumulator += (kernel[ky * KERNEL_WIDTH + kx] \
* temp_image_data[index]);
}
accumulator *= KERNEL_MULTIPLIER;
image_data[i] = accumulator;
}
}
/* Allocates the necessary memory on the GPU and executes the CUDA-kernel. */
void filter_smoothing_cuda(unsigned char *image_data, int num_pixels,
int width, int height, int max_index,
int thread_block_size) {
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
/* Allocate the image used to calculate the smoothing. */
unsigned char* device_image = (unsigned char*) allocateDeviceMemory( \
num_pixels * sizeof (unsigned char));
unsigned char* device_temp_image = (unsigned char*) allocateDeviceMemory( \
num_pixels * sizeof (unsigned char));
int* device_kernel_array = (int*) allocateDeviceMemory( \
KERNEL_SIZE * sizeof (int));
/* Copy the data to the device. */
memoryTime.start();
memcpyHostToDevice(device_kernel_array, (int*) kernel_1D, \
KERNEL_SIZE * sizeof (int));
memcpyHostToDevice(device_image, image_data, \
num_pixels * sizeof (unsigned char));
memcpyDeviceToDevice(device_temp_image, device_image, \
num_pixels * sizeof (unsigned char));
memoryTime.stop();
int num_blocks = (num_pixels + thread_block_size - 1) / thread_block_size;
/* Start smoothing kernel for all the pixels. This can be changed if OpenMP
* is used to split the image size into different kernels. The kernels can
* be executed sequentially on the GPU.
*/
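/* A minimal sketch of that split (hypothetical; the chunk count and the extra
* offset parameter are not part of the original assignment code): chunk the
* flat pixel range and launch one grid per chunk, e.g.
*
* int chunks = 4; // assumed chunk count
* int chunk_size = (num_pixels + chunks - 1) / chunks;
* for (int c = 0; c < chunks; ++c) {
* int blocks = (chunk_size + thread_block_size - 1) / thread_block_size;
* // launch smoothing_kernel over pixels [c * chunk_size, (c + 1) * chunk_size),
* // which would require adding an index-offset parameter to the kernel.
* }
*/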
kernelTime1.start();
hipLaunchKernelGGL(( smoothing_kernel), dim3(num_blocks), dim3(thread_block_size), 0, 0, \
device_image, device_temp_image, device_kernel_array, max_index, \
width, height);
kernelTime1.stop();
checkCudaCall(hipGetLastError());
/* Copy the result back to the host. Only the pixels actually calculated
* by the GPU are copied back.
*/
memoryTime.start();
memcpyDeviceToHost(image_data, device_image,
max_index * sizeof (unsigned char));
memoryTime.stop();
freeDeviceMemory(device_image);
freeDeviceMemory(device_temp_image);
freeDeviceMemory(device_kernel_array);
/* Print the elapsed time. */
cout << fixed << setprecision(6);
cout << "smoothing (kernel): \t\t" << kernelTime1.getElapsed() \
<< " seconds." << endl;
cout << "smoothing (memory): \t\t" << memoryTime.getElapsed() \
<< " seconds." << endl;
}
| 55d20e3ee2d42e505a90e3f545ff9750d2cc377c.cu | /*
* File: smoothingomp.cc
* Assignment: 5
* Students: Teun Mathijssen, David Puroja
* Student email: [email protected], [email protected]
* Studentnumber: 11320788, 10469036
*
* Description: Applies the smoothing filter on the image using CUDA.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip> // for std::setprecision used when printing timings
#include "timer.h"
#include "cuda_helper.h"
using namespace std;
/* Kernel dimension values. */
#define KERNEL_WIDTH 5
#define KERNEL_SIZE KERNEL_WIDTH * KERNEL_WIDTH
#define KERNEL_OFFSET KERNEL_WIDTH / 2
#define KERNEL_MULTIPLIER (1.0 / 81.0)
/* Triangular smoothing kernel. */
const int kernel_1D[KERNEL_SIZE] = {1, 2, 3, 2, 1,
2, 4, 6, 4, 2,
3, 6, 9, 6, 3,
2, 4, 6, 4, 2,
1, 2, 3, 2, 1};
/* Smoothing filter kernel. */
__global__ void smoothing_kernel(unsigned char* image_data,
unsigned char* temp_image_data, int* kernel,
int num_pixels, int width, int height) {
/* Calculate thread index. */
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
int col = i % width;
int row = i / width;
/* Boundary check for the current kernel center. */
if (i < num_pixels && !(row < KERNEL_OFFSET || col < KERNEL_OFFSET ||
row >= (height - KERNEL_OFFSET) || col >= (width - KERNEL_OFFSET))) {
int accumulator = 0;
for (int kernel_index = 0; kernel_index < KERNEL_SIZE; kernel_index++) {
int kx = (kernel_index % KERNEL_WIDTH);
int ky = (kernel_index / KERNEL_WIDTH);
int index = i + (kx - KERNEL_OFFSET) + width * (ky - KERNEL_OFFSET);
accumulator += (kernel[ky * KERNEL_WIDTH + kx] \
* temp_image_data[index]);
}
accumulator *= KERNEL_MULTIPLIER;
image_data[i] = accumulator;
}
}
/* Allocates the necessary memory on the GPU and executes the CUDA-kernel. */
void filter_smoothing_cuda(unsigned char *image_data, int num_pixels,
int width, int height, int max_index,
int thread_block_size) {
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
/* Allocate the image used to calculate the smoothing. */
unsigned char* device_image = (unsigned char*) allocateDeviceMemory( \
num_pixels * sizeof (unsigned char));
unsigned char* device_temp_image = (unsigned char*) allocateDeviceMemory( \
num_pixels * sizeof (unsigned char));
int* device_kernel_array = (int*) allocateDeviceMemory( \
KERNEL_SIZE * sizeof (int));
/* Copy the data to the device. */
memoryTime.start();
memcpyHostToDevice(device_kernel_array, (int*) kernel_1D, \
KERNEL_SIZE * sizeof (int));
memcpyHostToDevice(device_image, image_data, \
num_pixels * sizeof (unsigned char));
memcpyDeviceToDevice(device_temp_image, device_image, \
num_pixels * sizeof (unsigned char));
memoryTime.stop();
int num_blocks = (num_pixels + thread_block_size - 1) / thread_block_size;
/* Start smoothing kernel for all the pixels. This can be changed if OpenMP
* is used to split the image size into different kernels. The kernels can
* be executed sequentially on the GPU.
*/
kernelTime1.start();
smoothing_kernel<<<num_blocks, thread_block_size>>> \
(device_image, device_temp_image, device_kernel_array, max_index, \
width, height);
kernelTime1.stop();
checkCudaCall(cudaGetLastError());
/* Copy the result back to the host. Only the pixels actually calculated
* by the GPU are copied back.
*/
memoryTime.start();
memcpyDeviceToHost(image_data, device_image,
max_index * sizeof (unsigned char));
memoryTime.stop();
freeDeviceMemory(device_image);
freeDeviceMemory(device_temp_image);
freeDeviceMemory(device_kernel_array);
/* Print the elapsed time. */
cout << fixed << setprecision(6);
cout << "smoothing (kernel): \t\t" << kernelTime1.getElapsed() \
<< " seconds." << endl;
cout << "smoothing (memory): \t\t" << memoryTime.getElapsed() \
<< " seconds." << endl;
}
|
f5e9bf54f42d1915432517efb51e73bb6655ecd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file test_decimate.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <helper_cuda.h>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <algorithm>
#include <list>
#include "rd/cpu/brute_force/choose.hpp"
#include "rd/cpu/brute_force/decimate.hpp"
#include "rd/gpu/device/brute_force/decimate.cuh"
#include "rd/gpu/device/device_decimate.cuh"
#include "rd/gpu/device/brute_force/rd_globals.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/util/data_order_traits.hpp"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/rd_samples.cuh"
#include "rd/utils/rd_params.hpp"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
static const int TEST_DIM = 2;
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--r1=<r1 param>]\n"
"\t\t[--r2=<r2 param>]\n"
"\t\t[--a=<spiral param>]\n"
"\t\t[--b=<spiral param>]\n"
"\t\t[--s=<spiral noise sigma>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("r1", dParams.r1);
args.GetCmdLineArgument("r2", dParams.r2);
args.GetCmdLineArgument("r1", fParams.r1);
args.GetCmdLineArgument("r2", fParams.r2);
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", fSParams.a);
args.GetCmdLineArgument("a", dSParams.a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", fSParams.b);
args.GetCmdLineArgument("b", dSParams.b);
}
if (args.CheckCmdLineFlag("s"))
{
args.GetCmdLineArgument("s", fSParams.sigma);
args.GetCmdLineArgument("s", dSParams.sigma);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
deviceInit(fParams.devId);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testDecimateKernel<float>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE: " << std::endl;
testDecimateKernel<double>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
deviceReset();
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
void decimateGold(
rd::RDParams<T> &rdp,
T *P,
T *S,
T *chosenS,
int &chosenCount)
{
std::list<T*> csList;
rd::choose(P, S, csList, rdp.np, rdp.ns, TEST_DIM, rdp.r1);
chosenCount = rdp.ns;
rd::copyTable(S, chosenS, chosenCount * TEST_DIM);
std::cout << "Chosen count: " << rdp.ns << std::endl;
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_chosen_set";
gDrawer.startGraph(os.str(), TEST_DIM);
gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
rd::decimate(S, csList, rdp.ns, TEST_DIM, rdp.r2);
std::cout << "Decimate count: " << rdp.ns << std::endl;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
}
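// testDecimateRowMajorOrder copies the CPU-chosen samples to the device in
// row-major layout, runs DeviceDecimate::decimate on them, and checks both the
// surviving sample count and the coordinates against the CPU gold result.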
template <typename T>
void testDecimateRowMajorOrder(
rd::RDParams<T> const &rdp,
T *d_S,
T *h_chosenS,
int h_chosenCount,
T const *S_gold)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateRowMajorOrder:" << std::endl;
T *S_gpu;
int *d_ns, h_ns;
checkCudaErrors(hipMemset(d_S, 0, rdp.np * TEST_DIM * sizeof(T)));
// get chosen samples to device memory properly ordered
checkCudaErrors(hipMemcpy(d_S, h_chosenS, rdp.np * TEST_DIM * sizeof(T), hipMemcpyHostToDevice));
checkCudaErrors(hipGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(hipMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
rd::gpu::bruteForce::DeviceDecimate::decimate<TEST_DIM, rd::ROW_MAJOR>(d_S, d_ns, rdp.r2);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
S_gpu = new T[h_ns * TEST_DIM];
checkCudaErrors(hipMemcpy(S_gpu, d_S, h_ns * TEST_DIM * sizeof(T), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
rd::checkResult(S_gold, S_gpu, rdp.ns * TEST_DIM);
delete[] S_gpu;
}
template <typename T>
void testDecimateColMajorOrder(
rd::RDParams<T> const &rdp,
T *d_S,
T const *h_chosenS,
int h_chosenCount,
T const *S_gold)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateColMajorOrder:" << std::endl;
T *S_gpu;
int *d_ns, h_ns;
size_t sPitch;
T *d_S2;
checkCudaErrors(hipMallocPitch(&d_S2, &sPitch, h_chosenCount * sizeof(T), TEST_DIM));
checkCudaErrors(hipMemset2D(d_S2, sPitch, 0, h_chosenCount * sizeof(T), TEST_DIM));
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_S2, h_chosenS, TEST_DIM, h_chosenCount, sPitch, TEST_DIM * sizeof(T));
checkCudaErrors(hipGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(hipMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
rd::gpu::bruteForce::DeviceDecimate::decimate<TEST_DIM, rd::COL_MAJOR>(d_S2, d_ns, rdp.r2,
sPitch / sizeof(T), nullptr, true);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
S_gpu = new T[h_chosenCount * TEST_DIM];
rd::gpu::rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, hipMemcpyDeviceToHost>(
S_gpu, d_S2, h_chosenCount, TEST_DIM, TEST_DIM * sizeof(T), sPitch);
checkCudaErrors(hipDeviceSynchronize());
if (rdp.verbose)
{
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
rd::checkResult(S_gold, S_gpu, h_ns * TEST_DIM, rdp.verbose);
delete[] S_gpu;
checkCudaErrors(hipFree(d_S2));
}
template <typename T>
void testDecimateSOA(
rd::RDParams<T> const &rdp,
T const *h_chosenS,
int h_chosenCount,
T const *S_gold)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateSOA:" << std::endl;
typedef rd::ColMajorDeviceSamples<T, TEST_DIM> SamplesDevT;
SamplesDevT *d_S;
int *d_ns, h_ns;
d_S = new SamplesDevT(rdp.np);
T *h_aux = new T[rdp.np * TEST_DIM];
rd::copyTable(h_chosenS, h_aux, h_chosenCount * TEST_DIM);
rd::transposeInPlace(h_aux, h_aux + rdp.np * TEST_DIM, TEST_DIM);
d_S->copyFromContinuousData(h_aux, rdp.np);
checkCudaErrors(hipGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(hipMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( __decimate_kernel_v1<T, 512>), dim3(1), dim3(512), 0, 0, d_S->dSamples, d_ns, rdp.r1, TEST_DIM);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
d_S->copyToContinuousData(h_aux);
checkCudaErrors(hipDeviceSynchronize());
rd::transposeInPlace(h_aux, h_aux + rdp.np * TEST_DIM, rdp.np);
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
rd::checkResult(S_gold, h_aux, rdp.ns * TEST_DIM);
delete[] h_aux;
delete d_S;
}
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &sp)
{
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << TEST_DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "\t r1: " << rdp.r1 << std::endl;
std::cout << "\t r2: " << rdp.r2 << std::endl;
std::cout << "Spiral params: " << std::endl;
std::cout << "\t a: " << sp.a << std::endl;
std::cout << "\t b: " << sp.b << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
rd::GraphDrawer<T> gDrawer;
T *d_P, *d_S;
T *h_P, *h_S, *h_chosenS;
checkCudaErrors(hipMalloc((void**)&d_P, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMemset(d_P, 0, rdp.np * TEST_DIM * sizeof(T)));
h_P = new T[rdp.np * TEST_DIM];
h_S = new T[rdp.np * TEST_DIM];
h_chosenS = new T[rdp.np * TEST_DIM];
switch(TEST_DIM)
{
case 2:
rd::gpu::SamplesGenerator<T>::template spiral2D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
case 3:
rd::gpu::SamplesGenerator<T>::template spiral3D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
default:
throw std::logic_error("Not supported dimension!");
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_P, d_P, rdp.np * TEST_DIM * sizeof(T),
hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
rd::transposeInPlace(h_P, h_P + rdp.np * TEST_DIM, rdp.np);
std::ostringstream os;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_P, rdp.np, TEST_DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE DECIMATE
//---------------------------------------------------
int h_chosenCount;
decimateGold(rdp, h_P, h_S, h_chosenS, h_chosenCount);
//---------------------------------------------------
// GPU DECIMATE
//---------------------------------------------------
rdp.devId = (rdp.devId != -1) ? rdp.devId : 0;
testDecimateRowMajorOrder(rdp, d_S, h_chosenS, h_chosenCount, h_S);
testDecimateColMajorOrder(rdp, d_S, h_chosenS, h_chosenCount, h_S);
// testDecimateSOA(rdp, h_chosenS, h_chosenCount, h_S);
// clean-up
delete[] h_P;
delete[] h_S;
delete[] h_chosenS;
checkCudaErrors(hipFree(d_P));
checkCudaErrors(hipFree(d_S));
}
| f5e9bf54f42d1915432517efb51e73bb6655ecd8.cu | /**
* @file test_decimate.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <helper_cuda.h>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <algorithm>
#include <list>
#include "rd/cpu/brute_force/choose.hpp"
#include "rd/cpu/brute_force/decimate.hpp"
#include "rd/gpu/device/brute_force/decimate.cuh"
#include "rd/gpu/device/device_decimate.cuh"
#include "rd/gpu/device/brute_force/rd_globals.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/util/data_order_traits.hpp"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/rd_samples.cuh"
#include "rd/utils/rd_params.hpp"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
static const int TEST_DIM = 2;
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--r1=<r1 param>]\n"
"\t\t[--r2=<r2 param>]\n"
"\t\t[--a=<spiral param>]\n"
"\t\t[--b=<spiral param>]\n"
"\t\t[--s=<spiral noise sigma>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("r1", dParams.r1);
args.GetCmdLineArgument("r2", dParams.r2);
args.GetCmdLineArgument("r1", fParams.r1);
args.GetCmdLineArgument("r2", fParams.r2);
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", fSParams.a);
args.GetCmdLineArgument("a", dSParams.a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", fSParams.b);
args.GetCmdLineArgument("b", dSParams.b);
}
if (args.CheckCmdLineFlag("s"))
{
args.GetCmdLineArgument("s", fSParams.sigma);
args.GetCmdLineArgument("s", dSParams.sigma);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
deviceInit(fParams.devId);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testDecimateKernel<float>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE: " << std::endl;
testDecimateKernel<double>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
deviceReset();
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
void decimateGold(
rd::RDParams<T> &rdp,
T *P,
T *S,
T *chosenS,
int &chosenCount)
{
std::list<T*> csList;
rd::choose(P, S, csList, rdp.np, rdp.ns, TEST_DIM, rdp.r1);
chosenCount = rdp.ns;
rd::copyTable(S, chosenS, chosenCount * TEST_DIM);
std::cout << "Chosen count: " << rdp.ns << std::endl;
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_chosen_set";
gDrawer.startGraph(os.str(), TEST_DIM);
gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
rd::decimate(S, csList, rdp.ns, TEST_DIM, rdp.r2);
std::cout << "Decimate count: " << rdp.ns << std::endl;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
}
template <typename T>
void testDecimateRowMajorOrder(
rd::RDParams<T> const &rdp,
T *d_S,
T *h_chosenS,
int h_chosenCount,
T const *S_gold)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateRowMajorOrder:" << std::endl;
T *S_gpu;
int *d_ns, h_ns;
checkCudaErrors(cudaMemset(d_S, 0, rdp.np * TEST_DIM * sizeof(T)));
// get chosen samples to device memory properly ordered
checkCudaErrors(cudaMemcpy(d_S, h_chosenS, rdp.np * TEST_DIM * sizeof(T), cudaMemcpyHostToDevice));
checkCudaErrors(cudaGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(cudaMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
rd::gpu::bruteForce::DeviceDecimate::decimate<TEST_DIM, rd::ROW_MAJOR>(d_S, d_ns, rdp.r2);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
S_gpu = new T[h_ns * TEST_DIM];
checkCudaErrors(cudaMemcpy(S_gpu, d_S, h_ns * TEST_DIM * sizeof(T), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
rd::checkResult(S_gold, S_gpu, rdp.ns * TEST_DIM);
delete[] S_gpu;
}
template <typename T>
void testDecimateColMajorOrder(
rd::RDParams<T> const &rdp,
T *d_S,
T const *h_chosenS,
int h_chosenCount,
T const *S_gold)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateColMajorOrder:" << std::endl;
T *S_gpu;
int *d_ns, h_ns;
size_t sPitch;
T *d_S2;
checkCudaErrors(cudaMallocPitch(&d_S2, &sPitch, h_chosenCount * sizeof(T), TEST_DIM));
checkCudaErrors(cudaMemset2D(d_S2, sPitch, 0, h_chosenCount * sizeof(T), TEST_DIM));
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
d_S2, h_chosenS, TEST_DIM, h_chosenCount, sPitch, TEST_DIM * sizeof(T));
checkCudaErrors(cudaGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(cudaMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
rd::gpu::bruteForce::DeviceDecimate::decimate<TEST_DIM, rd::COL_MAJOR>(d_S2, d_ns, rdp.r2,
sPitch / sizeof(T), nullptr, true);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
S_gpu = new T[h_chosenCount * TEST_DIM];
rd::gpu::rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, cudaMemcpyDeviceToHost>(
S_gpu, d_S2, h_chosenCount, TEST_DIM, TEST_DIM * sizeof(T), sPitch);
checkCudaErrors(cudaDeviceSynchronize());
if (rdp.verbose)
{
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
rd::checkResult(S_gold, S_gpu, h_ns * TEST_DIM, rdp.verbose);
delete[] S_gpu;
checkCudaErrors(cudaFree(d_S2));
}
template <typename T>
void testDecimateSOA(
rd::RDParams<T> const &rdp,
T const *h_chosenS,
int h_chosenCount,
T const *S_gold)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateSOA:" << std::endl;
typedef rd::ColMajorDeviceSamples<T, TEST_DIM> SamplesDevT;
SamplesDevT *d_S;
int *d_ns, h_ns;
d_S = new SamplesDevT(rdp.np);
T *h_aux = new T[rdp.np * TEST_DIM];
rd::copyTable(h_chosenS, h_aux, h_chosenCount * TEST_DIM);
rd::transposeInPlace(h_aux, h_aux + rdp.np * TEST_DIM, TEST_DIM);
d_S->copyFromContinuousData(h_aux, rdp.np);
checkCudaErrors(cudaGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(cudaMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
__decimate_kernel_v1<T, 512><<<1, 512>>>(d_S->dSamples, d_ns, rdp.r1, TEST_DIM);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
d_S->copyToContinuousData(h_aux);
checkCudaErrors(cudaDeviceSynchronize());
rd::transposeInPlace(h_aux, h_aux + rdp.np * TEST_DIM, rdp.np);
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
rd::checkResult(S_gold, h_aux, rdp.ns * TEST_DIM);
delete[] h_aux;
delete d_S;
}
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &sp)
{
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << TEST_DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "\t r1: " << rdp.r1 << std::endl;
std::cout << "\t r2: " << rdp.r2 << std::endl;
std::cout << "Spiral params: " << std::endl;
std::cout << "\t a: " << sp.a << std::endl;
std::cout << "\t b: " << sp.b << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
rd::GraphDrawer<T> gDrawer;
T *d_P, *d_S;
T *h_P, *h_S, *h_chosenS;
checkCudaErrors(cudaMalloc((void**)&d_P, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMemset(d_P, 0, rdp.np * TEST_DIM * sizeof(T)));
h_P = new T[rdp.np * TEST_DIM];
h_S = new T[rdp.np * TEST_DIM];
h_chosenS = new T[rdp.np * TEST_DIM];
switch(TEST_DIM)
{
case 2:
rd::gpu::SamplesGenerator<T>::template spiral2D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
case 3:
rd::gpu::SamplesGenerator<T>::template spiral3D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
default:
throw std::logic_error("Not supported dimension!");
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_P, d_P, rdp.np * TEST_DIM * sizeof(T),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
rd::transposeInPlace(h_P, h_P + rdp.np * TEST_DIM, rdp.np);
std::ostringstream os;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_P, rdp.np, TEST_DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE DECIMATE
//---------------------------------------------------
int h_chosenCount;
decimateGold(rdp, h_P, h_S, h_chosenS, h_chosenCount);
//---------------------------------------------------
// GPU DECIMATE
//---------------------------------------------------
rdp.devId = (rdp.devId != -1) ? rdp.devId : 0;
testDecimateRowMajorOrder(rdp, d_S, h_chosenS, h_chosenCount, h_S);
testDecimateColMajorOrder(rdp, d_S, h_chosenS, h_chosenCount, h_S);
// testDecimateSOA(rdp, h_chosenS, h_chosenCount, h_S);
// clean-up
delete[] h_P;
delete[] h_S;
delete[] h_chosenS;
checkCudaErrors(cudaFree(d_P));
checkCudaErrors(cudaFree(d_S));
}
|
eac3b9261156f3b555d91261b89abc311ef25afa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/layer_norm.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kColwiseReduceTileSize = 32;
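// RowwiseMomentsCUDAKernel: one block per row of the (M, N) input; each block
// reduces sum(x) and sum(x^2) and derives mean = E[x] and
// rstd = 1 / sqrt(max(E[x^2] - E[x]^2, 0) + eps).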
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC m_shared[C10_WARP_SIZE];
__shared__ T_ACC v_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum1 += static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]);
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared);
if (threadIdx.x == 0) {
const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N);
sum1 *= scale;
sum2 = c10::hip::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0));
mean[i] = sum1;
rstd[i] = c10::hip::compat::rsqrt(sum2 + static_cast<T_ACC>(eps));
}
}
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
int64_t N,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
T* Y) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
const T_ACC beta_v =
beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]);
Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) *
static_cast<T_ACC>(rstd[i]) * gamma_v +
beta_v;
}
}
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
sum1 +=
static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v;
sum2 += static_cast<T_ACC>(dY[index]) * gamma_v;
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
if (threadIdx.x == 0) {
ds[i] = sum1;
db[i] = sum2;
}
}
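// ComputeGradientFusedParamsCUDAKernel folds the per-row sums produced above
// (ds = sum(dY * X * gamma), db = sum(dY * gamma)) into two coefficients so
// the input gradient can be evaluated as dX = rstd * gamma * dY + c1 * X + c2,
// with c1 = (db * mean - ds) * rstd^3 / N and c2 = -(c1 * mean + db * rstd / N).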
template <typename T>
__global__ void ComputeGradientFusedParamsCUDAKernel(
int64_t M,
int64_t N,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c1,
acc_type<T, true>* c2) {
using T_ACC = acc_type<T, true>;
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < M) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N);
const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) *
static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) *
static_cast<T_ACC>(rstd[index]) * s;
c1[index] = a;
c2[index] =
-(a * static_cast<T_ACC>(mean[index]) +
db[index] * static_cast<T_ACC>(rstd[index]) * s);
}
}
template <typename T>
__global__ void LayerNormBackwardCUDAKenrel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* a,
const acc_type<T, true>* b,
const acc_type<T, true>* c,
T* dX) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
dX[index] =
static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v +
b[i] * static_cast<T_ACC>(X[index]) + c[i];
}
}
template <typename T>
__global__ void GammaBetaBackwardSimpleCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < N) {
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = 0; i < M; ++i) {
const int64_t index = i * N + j;
sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index]) *
(static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) *
static_cast<T_ACC>(rstd[i]);
sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]);
}
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
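// GammaBetaBackwardCUDAKernel reduces dgamma/dbeta column-wise with a shared
// memory tile: each thread accumulates two row-strided partial sums, the tile
// is read back transposed, and a warp reduction finishes each column.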
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
__shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (j < N) {
for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) {
const int64_t i1 = i;
const int64_t i2 = i + blockDim.y;
const int64_t index1 = i1 * N + j;
const int64_t index2 = i2 * N + j;
dg_sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index1]) *
(static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) *
static_cast<T_ACC>(rstd[i1]);
db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]);
if (i2 < M) {
dg_sum2 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index2]) *
(static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) *
static_cast<T_ACC>(rstd[i2]);
db_sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]);
}
}
}
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
}
template <typename T>
void LayerNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
T eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
DCHECK_EQ(X.numel(), M * N);
DCHECK(!gamma.defined() || gamma.numel() == N);
DCHECK(!beta.defined() || beta.numel() == N);
const T* X_data = X.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T* Y_data = Y->data_ptr<T>();
T* mean_data = mean->data_ptr<T>();
T* rstd_data = rstd->data_ptr<T>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream,
N, eps, X_data, mean_data, rstd_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream,
N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
void LayerNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
double eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormKernelImpl", [&]() {
LayerNormKernelImplInternal<scalar_t>(
X, gamma, beta, M, N, static_cast<scalar_t>(eps), Y, mean, rstd);
});
}
template <typename T>
void LayerNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
using T_ACC = acc_type<T, true>;
DCHECK_EQ(dY.numel(), M * N);
DCHECK_EQ(X.numel(), M * N);
DCHECK_EQ(mean.numel(), M);
DCHECK_EQ(rstd.numel(), M);
DCHECK(!gamma.defined() || gamma.numel() == N);
const T* dY_data = dY.template data_ptr<T>();
const T* X_data = X.template data_ptr<T>();
const T* mean_data = mean.template data_ptr<T>();
const T* rstd_data = rstd.template data_ptr<T>();
const T* gamma_data =
gamma.defined() ? gamma.template data_ptr<T>() : nullptr;
T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr;
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (dX_data != nullptr) {
const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type();
Tensor ds = at::empty({M}, X.options().dtype(kAccType));
Tensor db = at::empty({M}, X.options().dtype(kAccType));
Tensor scale = at::empty({M}, X.options().dtype(kAccType));
Tensor bias = at::empty({M}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.template data_ptr<T_ACC>();
T_ACC* db_data = db.template data_ptr<T_ACC>();
T_ACC* scale_data = scale.template data_ptr<T_ACC>();
T_ACC* bias_data = bias.template data_ptr<T_ACC>();
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>)
, dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream,
N, dY_data, X_data, gamma_data, ds_data, db_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( ComputeGradientFusedParamsCUDAKernel<T>)
, dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
M,
N,
mean_data,
rstd_data,
ds_data,
db_data,
scale_data,
bias_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( LayerNormBackwardCUDAKenrel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream,
N,
dY_data,
X_data,
gamma_data,
rstd_data,
scale_data,
bias_data,
dX_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
if (dgamma->defined() || dbeta->defined()) {
T* dgamma_data =
dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr;
T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr;
if (M < 512) {
// For small batch size, do colwise reduce directly.
const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( GammaBetaBackwardSimpleCUDAKernel<T>)
, dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B =
(N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize;
constexpr int kThreadX = kColwiseReduceTileSize;
constexpr int kThreadY = kColwiseReduceTileSize / 2;
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<T>)
, dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream,
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
void LayerNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() {
LayerNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, M, N, dX, dgamma, dbeta);
});
}
} // namespace
std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda(
const Tensor& input,
IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
double eps) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor Y = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor mean = at::empty({M}, X.options());
Tensor rstd = at::empty({M}, X.options());
if (M > 0) {
LayerNormKernelImpl(X, gamma, beta, M, N, eps, &Y, &mean, &rstd);
const auto input_shape = input.sizes();
const size_t axis = input.dim() - normalized_shape.size();
std::vector<int64_t> stat_shape;
for (size_t idx = 0; idx < axis; ++idx) {
stat_shape.push_back(input_shape[idx]);
}
for (size_t idx = axis; idx < input.dim(); ++idx) {
stat_shape.push_back(1);
}
mean = mean.view(stat_shape);
rstd = rstd.view(stat_shape);
}
return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd));
}
std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda(
const Tensor& dY,
const Tensor& input,
IntArrayRef normalized_shape,
const Tensor& mean,
const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
std::array<bool, 3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor dX;
Tensor dgamma;
Tensor dbeta;
if (grad_input_mask[0]) {
dX = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[1]) {
dgamma = M > 0 ? at::native::empty_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[2]) {
dbeta = M > 0 ? at::native::empty_like(beta, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(beta, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (M > 0) {
LayerNormBackwardKernelImpl(
dY, X, mean, rstd, gamma, M, N, &dX, &dgamma, &dbeta);
}
return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta));
}
REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl);
REGISTER_DISPATCH(LayerNormBackwardKernel, &LayerNormBackwardKernelImpl);
} // namespace native
} // namespace at
| eac3b9261156f3b555d91261b89abc311ef25afa.cu | #include <ATen/native/layer_norm.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kColwiseReduceTileSize = 32;
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC m_shared[C10_WARP_SIZE];
__shared__ T_ACC v_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum1 += static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]);
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared);
if (threadIdx.x == 0) {
const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N);
sum1 *= scale;
sum2 = c10::cuda::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0));
mean[i] = sum1;
rstd[i] = c10::cuda::compat::rsqrt(sum2 + static_cast<T_ACC>(eps));
}
}
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
int64_t N,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
T* Y) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
const T_ACC beta_v =
beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]);
Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) *
static_cast<T_ACC>(rstd[i]) * gamma_v +
beta_v;
}
}
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
sum1 +=
static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v;
sum2 += static_cast<T_ACC>(dY[index]) * gamma_v;
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
if (threadIdx.x == 0) {
ds[i] = sum1;
db[i] = sum2;
}
}
template <typename T>
__global__ void ComputeGradientFusedParamsCUDAKernel(
int64_t M,
int64_t N,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c1,
acc_type<T, true>* c2) {
using T_ACC = acc_type<T, true>;
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < M) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N);
const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) *
static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) *
static_cast<T_ACC>(rstd[index]) * s;
c1[index] = a;
c2[index] =
-(a * static_cast<T_ACC>(mean[index]) +
db[index] * static_cast<T_ACC>(rstd[index]) * s);
}
}
template <typename T>
__global__ void LayerNormBackwardCUDAKenrel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* a,
const acc_type<T, true>* b,
const acc_type<T, true>* c,
T* dX) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
dX[index] =
static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v +
b[i] * static_cast<T_ACC>(X[index]) + c[i];
}
}
template <typename T>
__global__ void GammaBetaBackwardSimpleCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < N) {
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = 0; i < M; ++i) {
const int64_t index = i * N + j;
sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index]) *
(static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) *
static_cast<T_ACC>(rstd[i]);
sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]);
}
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
__shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (j < N) {
for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) {
const int64_t i1 = i;
const int64_t i2 = i + blockDim.y;
const int64_t index1 = i1 * N + j;
const int64_t index2 = i2 * N + j;
dg_sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index1]) *
(static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) *
static_cast<T_ACC>(rstd[i1]);
db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]);
if (i2 < M) {
dg_sum2 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index2]) *
(static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) *
static_cast<T_ACC>(rstd[i2]);
db_sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]);
}
}
}
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
}
template <typename T>
void LayerNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
T eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
DCHECK_EQ(X.numel(), M * N);
DCHECK(!gamma.defined() || gamma.numel() == N);
DCHECK(!beta.defined() || beta.numel() == N);
const T* X_data = X.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T* Y_data = Y->data_ptr<T>();
T* mean_data = mean->data_ptr<T>();
T* rstd_data = rstd->data_ptr<T>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
RowwiseMomentsCUDAKernel<T>
<<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>(
N, eps, X_data, mean_data, rstd_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
LayerNormForwardCUDAKernel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>(
N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void LayerNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
double eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormKernelImpl", [&]() {
LayerNormKernelImplInternal<scalar_t>(
X, gamma, beta, M, N, static_cast<scalar_t>(eps), Y, mean, rstd);
});
}
template <typename T>
void LayerNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
using T_ACC = acc_type<T, true>;
DCHECK_EQ(dY.numel(), M * N);
DCHECK_EQ(X.numel(), M * N);
DCHECK_EQ(mean.numel(), M);
DCHECK_EQ(rstd.numel(), M);
DCHECK(!gamma.defined() || gamma.numel() == N);
const T* dY_data = dY.template data_ptr<T>();
const T* X_data = X.template data_ptr<T>();
const T* mean_data = mean.template data_ptr<T>();
const T* rstd_data = rstd.template data_ptr<T>();
const T* gamma_data =
gamma.defined() ? gamma.template data_ptr<T>() : nullptr;
T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr;
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
if (dX_data != nullptr) {
const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type();
Tensor ds = at::empty({M}, X.options().dtype(kAccType));
Tensor db = at::empty({M}, X.options().dtype(kAccType));
Tensor scale = at::empty({M}, X.options().dtype(kAccType));
Tensor bias = at::empty({M}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.template data_ptr<T_ACC>();
T_ACC* db_data = db.template data_ptr<T_ACC>();
T_ACC* scale_data = scale.template data_ptr<T_ACC>();
T_ACC* bias_data = bias.template data_ptr<T_ACC>();
ComputeInternalGradientsCUDAKernel<T>
<<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>(
N, dY_data, X_data, gamma_data, ds_data, db_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads;
ComputeGradientFusedParamsCUDAKernel<T>
<<<B, kCUDANumThreads, 0, cuda_stream>>>(
M,
N,
mean_data,
rstd_data,
ds_data,
db_data,
scale_data,
bias_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
LayerNormBackwardCUDAKernel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>(
N,
dY_data,
X_data,
gamma_data,
rstd_data,
scale_data,
bias_data,
dX_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
if (dgamma->defined() || dbeta->defined()) {
T* dgamma_data =
dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr;
T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr;
if (M < 512) {
// For small batch size, do colwise reduce directly.
const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads;
GammaBetaBackwardSimpleCUDAKernel<T>
<<<B, kCUDANumThreads, 0, cuda_stream>>>(
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B =
(N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize;
constexpr int kThreadX = kColwiseReduceTileSize;
constexpr int kThreadY = kColwiseReduceTileSize / 2;
GammaBetaBackwardCUDAKernel<T>
<<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>(
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
void LayerNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() {
LayerNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, M, N, dX, dgamma, dbeta);
});
}
} // namespace
std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda(
const Tensor& input,
IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
double eps) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor Y = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor mean = at::empty({M}, X.options());
Tensor rstd = at::empty({M}, X.options());
if (M > 0) {
LayerNormKernelImpl(X, gamma, beta, M, N, eps, &Y, &mean, &rstd);
const auto input_shape = input.sizes();
const size_t axis = input.dim() - normalized_shape.size();
std::vector<int64_t> stat_shape;
for (size_t idx = 0; idx < axis; ++idx) {
stat_shape.push_back(input_shape[idx]);
}
for (size_t idx = axis; idx < input.dim(); ++idx) {
stat_shape.push_back(1);
}
mean = mean.view(stat_shape);
rstd = rstd.view(stat_shape);
}
return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd));
}
std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda(
const Tensor& dY,
const Tensor& input,
IntArrayRef normalized_shape,
const Tensor& mean,
const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
std::array<bool, 3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor dX;
Tensor dgamma;
Tensor dbeta;
if (grad_input_mask[0]) {
dX = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[1]) {
dgamma = M > 0 ? at::native::empty_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[2]) {
dbeta = M > 0 ? at::native::empty_like(beta, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(beta, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (M > 0) {
LayerNormBackwardKernelImpl(
dY, X, mean, rstd, gamma, M, N, &dX, &dgamma, &dbeta);
}
return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta));
}
REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl);
REGISTER_DISPATCH(LayerNormBackwardKernel, &LayerNormBackwardKernelImpl);
} // namespace native
} // namespace at
|
6d4e30432d0124128d8f58f509e72490a576dcbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
Merges two sorted arrays A and B into M using a merge path
@file pathMerge.cu
@author Dang Vu Laurent Durand Homer
@version 1.0 14/12/20
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <string>
#include <iostream>
#include <stdlib.h>
/**
Verify cuda calls and return cuda error if any
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
Initialize an ascending array with random values
@param array : the array to fill with random ascending values
size : size of the array
adder : each x_i is larger than x_{i-1} by a random value between 0 and adder
*/
void init_array(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
for(int i = 1; i < size; i++)
{
array[i] = array[i-1] + rand()%adder;
}
}
/**
Print an array of size size
@param a : array to print
size : size of arrays
*/
void print_array(int* a, int size)
{
printf("[");
for(int i = 0; i < size;i++)
{
printf("%d " , a[i]);
}
printf("]\n");
}
/**
Sequential version of merge
@param a_k, b_k : arrays to merge
m_k : merge of a and b
n_a, n_b, n_m : respective sizes of a_k, b_k, m_k
*/
void mergeSeq(int *a_k, int *b_k, int *m_k, int n_a, int n_b, int n_m)
{
int i, j;
i=0;
j=0;
while(i+j < n_m)
{
if (i>= n_a)
{
m_k[i+j]=b_k[j];
j++;
}
else if (j>= n_b || a_k[i] < b_k[j])
{
m_k[i+j]=a_k[i];
i++;
}
else
{
m_k[i+j]=b_k[j];
j++;
}
}
}
/**
Parallel version of merge of A and B with |A| + |B| <= 1024
@param d_a, d_b : device versions of arrays to merge
d_m : device version of merge of a and b
n_a, n_b, n_m : respective sizes of d_a, d_b, d_m
*/
__device__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){
int i = threadIdx.x;
if(i < n_m)
{
int2 K;
int2 P;
int2 Q;
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
{
d_m[i] = d_a[Q.y];
}
else
{
d_m[i] = d_b[Q.x];
}
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
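/*
Illustrative wrapper added for clarity (not used by main below): mergeSmall_k is a __device__
helper, so a caller is expected to launch one block of n_a + n_b threads (<= 1024), for
example through a small kernel like this one that works directly on global memory.
*/
__global__ void mergeSmallDemo_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b)
{
mergeSmall_k(d_a, d_b, d_m, n_a, n_b, n_a + n_b);
}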
/**
Parallel version of merge of A and B of any sizes
@param a, b : device versions of arrays to merge
m : device version of merge of a and b
n_a, n_b, n_m : respective sizes of a, b, m
path : points of the path to cut A and B to pieces to merge
n_path : number of points in the path
nb_partition : number of pieces of A and B (a_k and b_k) to merge with mergeSmall_k
*/
__global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions)
{
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int i = blockId * blockDim.x + threadId;
if (blockId <= nbPartitions)//One block handles one partition
{
int x0, y0, x1, y1;
x0 = path[blockId].x;
y0 = path[blockId].y;
x1 = path[blockId+1].x;
y1 = path[blockId+1].y;
const int dimx=x1-x0;
const int dimy = y1-y0;
//TODO: size these with dimx, dimy and dimx+dimy
__shared__ int a_k[1024];
__shared__ int b_k[1024];
__shared__ int m_k[1024];
if (threadId < dimx) //Fill a_k[i] for 0 <= i < dimx
{
a_k[threadId] = a[x0+threadId];
}
else if (threadId < dimy+dimx)//Fill b_k[i] for indices dimx <= i < dimx+dimy
{
b_k[threadId-dimx] = b[y0+threadId-dimx];
}
__syncthreads();
mergeSmall_k(a_k, b_k, m_k, dimx, dimy, dimx+dimy);
if (threadId < dimx+dimy) //guard: the last partition can be smaller than blockDim.x
m[i] = m_k[threadId];
}
}
/**
Generate the path that divides A and B into pieces that we'll give to mergeSmall_k
@param pas: size of pieces
path : store the points of the path
n_path : number of points in the path
nb_partition : number of pieces of A and B (a_k and b_k) to merge with mergeSmall_k
d_a, d_b : device versions of arrays to merge
n_a, n_b : respective sizes of d_a, d_b
*/
__global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b)
{
int thread_i = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_i <= (n_a + n_b)/pas) //Check that the thread index stays within the path array (each thread handles one multiple of the step)
{
int i = thread_i*pas;
int2 K;
int2 P;
int2 Q;
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
while(1)
{
//Compute the midpoint between P and K
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
//
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
//
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
//printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x);
//Warning: mind the x/y ordering here
path[thread_i].x=Q.y;
path[thread_i].y=Q.x;
}
//If |m| is not a multiple of the step, thread 0 appends (n_a, n_b) at the end of the array
if (thread_i==0 && (n_a+n_b)%pas!=0)
{
//printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b);
path[n_path-1].x=n_a;
path[n_path-1].y=n_b;
}
}
/**
Verify that A and B are correctly merged in M
*/
int assertMerge(int *tab, int *tab2, int size)
{
for (int i=0; i<size-1; i++)
{
if (tab[i] > tab[i+1] || tab[i] != tab2[i] || (i>10000 && tab[i] == 0))
{
printf("WARNING : Unsuccessful merge on indice %d ...\n", i);
return 0;
}
}
printf("Successful merge !\n");
return 1;
}
/**
Merge two sorted arrays A and B into M
@param argv[1] : size of A
argv[2] : size of B
argv[3] : step (size of the partition handled by each block)
*/
int main(int argc, char *argv[])
{
std::clock_t startS, endS;
float seqMergeTime, parMergeTime, DoH, HoD;
srand(time(NULL));
int n_a, n_b;
int pas;
if(argc >= 4) //sizes of A and B plus the step
{
n_a = atoi(argv[1]);
n_b = atoi(argv[2]);
pas = atoi(argv[3]);
}
else
{
n_a = 100;
n_b = 100;
pas = 1024;
}
int n_m = n_a+n_b;
// <1024
int nbPartitions = n_m/pas+(n_m%pas!=0); // Add 1 if n_m is not a multiple of pas
int n_path = (1 + nbPartitions); //1 (for (0,0)) + |m|/pas (number of chunks of size pas) + 1 (if the last chunk is smaller than pas)
printf("========== Merge of A and B ==========\n");
printf("* Size of A : %d\n", n_a);
printf("* Size of B : %d\n", n_b);
printf("* Step : %d\n* Nbr of partitions : %d\n\n", pas, nbPartitions);
//Initialize arrays a and b
int *a, *aGPU;
a = (int*)malloc(n_a*sizeof(int));
init_array(a, n_a, 10);
gpuErrchk(hipMalloc(&aGPU, n_a*sizeof(int)));
int *b, *bGPU;
b = (int*)malloc(n_b*sizeof(int));
init_array(b, n_b, 10);
gpuErrchk(hipMalloc(&bGPU, n_b*sizeof(int)));
// print_array(b, n_b);
// print_array(a, n_a);
int *m, *mGPU, *mseq;
m = (int*)malloc(n_m*sizeof(int));
mseq = (int*)malloc(n_m*sizeof(int));
gpuErrchk(hipMalloc(&mGPU, n_m*sizeof(int)));
//Declare and allocate the path
int2 *pathGPU;
gpuErrchk(hipMalloc(&pathGPU, n_path*sizeof(int2)));
startS = std::clock();
gpuErrchk(hipMemcpy(aGPU, a, n_a*sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(bGPU, b, n_b*sizeof(int), hipMemcpyHostToDevice));
endS = std::clock();
HoD = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Merge of A and B of size %d and %d runing...\n", n_a, n_b);
startS = std::clock();
//================ Parallel ================
hipLaunchKernelGGL(( pathBig_k), dim3(nbPartitions/1024+1), dim3(1024), 0, 0, pas, pathGPU, n_path, aGPU, n_a, bGPU, n_b);
hipLaunchKernelGGL(( mergeBig_k), dim3(nbPartitions), dim3(pas), 0, 0, mGPU, n_m, aGPU, n_a, bGPU, n_b, pathGPU, n_path, nbPartitions);
hipDeviceSynchronize();
endS = std::clock();
parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
//Copy device to host
startS = std::clock();
hipMemcpy(m, mGPU, n_m*sizeof(int), hipMemcpyDeviceToHost);
endS = std::clock();
DoH = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Merge done !\n\n");
//================ Sequential ================
startS = std::clock();
mergeSeq(a, b, mseq, n_a, n_b, n_m);
endS = std::clock();
seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
//print_array(m, n_m);
// print_array(mseq, n_m);
printf("\n========= Sequential merge : =============\n");
printf("Total time elapsed : %f s\n", seqMergeTime);
printf("\n");
printf("========= Parallel merge : =============\n");
printf("Total time elapsed : %f s\n", parMergeTime+DoH+HoD);
printf("Time running algorithm : %f s\n", parMergeTime);
printf("Time to copy Host to Device : %f s\n", HoD);
printf("Time to copy Device to Host : %f s\n", DoH);
assertMerge(m, mseq, n_m);
printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
printf("Parrallel merge is %f times faster than sequential merge !\n", seqMergeTime/(parMergeTime+HoD+DoH));
//Free device and host memory
hipFree(aGPU);
hipFree(bGPU);
hipFree(mGPU);
hipFree(pathGPU);
free(a); free(b); free(m); free(mseq);
return 0;
}
| 6d4e30432d0124128d8f58f509e72490a576dcbd.cu | /**
Merges two sorted arrays A and B into M using a merge path
@file pathMerge.cu
@author Dang Vu Laurent Durand Homer
@version 1.0 14/12/20
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <string>
#include <iostream>
#include <stdlib.h>
/**
Verify cuda calls and return cuda error if any
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
Initialize an ascending array with random values
@param array : the array to fill with random ascending values
size : size of the array
adder : each x_i is larger than x_{i-1} by a random value between 0 and adder
*/
void init_array(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
for(int i = 1; i < size; i++)
{
array[i] = array[i-1] + rand()%adder;
}
}
/**
Print an array of size size
@param a : array to print
size : size of arrays
*/
void print_array(int* a, int size)
{
printf("[");
for(int i = 0; i < size;i++)
{
printf("%d " , a[i]);
}
printf("]\n");
}
/**
Sequential version of merge
@param a_k, b_k : arrays to merge
m_k : merge of a and b
n_a, n_b, n_m : respective sizes of a_k, b_k, m_k
*/
void mergeSeq(int *a_k, int *b_k, int *m_k, int n_a, int n_b, int n_m)
{
int i, j;
i=0;
j=0;
while(i+j < n_m)
{
if (i>= n_a)
{
m_k[i+j]=b_k[j];
j++;
}
else if (j>= n_b || a_k[i] < b_k[j])
{
m_k[i+j]=a_k[i];
i++;
}
else
{
m_k[i+j]=b_k[j];
j++;
}
}
}
/**
Parallel version of merge of A and B with |A| + |B| <= 1024
@param d_a, d_b : device versions of arrays to merge
d_m : device version of merge of a and b
n_a, n_b, n_m : respective sizes of d_a, d_b, d_m
*/
__device__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){
int i = threadIdx.x;
if(i < n_m)
{
int2 K;
int2 P;
int2 Q;
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
{
d_m[i] = d_a[Q.y];
}
else
{
d_m[i] = d_b[Q.x];
}
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
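/*
Illustrative wrapper added for clarity (not used by main below): mergeSmall_k is a __device__
helper, so a caller is expected to launch one block of n_a + n_b threads (<= 1024), for
example through a small kernel like this one that works directly on global memory.
*/
__global__ void mergeSmallDemo_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b)
{
mergeSmall_k(d_a, d_b, d_m, n_a, n_b, n_a + n_b);
}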
/**
Parallel version of merge of A and B of any sizes
@param a, b : device versions of arrays to merge
m : device version of merge of a and b
n_a, n_b, n_m : respective sizes of a, b, m
path : points of the path to cut A and B to pieces to merge
n_path : number of points in the path
nb_partition : number of pieces of A and B (a_k and b_k) to merge with mergeSmall_k
*/
__global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions)
{
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int i = blockId * blockDim.x + threadId;
if (blockId <= nbPartitions)//One block handles one partition
{
int x0, y0, x1, y1;
x0 = path[blockId].x;
y0 = path[blockId].y;
x1 = path[blockId+1].x;
y1 = path[blockId+1].y;
const int dimx=x1-x0;
const int dimy = y1-y0;
//TODO: size these with dimx, dimy and dimx+dimy
__shared__ int a_k[1024];
__shared__ int b_k[1024];
__shared__ int m_k[1024];
if (threadId < dimx) //Fill a_k[i] for 0 <= i < dimx
{
a_k[threadId] = a[x0+threadId];
}
else if (threadId < dimy+dimx)//Fill b_k[i] for indices dimx <= i < dimx+dimy
{
b_k[threadId-dimx] = b[y0+threadId-dimx];
}
__syncthreads();
mergeSmall_k(a_k, b_k, m_k, dimx, dimy, dimx+dimy);
if (threadId < dimx+dimy) //guard: the last partition can be smaller than blockDim.x
m[i] = m_k[threadId];
}
}
/**
Generate the path that divides A and B into pieces that we'll give to mergeSmall_k
@param pas: size of pieces
path : store the points of the path
n_path : number of points in the path
nb_partition : number of pieces of A and B (a_k and b_k) to merge with mergeSmall_k
d_a, d_b : device versions of arrays to merge
n_a, n_b : respective sizes of d_a, d_b
*/
__global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b)
{
int thread_i = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_i <= (n_a + n_b)/pas) //Check that the thread index stays within the path array (each thread handles one multiple of the step)
{
int i = thread_i*pas;
int2 K;
int2 P;
int2 Q;
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
while(1)
{
//Compute the midpoint between P and K
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
//
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
//
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
//printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x);
//Warning: mind the x/y ordering here
path[thread_i].x=Q.y;
path[thread_i].y=Q.x;
}
//If |m| is not a multiple of the step, thread 0 appends (n_a, n_b) at the end of the array
if (thread_i==0 && (n_a+n_b)%pas!=0)
{
//printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b);
path[n_path-1].x=n_a;
path[n_path-1].y=n_b;
}
}
/**
Verify that A and B are correctly merged in M
*/
int assertMerge(int *tab, int *tab2, int size)
{
for (int i=0; i<size-1; i++)
{
if (tab[i] > tab[i+1] || tab[i] != tab2[i] || (i>10000 && tab[i] == 0))
{
printf("WARNING : Unsuccessful merge on indice %d ...\n", i);
return 0;
}
}
printf("Successful merge !\n");
return 1;
}
/**
Merge two sorted arrays A and B into M
@param argv[1] : size of A
argv[2] : size of B
argv[3] : step (size of the partition handled by each block)
*/
int main(int argc, char *argv[])
{
std::clock_t startS, endS;
float seqMergeTime, parMergeTime, DoH, HoD;
srand(time(NULL));
int n_a, n_b;
int pas;
if(argc >= 4) //sizes of A and B plus the step
{
n_a = atoi(argv[1]);
n_b = atoi(argv[2]);
pas = atoi(argv[3]);
}
else
{
n_a = 100;
n_b = 100;
pas = 1024;
}
int n_m = n_a+n_b;
// <1024
int nbPartitions = n_m/pas+(n_m%pas!=0); // Add 1 if n_m is not a multiple of pas
int n_path = (1 + nbPartitions); //1 (for (0,0)) + |m|/pas (number of chunks of size pas) + 1 (if the last chunk is smaller than pas)
printf("========== Merge of A and B ==========\n");
printf("* Size of A : %d\n", n_a);
printf("* Size of B : %d\n", n_b);
printf("* Step : %d\n* Nbr of partitions : %d\n\n", pas, nbPartitions);
//Initialize arrays a and b
int *a, *aGPU;
a = (int*)malloc(n_a*sizeof(int));
init_array(a, n_a, 10);
gpuErrchk(cudaMalloc(&aGPU, n_a*sizeof(int)));
int *b, *bGPU;
b = (int*)malloc(n_b*sizeof(int));
init_array(b, n_b, 10);
gpuErrchk(cudaMalloc(&bGPU, n_b*sizeof(int)));
// print_array(b, n_b);
// print_array(a, n_a);
int *m, *mGPU, *mseq;
m = (int*)malloc(n_m*sizeof(int));
mseq = (int*)malloc(n_m*sizeof(int));
gpuErrchk(cudaMalloc(&mGPU, n_m*sizeof(int)));
//Declare and allocate the path
int2 *pathGPU;
gpuErrchk(cudaMalloc(&pathGPU, n_path*sizeof(int2)));
startS = std::clock();
gpuErrchk(cudaMemcpy(aGPU, a, n_a*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(bGPU, b, n_b*sizeof(int), cudaMemcpyHostToDevice));
endS = std::clock();
HoD = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Merge of A and B of size %d and %d runing...\n", n_a, n_b);
startS = std::clock();
//================ Parallel ================
pathBig_k<<<nbPartitions/1024+1, 1024>>>(pas, pathGPU, n_path, aGPU, n_a, bGPU, n_b);
mergeBig_k<<<nbPartitions, pas>>>(mGPU, n_m, aGPU, n_a, bGPU, n_b, pathGPU, n_path, nbPartitions);
cudaDeviceSynchronize();
endS = std::clock();
parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
//Copy device to host
startS = std::clock();
cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost);
endS = std::clock();
DoH = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Merge done !\n\n");
//================ Sequential ================
startS = std::clock();
mergeSeq(a, b, mseq, n_a, n_b, n_m);
endS = std::clock();
seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
//print_array(m, n_m);
// print_array(mseq, n_m);
printf("\n========= Sequential merge : =============\n");
printf("Total time elapsed : %f s\n", seqMergeTime);
printf("\n");
printf("========= Parallel merge : =============\n");
printf("Total time elapsed : %f s\n", parMergeTime+DoH+HoD);
printf("Time running algorithm : %f s\n", parMergeTime);
printf("Time to copy Host to Device : %f s\n", HoD);
printf("Time to copy Device to Host : %f s\n", DoH);
assertMerge(m, mseq, n_m);
printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
printf("Parrallel merge is %f times faster than sequential merge !\n", seqMergeTime/(parMergeTime+HoD+DoH));
//Free device and host memory
cudaFree(aGPU);
cudaFree(bGPU);
cudaFree(mGPU);
cudaFree(pathGPU);
free(a); free(b); free(m); free(mseq);
return 0;
}
|
f6eac28624043ffafc1b681d0fd50daee2b0be39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 250
#define threads 32
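/*
Summary added for clarity (as we read the kernel below): for each row of the N x N matrix
`mat`, count the positive entries (P) and the non-positive entries (Q), accumulate P - Q into
vec[row] with atomics (several blocks along x contribute to the same row), then overwrite
every negative entry of that row with vec[row]. Note that the final read of vec[row] is only
synchronized within a block, so the rewritten matrix entries may not yet include contributions
from other blocks; the host check at the bottom only validates vec.
*/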
__global__ void kernel(int *mat, int *vec)
{
//__shared__ int sh[threads*threads];
int value;
__shared__ int Qsh[threads], Psh[threads];
int P = 0, Q = 0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int indexMat = col + row * N;
int threadIndex = threadIdx.x + threadIdx.y * blockDim.x;
if (col >= N || row >= N) return;
Qsh[threadIdx.y] = 0;
Psh[threadIdx.y] = 0;
value = mat[indexMat];
if (value > 0) {
P = 1;
} else {
Q = 1;
}
atomicAdd(&Psh[threadIdx.y], P);
atomicAdd(&Qsh[threadIdx.y], Q);
__syncthreads();
if (threadIdx.x == 0)
atomicAdd(&vec[row], Psh[threadIdx.y]);
__syncthreads();
if (threadIdx.x == 0)
atomicSub(&vec[row], Qsh[threadIdx.y]);
__syncthreads();
if (value < 0) {
value = vec[row];
mat[indexMat] = value;
}
}
void PrintMat(int* mat,int n,int m) {
for (int i = 0; i < n; i++)
{
for (int j = 0; j < m; j++)
printf("%d ", mat[i * N + j]);
printf("\n");
}
}
void hostkernel(int* A,int* B)
{
for(int i = 0; i < N; i++) {
int pos = 0, neg = 0;
for (int j = 0; j < N; j++)
if (A[i * N + j] > 0) pos++; else neg++;
B[i] = pos - neg;
for (int j = 0; j < N; j++)
if (A[i * N + j] < 0) A[i * N + j] = B[i];
}
}
void compare(int* A, int* B)
{
for (int i = 0; i < N; i++)
if (A[i] != B[i])
printf("nisu isti");
}
int main()
{
int A[N * N], B[N], Bh[N],A2[N*N];
int* Ad, * Bd;
hipMalloc((void**)&Ad, sizeof(int) * N * N);
hipMalloc((void**)&Bd, sizeof(int) * N);
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++) {
int val = rand() % 10;
A[i * N + j] = val % 3 == 0 ? -val : val;
}
for (int i = 0; i < N; i++)
B[i] = 0;
printf("===== A =====\n");
//PrintMat(A, N, N);
printf("===== =====\n");
hipMemcpy(Ad, A, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(Bd, B, sizeof(int) * N, hipMemcpyHostToDevice);
dim3 gridSize((N + threads - 1) / threads, (N + threads - 1) / threads);
dim3 blockSize(threads, threads);
kernel << <gridSize, blockSize >> > (Ad, Bd);
hipMemcpy(A2, Ad, sizeof(int) * N * N, hipMemcpyDeviceToHost);
hipMemcpy(B, Bd, sizeof(int) * N, hipMemcpyDeviceToHost);
hostkernel(A, Bh);
/* printf("===== DEVICE =====\n");
PrintMat(B,1,N);
printf("===== =====\n");
PrintMat(A2, N, N);
printf("===== HOST =====\n");
PrintMat(Bh, 1, N);
printf("===== =====\n");
PrintMat(A, N, N);*/
compare(B, Bh);
return 0;
} | f6eac28624043ffafc1b681d0fd50daee2b0be39.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 250
#define threads 32
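/*
Summary added for clarity (as we read the kernel below): for each row of the N x N matrix
`mat`, count the positive entries (P) and the non-positive entries (Q), accumulate P - Q into
vec[row] with atomics (several blocks along x contribute to the same row), then overwrite
every negative entry of that row with vec[row]. Note that the final read of vec[row] is only
synchronized within a block, so the rewritten matrix entries may not yet include contributions
from other blocks; the host check at the bottom only validates vec.
*/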
__global__ void kernel(int *mat, int *vec)
{
//__shared__ int sh[threads*threads];
int value;
__shared__ int Qsh[threads], Psh[threads];
int P = 0, Q = 0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int indexMat = col + row * N;
int threadIndex = threadIdx.x + threadIdx.y * blockDim.x;
if (col >= N || row >= N) return;
Qsh[threadIdx.y] = 0;
Psh[threadIdx.y] = 0;
value = mat[indexMat];
if (value > 0) {
P = 1;
} else {
Q = 1;
}
atomicAdd(&Psh[threadIdx.y], P);
atomicAdd(&Qsh[threadIdx.y], Q);
__syncthreads();
if (threadIdx.x == 0)
atomicAdd(&vec[row], Psh[threadIdx.y]);
__syncthreads();
if (threadIdx.x == 0)
atomicSub(&vec[row], Qsh[threadIdx.y]);
__syncthreads();
if (value < 0) {
value = vec[row];
mat[indexMat] = value;
}
}
void PrintMat(int* mat,int n,int m) {
for (int i = 0; i < n; i++)
{
for (int j = 0; j < m; j++)
printf("%d ", mat[i * N + j]);
printf("\n");
}
}
void hostkernel(int* A,int* B)
{
for(int i = 0; i < N; i++) {
int pos = 0, neg = 0;
for (int j = 0; j < N; j++)
if (A[i * N + j] > 0) pos++; else neg++;
B[i] = pos - neg;
for (int j = 0; j < N; j++)
if (A[i * N + j] < 0) A[i * N + j] = B[i];
}
}
void compare(int* A, int* B)
{
for (int i = 0; i < N; i++)
if (A[i] != B[i])
printf("nisu isti");
}
int main()
{
int A[N * N], B[N], Bh[N],A2[N*N];
int* Ad, * Bd;
cudaMalloc((void**)&Ad, sizeof(int) * N * N);
cudaMalloc((void**)&Bd, sizeof(int) * N);
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++) {
int val = rand() % 10;
A[i * N + j] = val % 3 == 0 ? -val : val;
}
for (int i = 0; i < N; i++)
B[i] = 0;
printf("===== A =====\n");
//PrintMat(A, N, N);
printf("===== =====\n");
cudaMemcpy(Ad, A, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(Bd, B, sizeof(int) * N, cudaMemcpyHostToDevice);
dim3 gridSize((N + threads - 1) / threads, (N + threads - 1) / threads);
dim3 blockSize(threads, threads);
kernel << <gridSize, blockSize >> > (Ad, Bd);
cudaMemcpy(A2, Ad, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
cudaMemcpy(B, Bd, sizeof(int) * N, cudaMemcpyDeviceToHost);
hostkernel(A, Bh);
/* printf("===== DEVICE =====\n");
PrintMat(B,1,N);
printf("===== =====\n");
PrintMat(A2, N, N);
printf("===== HOST =====\n");
PrintMat(Bh, 1, N);
printf("===== =====\n");
PrintMat(A, N, N);*/
compare(B, Bh);
return 0;
} |
2256e5e113cd7959a8d33fd2efef7c277266fd61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
//#define BENCH_PRINT
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
int* resultsRef;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
void
init(int argc, char** argv)
{
if(argc==4){
cols = atoi(argv[1]);
rows = atoi(argv[2]);
pyramid_height=atoi(argv[3]);
}else{
printf("Usage: dynproc row_len col_len pyramid_height\n");
exit(0);
}
data = new int[rows*cols];
wall = new int*[rows];
for(int n=0; n<rows; n++)
wall[n]=data+cols*n;
result = new int[cols];
resultsRef = new int[cols];
int seed = M_SEED;
srand(seed);
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
wall[i][j] = rand() % 10;
}
}
for (int j = 0; j < cols; j++)
resultsRef[j] = wall[0][j];
#ifdef BENCH_PRINT
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%d ",wall[i][j]) ;
}
printf("\n") ;
}
#endif
}
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
/*
compute N time steps
*/
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
int pyramid_height, int blockCols, int borderCols)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(blockCols);
int src = 1, dst = 0;
for (int t = 0; t < rows-1; t+=pyramid_height) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
MIN(pyramid_height, rows-t-1),
gpuWall, gpuResult[src], gpuResult[dst],
cols,rows, t, borderCols);
}
return dst;
}
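/*
Illustrative note added for clarity (the numbers are an example, not benchmark inputs): with
BLOCK_SIZE = 256 and pyramid_height = 2, each launch advances 2 rows, every block keeps a halo
of borderCols = 2 columns on each side, and so produces smallBlockCol = 256 - 2*2 = 252 useful
columns; a 10000-column grid then needs blockCols = ceil(10000 / 252) = 40 blocks per launch
(these quantities are computed in run() below).
*/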
int main(int argc, char** argv)
{
int num_devices;
hipGetDeviceCount(&num_devices);
if (num_devices > 1) hipSetDevice(DEVICE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
init(argc, argv);
/* --------------- pyramid parameters --------------- */
int borderCols = (pyramid_height)*HALO;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
int *gpuWall, *gpuResult[2];
int size = rows*cols;
hipMalloc((void**)&gpuResult[0], sizeof(int)*cols);
hipMalloc((void**)&gpuResult[1], sizeof(int)*cols);
hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice);
hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice);
int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
pyramid_height, blockCols, borderCols);
hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost);
printf(" Going for Ref path\n");
//
int *srcRef, *dstRef, *tempRef;
int minRef;
dstRef = resultsRef;
srcRef = new int[cols];
printf(" Going for Ref path- for loop\n");
fflush(stdout);
// pin_stats_reset();
for (int t = 0; t < rows-1; t++) {
printf(" t= %d %d\n",t, rows);
fflush(stdout);
tempRef = srcRef;
srcRef = dstRef;
dstRef = tempRef;
for(int n = 0; n < cols; n++){
minRef = srcRef[n];
if (n > 0)
minRef = MIN(minRef, srcRef[n-1]);
if (n < cols-1)
minRef = MIN(minRef, srcRef[n+1]);
dstRef[n] = wall[t+1][n]+minRef;
}
}
printf(" After for loop of Ref path\n");
fflush(stdout);
for (int i = 0; i < cols; i++)
{
if(result[i] != dstRef[i])
printf("failed results at %d %d %d\n",i,result[i],dstRef[i]) ;
}
//
#ifdef BENCH_PRINT
for (int i = 0; i < cols; i++)
printf("%d ",data[i]) ;
printf("\n") ;
for (int i = 0; i < cols; i++)
printf("%d ",result[i]) ;
printf("\n") ;
#endif
hipFree(gpuWall);
hipFree(gpuResult[0]);
hipFree(gpuResult[1]);
delete [] data;
delete [] wall;
delete [] result;
printf(" Going for delete mem objects\n");
fflush(stdout);
delete [] dstRef;
delete [] srcRef;
//delete [] resultsRef;
}
| 2256e5e113cd7959a8d33fd2efef7c277266fd61.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
//#define BENCH_PRINT
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
int* resultsRef;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
void
init(int argc, char** argv)
{
if(argc==4){
cols = atoi(argv[1]);
rows = atoi(argv[2]);
pyramid_height=atoi(argv[3]);
}else{
printf("Usage: dynproc row_len col_len pyramid_height\n");
exit(0);
}
data = new int[rows*cols];
wall = new int*[rows];
for(int n=0; n<rows; n++)
wall[n]=data+cols*n;
result = new int[cols];
resultsRef = new int[cols];
int seed = M_SEED;
srand(seed);
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
wall[i][j] = rand() % 10;
}
}
for (int j = 0; j < cols; j++)
resultsRef[j] = wall[0][j];
#ifdef BENCH_PRINT
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%d ",wall[i][j]) ;
}
printf("\n") ;
}
#endif
}
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
/*
compute N time steps
*/
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
int pyramid_height, int blockCols, int borderCols)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(blockCols);
int src = 1, dst = 0;
for (int t = 0; t < rows-1; t+=pyramid_height) {
int temp = src;
src = dst;
dst = temp;
dynproc_kernel<<<dimGrid, dimBlock>>>(
MIN(pyramid_height, rows-t-1),
gpuWall, gpuResult[src], gpuResult[dst],
cols,rows, t, borderCols);
}
return dst;
}
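/*
Illustrative note added for clarity (the numbers are an example, not benchmark inputs): with
BLOCK_SIZE = 256 and pyramid_height = 2, each launch advances 2 rows, every block keeps a halo
of borderCols = 2 columns on each side, and so produces smallBlockCol = 256 - 2*2 = 252 useful
columns; a 10000-column grid then needs blockCols = ceil(10000 / 252) = 40 blocks per launch
(these quantities are computed in run() below).
*/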
int main(int argc, char** argv)
{
int num_devices;
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) cudaSetDevice(DEVICE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
init(argc, argv);
/* --------------- pyramid parameters --------------- */
int borderCols = (pyramid_height)*HALO;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
int *gpuWall, *gpuResult[2];
int size = rows*cols;
cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols);
cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols);
cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice);
int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
pyramid_height, blockCols, borderCols);
cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost);
printf(" Going for Ref path\n");
//
int *srcRef, *dstRef, *tempRef;
int minRef;
dstRef = resultsRef;
srcRef = new int[cols];
printf(" Going for Ref path- for loop\n");
fflush(stdout);
// pin_stats_reset();
for (int t = 0; t < rows-1; t++) {
printf(" t= %d %d\n",t, rows);
fflush(stdout);
tempRef = srcRef;
srcRef = dstRef;
dstRef = tempRef;
for(int n = 0; n < cols; n++){
minRef = srcRef[n];
if (n > 0)
minRef = MIN(minRef, srcRef[n-1]);
if (n < cols-1)
minRef = MIN(minRef, srcRef[n+1]);
dstRef[n] = wall[t+1][n]+minRef;
}
}
printf(" After for loop of Ref path\n");
fflush(stdout);
for (int i = 0; i < cols; i++)
{
if(result[i] != dstRef[i])
printf("failed results at %d %d %d\n",i,result[i],dstRef[i]) ;
}
//
#ifdef BENCH_PRINT
for (int i = 0; i < cols; i++)
printf("%d ",data[i]) ;
printf("\n") ;
for (int i = 0; i < cols; i++)
printf("%d ",result[i]) ;
printf("\n") ;
#endif
cudaFree(gpuWall);
cudaFree(gpuResult[0]);
cudaFree(gpuResult[1]);
delete [] data;
delete [] wall;
delete [] result;
printf(" Going for delete mem objects\n");
fflush(stdout);
delete [] dstRef;
delete [] srcRef;
//delete [] resultsRef;
}
|
edd1b0637e4f7d1931b261eaa11c97f3e2fff9bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void expandKernel(double* values, int n_original, int factor, double* expanded){
int tid0 = threadIdx.x + blockIdx.x*blockDim.x ;
int stride = blockDim.x*gridDim.x ;
for ( int tid = tid0 ; tid < n_original*factor ; tid += stride){
int idx = floor(double(tid)/factor) ;
expanded[tid] = values[idx] ;
}
} | edd1b0637e4f7d1931b261eaa11c97f3e2fff9bc.cu | #include "includes.h"
__global__ void expandKernel(double* values, int n_original, int factor, double* expanded){
int tid0 = threadIdx.x + blockIdx.x*blockDim.x ;
int stride = blockDim.x*gridDim.x ;
for ( int tid = tid0 ; tid < n_original*factor ; tid += stride){
int idx = floor(double(tid)/factor) ;
expanded[tid] = values[idx] ;
}
} |
6c33b764d51d3ff26141042ed35fcb5b76c32195.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void set_difference_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2, Iterator3 result1, Iterator4 result2)
{
*result2 = thrust::set_difference(thrust::seq, first1, last1, first2, last2, result1);
}
void TestSetDifferenceDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a(4), b(5);
a[0] = 0; a[1] = 2; a[2] = 4; a[3] = 5;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4; b[4] = 6;
Vector ref(2);
ref[0] = 2; ref[1] = 5;
Vector result(2);
thrust::device_vector<Iterator> end_vec(1);
hipLaunchKernelGGL(( set_difference_kernel), dim3(1),dim3(1), 0, 0, a.begin(), a.end(), b.begin(), b.end(), result.begin(), end_vec.begin());
Iterator end = end_vec.front();
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
}
DECLARE_UNITTEST(TestSetDifferenceDeviceSeq);
| 6c33b764d51d3ff26141042ed35fcb5b76c32195.cu | #include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void set_difference_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2, Iterator3 result1, Iterator4 result2)
{
*result2 = thrust::set_difference(thrust::seq, first1, last1, first2, last2, result1);
}
void TestSetDifferenceDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a(4), b(5);
a[0] = 0; a[1] = 2; a[2] = 4; a[3] = 5;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4; b[4] = 6;
Vector ref(2);
ref[0] = 2; ref[1] = 5;
Vector result(2);
thrust::device_vector<Iterator> end_vec(1);
set_difference_kernel<<<1,1>>>(a.begin(), a.end(), b.begin(), b.end(), result.begin(), end_vec.begin());
Iterator end = end_vec.front();
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
}
DECLARE_UNITTEST(TestSetDifferenceDeviceSeq);
|
ecfefeebe8cd830e931e4d1d2fd8be767b731a84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <ATen/native/hip/LaunchUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros_like.h>
#endif
#include <ATen/native/AdaptivePooling.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
#define START_IND(a,b,c) ((int64_t)((a / b) * c + ((a % b) * c) / b))
#define END_IND(a,b,c) (1 + ((int64_t)(a + 1) * c - 1) / b)
#define START_IND_INT(a,b,c) ((a * c) / b)
#define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
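// Illustrative note added for clarity (not part of the upstream kernel logic): for output bin
// `o` out of `osize` bins over `isize` input elements, the macros above give the half-open
// input range [floor(o*isize/osize), ceil((o+1)*isize/osize)). For example, with isize = 5 and
// osize = 3 the three output bins average input indices [0,2), [1,4) and [3,5).
static_assert(START_IND_INT(1, 3, 5) == 1 && END_IND_INT(1, 3, 5) == 4,
              "adaptive pooling index math sanity check (illustrative)");
static_assert(START_IND_INT(2, 3, 5) == 3 && END_IND_INT(2, 3, 5) == 5,
              "adaptive pooling index math sanity check (illustrative)");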
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched
namespace at::native {
namespace {
// 4d tensor B x D x H x W
// All kernels view batch dim B and feature dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output
*/
template <typename T>
__global__ void adaptive_average_pool(T *input, T *output,
int isizeH, int isizeW,
int osizeH, int osizeW,
int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
// iterators on output pixels
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
output = output + o_plane*osizeH*osizeW;
input = input + i_plane*istrideD;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
const int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
const int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling over corresponding input pixels
T *ptr_input = input + istartH*istrideH + istartW*istrideW;
T *ptr_output = output + oh*osizeW + ow;
T sum = static_cast<T>(0);
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[iw*istrideW];
sum += val;
}
ptr_input += istrideH; // next input line
}
// Update output
*ptr_output = sum / kH / kW;
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
template <typename T>
__global__ void adaptive_average_gradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on input pixels
int ih, iw;
// select input/output plane based on thread/block ID
int i_plane = blockIdx.x;
int o_plane = i_plane;
gradOutput = gradOutput + o_plane*osizeH*osizeW;
gradInput = gradInput + i_plane*isizeH*isizeW;
int istartH = blockDim.y*blockIdx.y + threadIdx.y;
int iendH = isizeH;
int istepH = blockDim.y*gridDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// compute gradInput
for(ih = istartH; ih < iendH; ih += istepH) {
int ostartH = START_IND(ih, isizeH, osizeH);
int oendH = END_IND(ih, isizeH, osizeH);
for(iw = istartW; iw < iendW; iw += istepW) {
int ostartW = START_IND(iw, isizeW, osizeW);
int oendW = END_IND(iw, isizeW, osizeW);
// Compute the gradients over corresponding output pixels
T *ptr_gradInput = gradInput + ih*isizeW + iw;
int oh, ow;
for(oh = ostartH; oh < oendH; ++oh) {
int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH);
for(ow = ostartW; ow < oendW; ++ow) {
int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW);
T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW;
*ptr_gradInput += grad_delta;
}
}
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* (uses atomic add)
*/
template <typename T>
__global__ void atomic_adaptive_average_gradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on output indices
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
gradOutput = gradOutput + o_plane*osizeW*osizeH;
gradInput = gradInput + i_plane*isizeW*isizeH;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients for over corresponding input pixels
T *ptr_gradInput = gradInput + istartH*isizeW + istartW;
T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
T grad_delta = *ptr_gradOutput / kW / kH;
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
// atomic add since different threads could update same variable
gpuAtomicAddNoReturn(&(ptr_gradInput[iw]), grad_delta);
}
ptr_gradInput += isizeW; // next input line
}
}
}
}
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output,
int sizeB, int sizeC,
int isizeH, int isizeW,
int osizeH, int osizeW,
int kernel_stride_C, int kernel_size_C,
index_t istrideB, index_t istrideC,
index_t istrideH, index_t istrideW)
{
extern __shared__ int smem[];
scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem);
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = scalar_t(0.0);
}
__syncthreads();
// each CTA handles a portion of a single slice on batch dimension;
int batch_id = blockIdx.x % sizeB;
int channel_id = blockIdx.x / sizeB;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// each CTA handles a single slice on batch dimension;
// We use gridDim.x to handle striding on C as well.
output = output + batch_id * osizeH * osizeW * sizeC;
input = input + batch_id * istrideB;
// split out_cached and assign each thread its own exclusive slice;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x];
// iterate on output H & W.
// Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
// tile so there's a better chance to hit L1 cache.
index_t oH = (osizeH + gridDim.z-1) / gridDim.z;
index_t oW = (osizeW + gridDim.y-1) / gridDim.y;
index_t ostartH = threadIdx.z + blockIdx.z*oH;
index_t oendH = ::min(ostartH+oH, osizeH);
index_t ostartW = threadIdx.y + blockIdx.y*oW;
index_t oendW = ::min(ostartW+oW, osizeW);
// Stride for threads, each warp can reuse L1 as they go. So theoretically
// better chance to survive cache eviction.
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
int istartH = START_IND_INT(oh, osizeH, isizeH);
int iendH = END_IND_INT(oh, osizeH, isizeH);
for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
int istartW = START_IND_INT(ow, osizeW, isizeW);
int iendW = END_IND_INT(ow, osizeW, isizeW);
scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW));
// loop on input: hierarchy h->w->c, use shared memory here hopefully
// would not stall global memory read;
for (index_t ih = istartH; ih < iendH; ih++) {
for (index_t iw = istartW; iw < iendW; iw++) {
int cached_index = threadIdx.x;
const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
out_cached[cached_index] += ptr_input[c*istrideC];
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC;
int cached_index = threadIdx.x;
// write accumulated output to global memory;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
// Dividing here (the commented-out line below) instead of multiplying by the precomputed
// factor causes numerical mismatches against the NCHW kernel in unit tests; it can be
// switched in to cross-check correctness:
// output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW);
ptr_output[c] = out_cached[cached_index] * factor;
out_cached[cached_index] = scalar_t(0.0);
cached_index += blockDim.x;
}
// no need to __syncthreads() since out_cached is not shared.
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput,
int sizeB, int sizeC,
int isizeH, int isizeW,
int osizeH, int osizeW,
int kernel_stride_C, int kernel_size_C,
index_t ostrideB, index_t ostrideC,
index_t ostrideH, index_t ostrideW)
{
extern __shared__ int smem[];
index_t *ostartW_cached = smem;
index_t *oendW_cached = &ostartW_cached[isizeW];
// be careful with alignment, in case scalar_t is fp16, we want to assign
// int pointers first.
scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]);
scalar_t *r_kH_cached = &r_kW_cached[osizeW];
scalar_t *out_cached = &r_kH_cached[osizeH];
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// Precompute output start/end index per input index on width dimension;
// Not doing this for the height dimension, as that's our outermost loop.
for (index_t i = thread_id; i < isizeW; i+= block_size) {
ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW);
oendW_cached[i] = END_IND_INT(i, isizeW, osizeW);
}
// Precompute the reciprocal pooling window height/width (used as weighting factors) for each output element;
// This is used to weight output gradient when accumulate them on input
// gradient.
// Technically we don't have to compute it for the whole `osizeH`, since
// each cta only covers a consecutive portion of the entire output. But it's
// not going to save us from code divergence, and the shared memory saving is
// not an issue either, so just leave it as is for now.
for (index_t i = thread_id; i < osizeH; i+= block_size) {
r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH));
}
for (index_t i = thread_id; i < osizeW; i+= block_size) {
r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW));
}
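// As a concrete illustration of the cached quantities above (using, say,
// isizeW = 10 and osizeW = 3): input column iw = 4 maps to output columns
// [ostartW_cached[4], oendW_cached[4]) = [1, 2), i.e. it only feeds output
// column 1, whose pooling window is [3, 7), so r_kW_cached[1] = 1/4.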
// each CTA handles a portion of a single slice on batch dimension;
int batch_id = blockIdx.x % sizeB;
int channel_id = blockIdx.x / sizeB;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = scalar_t(0.0);
}
__syncthreads();
// each CTA handles a portion of a single slice on batch dimension;
// We use gridDim.x to handle striding on C as well.
gradInput = gradInput + batch_id * isizeH * isizeW * sizeC;
gradOutput = gradOutput + batch_id * ostrideB;
// split out_cached so that each thread gets its own exclusive slice;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C];
// iterate on input H & W.
// Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
// tile so there's a better chance to hit L1 cache.
index_t iH = (isizeH + gridDim.z-1) / gridDim.z;
index_t iW = (isizeW + gridDim.y-1) / gridDim.y;
index_t istartH = threadIdx.z + blockIdx.z*iH;
index_t iendH = ::min(istartH+iH, isizeH);
index_t istartW = threadIdx.y + blockIdx.y*iW;
index_t iendW = ::min(istartW+iW, isizeW);
// Stride for threads, each warp can reuse L1 as they go. So theoretically
// better chance to survive cache eviction.
for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) {
index_t ostartH = START_IND_INT(ih, isizeH, osizeH);
index_t oendH = END_IND_INT(ih, isizeH, osizeH);
for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) {
// loop on output: hierarchy h->w->c, so we could reuse weight factor f
// because it remains the same for given oh & ow
for(index_t oh = ostartH; oh < oendH; ++oh) {
for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) {
scalar_t f = r_kW_cached[ow] * r_kH_cached[oh];
const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW;
int cached_index = threadIdx.x;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f;
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC;
int cached_index = threadIdx.x;
// write accumulated gradInput to global memory;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
ptr_gradInput[c] = out_cached[cached_index];
out_cached[cached_index] = scalar_t(0.0);
cached_index += blockDim.x;
}
// no need to __syncthreads() since out_cached is not shared.
}
}
}
// 4d tensor B x D x H x W
void adaptive_avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
TensorArg input_arg{ input, "input", 1 },
output_arg{ output, "output", 2 };
checkAllSameGPU(__func__, {input_arg, output_arg});
TORCH_CHECK(output_size.size() == 2, "adaptive_avg_pool2d: output_size must be 2");
int64_t ndim = input.dim();
TORCH_CHECK((ndim == 3 || ndim == 4),
"adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", input.sizes());
for (const auto i : {-2, -1}) {
TORCH_CHECK(input.size(i) > 0,
"adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, "
"but input has sizes ", input.sizes(), " with dimension ", i + ndim, " being "
"empty");
}
Tensor input_ = input;
switch (input.suggest_memory_format()) {
case at::MemoryFormat::ChannelsLast: {
// special case for tensor memory format in channels_last
TORCH_CHECK(input.ndimension() == 4,
"adaptive_avg_pool2d(): Expected 4D tensor, but got ",
input.sizes());
int sizeB = input_.size(0);
int sizeC = input_.size(1);
int isizeH = input_.size(2);
int isizeW = input_.size(3);
int64_t istrideB = input_.stride(0);
int64_t istrideC = input_.stride(1);
int64_t istrideH = input_.stride(2);
int64_t istrideW = input_.stride(3);
int osizeH = output_size[0];
int osizeW = output_size[1];
// preserve channels_last stride on output tensor;
if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) {
// TODO: modify this after resize_ added `memory_format` tag
output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC});
}
if (output.numel() == 0) {
return;
}
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
// Launch kernel on output tensor elements. Logic behind launch config:
// output tensor size NCHW, strides NHWC;
// Launch on:
// N -> grid.x
// H -> grid.z * block.z
// W -> grid.y * block.y
// C -> block.x
// encourage larger block_y & block_z for better cache hit while maintain
// reasonable block_x for coalesced memory access;
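// As a rough worked example (assuming max_threads = 1024, a warp size of 32,
// sizeC = 256 and osizeH = osizeW = 7): the first pass below gives block_x = 32,
// block_y = lastPow2(7) = 4 and block_z = 4, and the final pass grows block_x to
// min(256, 1024 / (4 * 4)) = 64, i.e. block = (64, 4, 4); kernel_stride_C and
// kernel_size_C then come out as 1 and 4.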
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = ceil_div(sizeC, block_x * 4);
int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C);
// Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
// although it could be easily implemented given current kernel.
int grid_x = sizeB*kernel_stride_C;
// it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
int grid_y = std::min<int>(
maxGridSize[1], ceil_div(osizeW, block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
maxGridSize[2], ceil_div(osizeH, block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
// we are dealing with packed tensor here. max index is the same as numel.
// TODO: to really support input tensors large enough to go beyond int32,
// we will need to restrict our shared memory usage and adjust the launch
// config;
AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] {
size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t);
AT_ASSERT(shmem_size <= sharedMemPerBlock);
hipLaunchKernelGGL(( adaptive_average_pool_nhwc<int32_t>), dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
kernel_stride_C, kernel_size_C,
istrideB, istrideC, istrideH, istrideW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
break;
}
case at::MemoryFormat::Contiguous: {
int64_t grid_x = input.size(-3);
if (input.ndimension() == 4) {
input_ = input.contiguous();
grid_x *= input_.size(-4);
}
int64_t sizeD = input_.size(-3);
int64_t isizeH = input_.size(-2);
int64_t isizeW = input_.size(-1);
int64_t istrideD = input_.stride(-3);
int64_t istrideH = input_.stride(-2);
int64_t istrideW = input_.stride(-1);
int64_t osizeH = output_size[0];
int64_t osizeW = output_size[1];
if (input.ndimension() == 4) {
output.resize_({input_.size(-4), sizeD, osizeH, osizeW});
} else {
output.resize_({sizeD, osizeH, osizeW});
}
if (output.numel() == 0) {
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] {
scalar_t *input_data = input_.data_ptr<scalar_t>();
scalar_t *output_data = output.data_ptr<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max<int64_t>((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
// run averagepool kernel
hipLaunchKernelGGL(( adaptive_average_pool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data, output_data,
isizeH, isizeW, osizeH, osizeW,
istrideD, istrideH, istrideW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
break;
}
default:
TORCH_CHECK(
false,
"Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
}
void adaptive_avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input)
{
TensorArg grad_input_arg{ gradInput, "gradInput", 1 },
grad_output_arg{ gradOutput_, "gradOutput_", 2 },
input_arg{ input, "input", 3 };
adaptive_pool_empty_output_check(gradOutput_, "adaptive_avg_pool2d_backward");
checkAllSameGPU(__func__, {grad_input_arg, grad_output_arg, input_arg});
switch (input.suggest_memory_format()) {
case at::MemoryFormat::ChannelsLast: {
// special case for tensor memory format in channels_last
TORCH_CHECK(input.ndimension() == 4,
"adaptive_avg_pool2d_backward_cuda(): Expected 4D tensor, but got ", input.ndimension());
int sizeB = input.size(0);
int sizeC = input.size(1);
int isizeH = input.size(2);
int isizeW = input.size(3);
Tensor gradOutput = gradOutput_;
int64_t ostrideB = gradOutput.stride(0);
int64_t ostrideC = gradOutput.stride(1);
int64_t ostrideH = gradOutput.stride(2);
int64_t ostrideW = gradOutput.stride(3);
int osizeH = gradOutput.size(-2);
int osizeW = gradOutput.size(-1);
// preserve channels_last stride on input tensor;
if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) {
gradInput.as_strided_(
{sizeB, sizeC, isizeH, isizeW},
{sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC});
}
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
// Launch kernel on input tensor elements. Logic behind launch config:
// input tensor size NCHW, strides NHWC;
// Launch on:
// N(C) -> grid.x (striding on C to reduce sh_mem usage)
// H -> grid.z * block.z
// W -> grid.y * block.y
// C -> block.x
// encourage larger block_y & block_z for better cache hit while maintain
// reasonable block_x for coalesced memory access;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = ceil_div(sizeC, block_x * 4);
int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C);
// Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
// although it could be easily implemented given current kernel.
int grid_x = sizeB*kernel_stride_C;
// it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
int grid_y = std::min<int>(
maxGridSize[1], ceil_div(isizeW, block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
maxGridSize[2], ceil_div(isizeH, block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
// we are dealing with packed tensor here. max index is the same as numel.
// TODO: to really support input tensors large enough to go beyond int32,
// we will need to restrict our shared memory usage and adjust the launch
// config;
AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] {
size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t);
AT_ASSERT(shmem_size <= sharedMemPerBlock);
hipLaunchKernelGGL(( adaptive_average_gradinput_nhwc<int32_t>), dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput.data_ptr<scalar_t>(),
gradOutput.data_ptr<scalar_t>(),
sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
kernel_stride_C, kernel_size_C,
ostrideB, ostrideC, ostrideH, ostrideW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
break;
}
case at::MemoryFormat::Contiguous: {
bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
Tensor gradOutput = gradOutput_.contiguous();
int64_t sizeD = input.size(-3);
int64_t isizeH = input.size(-2);
int64_t isizeW = input.size(-1);
int64_t osizeH = gradOutput.size(-2);
int64_t osizeW = gradOutput.size(-1);
int64_t grid_x = sizeD;
if (input.ndimension() == 4) grid_x *= input.size(-4);
//bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] {
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
// cuda blocks & threads:
int blocksH = ::max((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( atomic_adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
else
{
// run updateGradInput kernel
hipLaunchKernelGGL(( adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
);
break;
}
default:
TORCH_CHECK(
false,
"Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
}
} // namespace
Tensor& adaptive_avg_pool2d_out_cuda(
const Tensor& input,
IntArrayRef output_size,
Tensor& output)
{
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor adaptive_avg_pool2d_cuda(
at::Tensor const& input,
IntArrayRef output_size)
{
auto output = at::empty({0}, input.options());
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor& adaptive_avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_out_cuda");
gradInput.resize_as_(input);
if (gradInput.numel() != 0) {
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
}
return gradInput;
}
Tensor adaptive_avg_pool2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (gradInput.numel() != 0) {
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
}
return gradInput;
}
} // namespace at::native
#undef BLOCK_STRIDE
#undef CUDA_MAX_THREADS
#undef START_IND
#undef END_IND
| ecfefeebe8cd830e931e4d1d2fd8be767b731a84.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <ATen/native/cuda/LaunchUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros_like.h>
#endif
#include <ATen/native/AdaptivePooling.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
#define START_IND(a,b,c) ((int64_t)((a / b) * c + ((a % b) * c) / b))
#define END_IND(a,b,c) (1 + ((int64_t)(a + 1) * c - 1) / b)
#define START_IND_INT(a,b,c) ((a * c) / b)
#define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
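// Worked example of the index mapping above (taking, say, isize = 10 and
// osize = 3): output cells 0, 1 and 2 pool the overlapping input ranges
// [0, 4), [3, 7) and [6, 10), since START_IND_INT(a, 3, 10) = a*10/3 and
// END_IND_INT(a, 3, 10) = ((a + 1)*10 + 2)/3.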
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched
namespace at::native {
namespace {
// 4d tensor B x D x H x W
// All kernels view batch dim B and feature dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output
*/
template <typename T>
__global__ void adaptive_average_pool(T *input, T *output,
int isizeH, int isizeW,
int osizeH, int osizeW,
int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
// iterators on output pixels
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
output = output + o_plane*osizeH*osizeW;
input = input + i_plane*istrideD;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
const int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
const int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling over corresponding input pixels
T *ptr_input = input + istartH*istrideH + istartW*istrideW;
T *ptr_output = output + oh*osizeW + ow;
T sum = static_cast<T>(0);
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[iw*istrideW];
sum += val;
}
ptr_input += istrideH; // next input line
}
// Update output
*ptr_output = sum / kH / kW;
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
template <typename T>
__global__ void adaptive_average_gradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on input pixels
int ih, iw;
// select input/output plane based on thread/block ID
int i_plane = blockIdx.x;
int o_plane = i_plane;
gradOutput = gradOutput + o_plane*osizeH*osizeW;
gradInput = gradInput + i_plane*isizeH*isizeW;
int istartH = blockDim.y*blockIdx.y + threadIdx.y;
int iendH = isizeH;
int istepH = blockDim.y*gridDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// compute gradInput
for(ih = istartH; ih < iendH; ih += istepH) {
int ostartH = START_IND(ih, isizeH, osizeH);
int oendH = END_IND(ih, isizeH, osizeH);
for(iw = istartW; iw < iendW; iw += istepW) {
int ostartW = START_IND(iw, isizeW, osizeW);
int oendW = END_IND(iw, isizeW, osizeW);
// Compute the gradients over corresponding output pixels
T *ptr_gradInput = gradInput + ih*isizeW + iw;
int oh, ow;
for(oh = ostartH; oh < oendH; ++oh) {
int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH);
for(ow = ostartW; ow < oendW; ++ow) {
int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW);
T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW;
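// Note that kH and kW above are START - END and hence negative; the two signs
// cancel in the successive divisions, so grad_delta keeps the correct sign and
// magnitude (|kH * kW| is the pooling window area).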
*ptr_gradInput += grad_delta;
}
}
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* (uses atomic add)
*/
template <typename T>
__global__ void atomic_adaptive_average_gradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on output indices
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
gradOutput = gradOutput + o_plane*osizeW*osizeH;
gradInput = gradInput + i_plane*isizeW*isizeH;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients for over corresponding input pixels
T *ptr_gradInput = gradInput + istartH*isizeW + istartW;
T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
T grad_delta = *ptr_gradOutput / kW / kH;
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
// atomic add since different threads could update same variable
gpuAtomicAddNoReturn(&(ptr_gradInput[iw]), grad_delta);
}
ptr_gradInput += isizeW; // next input line
}
}
}
}
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output,
int sizeB, int sizeC,
int isizeH, int isizeW,
int osizeH, int osizeW,
int kernel_stride_C, int kernel_size_C,
index_t istrideB, index_t istrideC,
index_t istrideH, index_t istrideW)
{
extern __shared__ int smem[];
scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem);
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = scalar_t(0.0);
}
__syncthreads();
// each CTA handles a portion of a single slice on batch dimension;
int batch_id = blockIdx.x % sizeB;
int channel_id = blockIdx.x / sizeB;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// each CTA handles a portion of a single slice on batch dimension;
// We use gridDim.x to handle striding on C as well.
output = output + batch_id * osizeH * osizeW * sizeC;
input = input + batch_id * istrideB;
// split out_cached so that each thread gets its own exclusive slice;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x];
// iterate on output H & W.
// Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
// tile so there's a better chance to hit L1 cache.
index_t oH = (osizeH + gridDim.z-1) / gridDim.z;
index_t oW = (osizeW + gridDim.y-1) / gridDim.y;
index_t ostartH = threadIdx.z + blockIdx.z*oH;
index_t oendH = ::min(ostartH+oH, osizeH);
index_t ostartW = threadIdx.y + blockIdx.y*oW;
index_t oendW = ::min(ostartW+oW, osizeW);
// Stride for threads, each warp can reuse L1 as they go. So theoretically
// better chance to survive cache eviction.
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
int istartH = START_IND_INT(oh, osizeH, isizeH);
int iendH = END_IND_INT(oh, osizeH, isizeH);
for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
int istartW = START_IND_INT(ow, osizeW, isizeW);
int iendW = END_IND_INT(ow, osizeW, isizeW);
scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW));
// loop on input: hierarchy h->w->c; accumulating into shared memory here
// hopefully does not stall the global memory reads;
for (index_t ih = istartH; ih < iendH; ih++) {
for (index_t iw = istartW; iw < iendW; iw++) {
int cached_index = threadIdx.x;
const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
out_cached[cached_index] += ptr_input[c*istrideC];
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC;
int cached_index = threadIdx.x;
// write accumulated output to global memory;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
// Multiplying by the precomputed factor rounds differently than the NCHW kernel
// (which divides by kH and kW in sequence), so unit tests comparing the two can
// show small numerical mismatches; switching to the form below could verify correctness:
// output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW);
ptr_output[c] = out_cached[cached_index] * factor;
out_cached[cached_index] = scalar_t(0.0);
cached_index += blockDim.x;
}
// no need to __syncthreads() since out_cached is not shared.
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* NHWC layout for both input and output tensor
* 4D input, 4D output
*/
template <typename index_t, typename scalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput,
int sizeB, int sizeC,
int isizeH, int isizeW,
int osizeH, int osizeW,
int kernel_stride_C, int kernel_size_C,
index_t ostrideB, index_t ostrideC,
index_t ostrideH, index_t ostrideW)
{
extern __shared__ int smem[];
index_t *ostartW_cached = smem;
index_t *oendW_cached = &ostartW_cached[isizeW];
// be careful with alignment, in case scalar_t is fp16, we want to assign
// int pointers first.
scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]);
scalar_t *r_kH_cached = &r_kW_cached[osizeW];
scalar_t *out_cached = &r_kH_cached[osizeH];
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// Precompute output start/end index per input index on width dimension;
// Not doing this for the height dimension, as that's our outermost loop.
for (index_t i = thread_id; i < isizeW; i+= block_size) {
ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW);
oendW_cached[i] = END_IND_INT(i, isizeW, osizeW);
}
// Precompute the reciprocal pooling window height/width (used as weighting factors) for each output element;
// This is used to weight output gradient when accumulate them on input
// gradient.
// Technically we don't have to compute it for the whole `osizeH`, since
// each cta only covers a consecutive portion of the entire output. But it's
// not going to save us from code divergence, and the shared memory saving is
// not an issue either, so just leave it as is for now.
for (index_t i = thread_id; i < osizeH; i+= block_size) {
r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH));
}
for (index_t i = thread_id; i < osizeW; i+= block_size) {
r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW));
}
// each CTA handles a portion of a single slice on batch dimension;
int batch_id = blockIdx.x % sizeB;
int channel_id = blockIdx.x / sizeB;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = scalar_t(0.0);
}
__syncthreads();
// each CTA handles a portion of a single slice on batch dimension;
// We use gridDim.x to handle striding on C as well.
gradInput = gradInput + batch_id * isizeH * isizeW * sizeC;
gradOutput = gradOutput + batch_id * ostrideB;
// split out_cached so that each thread gets its own exclusive slice;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C];
// iterate on input H & W.
// Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on
// tile so there's a better chance to hit L1 cache.
index_t iH = (isizeH + gridDim.z-1) / gridDim.z;
index_t iW = (isizeW + gridDim.y-1) / gridDim.y;
index_t istartH = threadIdx.z + blockIdx.z*iH;
index_t iendH = ::min(istartH+iH, isizeH);
index_t istartW = threadIdx.y + blockIdx.y*iW;
index_t iendW = ::min(istartW+iW, isizeW);
// Stride for threads, each warp can reuse L1 as they go. So theoretically
// better chance to survive cache eviction.
for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) {
index_t ostartH = START_IND_INT(ih, isizeH, osizeH);
index_t oendH = END_IND_INT(ih, isizeH, osizeH);
for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) {
// loop on output: hierarchy h->w->c, so we could reuse weight factor f
// because it remains the same for given oh & ow
for(index_t oh = ostartH; oh < oendH; ++oh) {
for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) {
scalar_t f = r_kW_cached[ow] * r_kH_cached[oh];
const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW;
int cached_index = threadIdx.x;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f;
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC;
int cached_index = threadIdx.x;
// write accumulated gradInput to global memory;
for (index_t c = channel_offset;
c < sizeC;
c += blockDim.x*kernel_stride_C) {
ptr_gradInput[c] = out_cached[cached_index];
out_cached[cached_index] = scalar_t(0.0);
cached_index += blockDim.x;
}
// no need to __syncthreads() since out_cached is not shared.
}
}
}
// 4d tensor B x D x H x W
void adaptive_avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
TensorArg input_arg{ input, "input", 1 },
output_arg{ output, "output", 2 };
checkAllSameGPU(__func__, {input_arg, output_arg});
TORCH_CHECK(output_size.size() == 2, "adaptive_avg_pool2d: output_size must be 2");
int64_t ndim = input.dim();
TORCH_CHECK((ndim == 3 || ndim == 4),
"adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", input.sizes());
for (const auto i : {-2, -1}) {
TORCH_CHECK(input.size(i) > 0,
"adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, "
"but input has sizes ", input.sizes(), " with dimension ", i + ndim, " being "
"empty");
}
Tensor input_ = input;
switch (input.suggest_memory_format()) {
case at::MemoryFormat::ChannelsLast: {
// special case for tensor memory format in channels_last
TORCH_CHECK(input.ndimension() == 4,
"adaptive_avg_pool2d(): Expected 4D tensor, but got ",
input.sizes());
int sizeB = input_.size(0);
int sizeC = input_.size(1);
int isizeH = input_.size(2);
int isizeW = input_.size(3);
int64_t istrideB = input_.stride(0);
int64_t istrideC = input_.stride(1);
int64_t istrideH = input_.stride(2);
int64_t istrideW = input_.stride(3);
int osizeH = output_size[0];
int osizeW = output_size[1];
// preserve channels_last stride on output tensor;
if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) {
// TODO: modify this after resize_ added `memory_format` tag
output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC});
}
if (output.numel() == 0) {
return;
}
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
// Launch kernel on output tensor elements. Logic behind launch config:
// output tensor size NCHW, strides NHWC;
// Launch on:
// N -> grid.x
// H -> grid.z * block.z
// W -> grid.y * block.y
// C -> block.x
// encourage larger block_y & block_z for better cache hit while maintain
// reasonable block_x for coalesced memory access;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = ceil_div(sizeC, block_x * 4);
int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C);
// Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
// although it could be easily implemented given current kernel.
int grid_x = sizeB*kernel_stride_C;
// it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
int grid_y = std::min<int>(
maxGridSize[1], ceil_div(osizeW, block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
maxGridSize[2], ceil_div(osizeH, block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
// we are dealing with packed tensor here. max index is the same as numel.
// TODO: to really support input tensors large enough to go beyond int32,
// we will need to restrict our shared memory usage and adjust the launch
// config;
AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] {
size_t shmem_size = (kernel_size_C * block_x * block_y * block_z) * sizeof(scalar_t);
AT_ASSERT(shmem_size <= sharedMemPerBlock);
adaptive_average_pool_nhwc<int32_t><<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> (
input_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
kernel_stride_C, kernel_size_C,
istrideB, istrideC, istrideH, istrideW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
break;
}
case at::MemoryFormat::Contiguous: {
int64_t grid_x = input.size(-3);
if (input.ndimension() == 4) {
input_ = input.contiguous();
grid_x *= input_.size(-4);
}
int64_t sizeD = input_.size(-3);
int64_t isizeH = input_.size(-2);
int64_t isizeW = input_.size(-1);
int64_t istrideD = input_.stride(-3);
int64_t istrideH = input_.stride(-2);
int64_t istrideW = input_.stride(-1);
int64_t osizeH = output_size[0];
int64_t osizeW = output_size[1];
if (input.ndimension() == 4) {
output.resize_({input_.size(-4), sizeD, osizeH, osizeW});
} else {
output.resize_({sizeD, osizeH, osizeW});
}
if (output.numel() == 0) {
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] {
scalar_t *input_data = input_.data_ptr<scalar_t>();
scalar_t *output_data = output.data_ptr<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max<int64_t>((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
// run averagepool kernel
adaptive_average_pool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
input_data, output_data,
isizeH, isizeW, osizeH, osizeW,
istrideD, istrideH, istrideW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
break;
}
default:
TORCH_CHECK(
false,
"Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
}
void adaptive_avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input)
{
TensorArg grad_input_arg{ gradInput, "gradInput", 1 },
grad_output_arg{ gradOutput_, "gradOutput_", 2 },
input_arg{ input, "input", 3 };
adaptive_pool_empty_output_check(gradOutput_, "adaptive_avg_pool2d_backward");
checkAllSameGPU(__func__, {grad_input_arg, grad_output_arg, input_arg});
switch (input.suggest_memory_format()) {
case at::MemoryFormat::ChannelsLast: {
// special case for tensor memory format in channels_last
TORCH_CHECK(input.ndimension() == 4,
"adaptive_avg_pool2d_backward_cuda(): Expected 4D tensor, but got ", input.ndimension());
int sizeB = input.size(0);
int sizeC = input.size(1);
int isizeH = input.size(2);
int isizeW = input.size(3);
Tensor gradOutput = gradOutput_;
int64_t ostrideB = gradOutput.stride(0);
int64_t ostrideC = gradOutput.stride(1);
int64_t ostrideH = gradOutput.stride(2);
int64_t ostrideW = gradOutput.stride(3);
int osizeH = gradOutput.size(-2);
int osizeW = gradOutput.size(-1);
// preserve channels_last stride on input tensor;
if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) {
gradInput.as_strided_(
{sizeB, sizeC, isizeH, isizeW},
{sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC});
}
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
size_t sharedMemPerBlock = at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock;
// Launch kernel on input tensor elements. Logic behind launch config:
// input tensor size NCHW, strides NHWC;
// Launch on:
// N(C) -> grid.x (striding on C to reduce sh_mem usage)
// H -> grid.z * block.z
// W -> grid.y * block.y
// C -> block.x
// encourage larger block_y & block_z for better cache hit while maintain
// reasonable block_x for coalesced memory access;
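// For instance (assuming max_threads = 1024, a warp size of 32, sizeC = 256
// and isizeH = isizeW = 64), the passes below yield block = (32, 32, 1),
// kernel_stride_C = 2 and kernel_size_C = 4, giving grid.x = 2 * sizeB,
// grid.y = 1 and grid.z = 32 for the 64x64 input tile (BLOCK_STRIDE = 2).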
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = ceil_div(sizeC, block_x * 4);
int kernel_size_C = ceil_div(sizeC, block_x * kernel_stride_C);
// Do NOT clip grid_x, striding on Batch dimension is not in the kernel,
// although it could be easily implemented given current kernel.
int grid_x = sizeB*kernel_stride_C;
// it's OK to clip grid_y & grid_z, as we block the two dimensions in the kernel;
int grid_y = std::min<int>(
maxGridSize[1], ceil_div(isizeW, block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
maxGridSize[2], ceil_div(isizeH, block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
// we are dealing with packed tensor here. max index is the same as numel.
// TODO: to really support input tensors large enough to go beyond int32,
// we will need to restrict our shared memory usage and adjust the launch
// config;
AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] {
size_t shmem_size = (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t);
AT_ASSERT(shmem_size <= sharedMemPerBlock);
adaptive_average_gradinput_nhwc<int32_t><<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>> (
gradInput.data_ptr<scalar_t>(),
gradOutput.data_ptr<scalar_t>(),
sizeB, sizeC, isizeH, isizeW, osizeH, osizeW,
kernel_stride_C, kernel_size_C,
ostrideB, ostrideC, ostrideH, ostrideW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
break;
}
case at::MemoryFormat::Contiguous: {
bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
Tensor gradOutput = gradOutput_.contiguous();
int64_t sizeD = input.size(-3);
int64_t isizeH = input.size(-2);
int64_t isizeW = input.size(-1);
int64_t osizeH = gradOutput.size(-2);
int64_t osizeW = gradOutput.size(-1);
int64_t grid_x = sizeD;
if (input.ndimension() == 4) grid_x *= input.size(-4);
//bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] {
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
atomic_adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
else
{
// run updateGradInput kernel
adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
);
break;
}
default:
TORCH_CHECK(
false,
"Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
}
} // namespace
Tensor& adaptive_avg_pool2d_out_cuda(
const Tensor& input,
IntArrayRef output_size,
Tensor& output)
{
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor adaptive_avg_pool2d_cuda(
at::Tensor const& input,
IntArrayRef output_size)
{
auto output = at::empty({0}, input.options());
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor& adaptive_avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_out_cuda");
gradInput.resize_as_(input);
if (gradInput.numel() != 0) {
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
}
return gradInput;
}
Tensor adaptive_avg_pool2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool2d_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (gradInput.numel() != 0) {
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
}
return gradInput;
}
} // namespace at::native
#undef BLOCK_STRIDE
#undef CUDA_MAX_THREADS
#undef START_IND
#undef END_IND
|
0e5cdd76209a21f44ea5061752bef21bbc9da0bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "bw.h"
BWConverter::BWConverter()
{
}
/* Declaration of kernel functions.*/
BWConverter * BWConverter::instance = NULL;
__global__ void rgba_to_greyscale(const uchar4* rgbaImage , unsigned char* greyImage , int numRows , int numCols);
/* After each image has been processed, the converter is reset so that the data structures for each image are created according to the
* size of the input image.
* All instance members are taken care of here.
* The destructor here is therefore a dummy destructor.
*/
void BWConverter::resetConverter()
{
hipFree(d_rgbaImage);
hipFree(d_greyImage);
h_rgbaImage = NULL;
d_rgbaImage = NULL;
h_greyImage = NULL;
d_greyImage = NULL;
numRows = 0;
numCols = 0;
step = 0;
}
/* The input image is converted to the appropriate format for processing.
 * Helper methods are called for allocating memory on the GPU, copying the input from the CPU to the GPU, and running the greyscale conversion.
* The output image is copied from GPU to CPU memory
* The output image is converted to cv::Mat format and returned to the client.
*/
cv::Mat BWConverter::operator()(const cv::Mat& image)
{
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
numRows = imageRGBA.rows;
numCols = imageRGBA.cols;
step = imageRGBA.step;
imageGrey.create(image.rows, image.cols, CV_8UC1 );
preProcess();
wrapper_rgba_to_greyscale();
hipDeviceSynchronize();
hipGetLastError();
hipMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage, sizeof(unsigned char) * numRows * numCols, hipMemcpyDeviceToHost);
resetConverter();
return imageGrey;
}
/* Allocates memory on the GPU for the structures needed and initializes them to 0.
* Copies the source image from the CPU to the GPU memory.
*/
void BWConverter::preProcess()
{
h_rgbaImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
h_greyImage = (unsigned char *)imageGrey.ptr<unsigned char>(0);
hipMalloc(&d_rgbaImage, sizeof(uchar4) * numRows * numCols);
hipMalloc(&d_greyImage, sizeof(unsigned char) * numRows * numCols);
hipMemset(d_greyImage, 0, numRows * numCols * sizeof(unsigned char)); // make sure no stale memory is left lying around
hipMemcpy(d_rgbaImage, h_rgbaImage, sizeof(uchar4) * numRows * numCols, hipMemcpyHostToDevice);
}
/* Kernel that takes an image in RGBA and converts it to black and white by applying the following function to each pixel (a map operation):
* output_pixel = .299f * (rgb.x) + .587f * (rgb.y) + .114f * (rgb.z);
*/
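/* For example, a pure red pixel (r, g, b, a) = (255, 0, 0, 255) maps to roughly
 * 0.299 * 255 ~ 76, while pure white (255, 255, 255, 255) maps to 255. */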
__global__ void rgba_to_greyscale(const uchar4* rgbaImage , unsigned char* greyImage , int numRows , int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x , blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
{
return;
}
uchar4 rgb = rgbaImage[thread_1D_pos];
greyImage[thread_1D_pos] = .299f * (rgb.x) + .587f * (rgb.y) + .114f * (rgb.z);
}
/* Wrapper to call the kernel.
* Initializes the block and grid dimensions and calls the black and white kernel.
*/
void BWConverter::wrapper_rgba_to_greyscale()
{
const int BLOCK_SIZE = 32;
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
const dim3 gridSize((numCols/BLOCK_SIZE) + 1, (numRows/BLOCK_SIZE) + 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize();
hipGetLastError();
}
/* Dummy destructor */
BWConverter::~BWConverter()
{
}
/* The client can create an object of BWConverter class only through the static factory function.
* It returns a pointer to the only currently existing instance of the class.
*/
BWConverter * BWConverter::factory()
{
if(BWConverter::instance == NULL)
{
BWConverter::instance = new BWConverter();
}
return BWConverter::instance;
}
| 0e5cdd76209a21f44ea5061752bef21bbc9da0bd.cu | #include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
#include "bw.h"
BWConverter::BWConverter()
{
}
/* Declaration of kernel functions.*/
BWConverter * BWConverter::instance = NULL;
__global__ void rgba_to_greyscale(const uchar4* rgbaImage , unsigned char* greyImage , int numRows , int numCols);
/* After each image has been processed, the converter is reset so that the data structures for each image are created according to the
* size of the input image.
* All instance members are taken care of here.
* The destructor here is therefore a dummy destructor.
*/
void BWConverter::resetConverter()
{
cudaFree(d_rgbaImage);
cudaFree(d_greyImage);
h_rgbaImage = NULL;
d_rgbaImage = NULL;
h_greyImage = NULL;
d_greyImage = NULL;
numRows = 0;
numCols = 0;
step = 0;
}
/* The input image is converted to the appropriate format for processing.
 * Helper methods are called for allocating memory on the GPU, copying the input from the CPU to the GPU, and running the greyscale conversion.
* The output image is copied from GPU to CPU memory
* The output image is converted to cv::Mat format and returned to the client.
*/
cv::Mat BWConverter::operator()(const cv::Mat& image)
{
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
numRows = imageRGBA.rows;
numCols = imageRGBA.cols;
step = imageRGBA.step;
imageGrey.create(image.rows, image.cols, CV_8UC1 );
preProcess();
wrapper_rgba_to_greyscale();
cudaDeviceSynchronize();
cudaGetLastError();
cudaMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage, sizeof(unsigned char) * numRows * numCols, cudaMemcpyDeviceToHost);
resetConverter();
return imageGrey;
}
/* Allocates memory on the GPU for the structures needed and initializes them to 0.
* Copies the source image from the CPU to the GPU memory.
*/
void BWConverter::preProcess()
{
h_rgbaImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
h_greyImage = (unsigned char *)imageGrey.ptr<unsigned char>(0);
cudaMalloc(&d_rgbaImage, sizeof(uchar4) * numRows * numCols);
cudaMalloc(&d_greyImage, sizeof(unsigned char) * numRows * numCols);
cudaMemset(d_greyImage, 0, numRows * numCols * sizeof(unsigned char)); // make sure no stale memory is left lying around
cudaMemcpy(d_rgbaImage, h_rgbaImage, sizeof(uchar4) * numRows * numCols, cudaMemcpyHostToDevice);
}
/* Kernel that takes an image in RGBA and converts it to black and white by applying the following function to each pixel (a map operation):
* output_pixel = .299f * (rgb.x) + .587f * (rgb.y) + .114f * (rgb.z);
*/
__global__ void rgba_to_greyscale(const uchar4* rgbaImage , unsigned char* greyImage , int numRows , int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x , blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
{
return;
}
uchar4 rgb = rgbaImage[thread_1D_pos];
greyImage[thread_1D_pos] = .299f * (rgb.x) + .587f * (rgb.y) + .114f * (rgb.z);
}
/* Wrapper to call the kernel.
* Initializes the block and grid dimensions and calls the black and white kernel.
*/
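/* E.g. a 1920x1080 frame launches a (61, 34) grid of 32x32 blocks; the threads in
 * the extra boundary blocks are discarded by the bounds check inside the kernel. */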
void BWConverter::wrapper_rgba_to_greyscale()
{
const int BLOCK_SIZE = 32;
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
const dim3 gridSize((numCols/BLOCK_SIZE) + 1, (numRows/BLOCK_SIZE) + 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize();
cudaGetLastError();
}
/* Dummy destructor */
BWConverter::~BWConverter()
{
}
/* The client can create an object of BWConverter class only through the static factory function.
* It returns a pointer to the only currently existing instance of the class.
*/
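/* A minimal usage sketch (variable names here are illustrative only):
 *   BWConverter* conv = BWConverter::factory();
 *   cv::Mat grey = (*conv)(bgrImage);   // bgrImage: a BGR cv::Mat, e.g. loaded with cv::imread
 */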
BWConverter * BWConverter::factory()
{
if(BWConverter::instance == NULL)
{
BWConverter::instance = new BWConverter();
}
return BWConverter::instance;
}
|
5b5066c5a06767553f39ac719d42d9322425efce.hip | // !!! This is a file automatically generated by hipify!!!
#include "../CUDA/CUDA_func.h"
#include "PoolAvgLayer.h"
using namespace std;
namespace NN {
namespace Layers {
PoolAvgLayer::PoolAvgLayer(std::vector<int> dependencies,
int input_width, int input_height, int input_depth,
int filter_width, int filter_height, int filter_depth) {
this->dependencies = dependencies;
this->input_width = input_width;
this->input_height = input_height;
this->input_depth = input_depth;
this->filter_width = filter_width;
this->filter_height = filter_height;
this->filter_depth = filter_depth;
output_width = input_width / filter_width;
output_height = input_height / filter_height;
output_depth = input_depth / filter_depth;
output_size = output_width * output_height * output_depth;
hipMallocManaged(&output, output_size * sizeof(float));
hipMallocManaged(&output_gradient, output_size * sizeof(float));
hipMemset(output, 0, output_size * sizeof(float)); // hipMemset takes an int byte value; 0 zeroes the buffer
hipMemset(output_gradient, 0, output_size * sizeof(float));
}
void PoolAvgLayer::compute() {
int output_size = output_width * output_height * output_depth;
float filter_size = filter_width * filter_height * filter_depth;
int block_size = (output_size + 511) / 512;
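// Note: despite its name, block_size here is the number of 512-thread blocks,
// i.e. a ceiling division of output_size by 512 (e.g. 1000 outputs -> 2 blocks).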
NN::CUDA::compute_pool_avg_layer << <block_size, 512 >> > (input, output,
input_width, input_height, input_depth,
filter_width, filter_height, filter_depth,
output_width, output_height, output_depth,
output_size, filter_size);
hipDeviceSynchronize();
}
void PoolAvgLayer::backpropagate() {
int input_size = input_width * input_height * input_depth;
float filter_size = filter_width * filter_height * filter_depth;
int block_size = (input_size + 511) / 512;
NN::CUDA::backprop_pool_avg_layer << <block_size, 512 >> > (input, input_gradient, output_gradient,
input_width, input_height, input_depth,
filter_width, filter_height, filter_depth,
output_width, output_height, output_depth,
input_size, filter_size);
hipDeviceSynchronize();
}
int PoolAvgLayer::get_parameters_size() {
return 0;
}
void PoolAvgLayer::update_dependencies(vector<NN::Layers::Layer*> layer_dependencies) {
input = layer_dependencies[0]->get_output_iterator();
input_gradient = layer_dependencies[0]->get_output_gradient_iterator();
}
void PoolAvgLayer::save(NN::File& file) {
int id = 6;
file.save(id);
save_dependencies(file);
file.save(input_width);
file.save(input_height);
file.save(input_depth);
file.save(filter_width);
file.save(filter_height);
file.save(filter_depth);
};
void PoolAvgLayer::load(NN::File& file) {
load_dependencies(file);
file.load(input_width);
file.load(input_height);
file.load(input_depth);
file.load(filter_width);
file.load(filter_height);
file.load(filter_depth);
output_width = input_width / filter_width;
output_height = input_height / filter_height;
output_depth = input_depth / filter_depth;
output_size = output_width * output_height * output_depth;
hipMallocManaged(&output, output_size * sizeof(float));
hipMallocManaged(&output_gradient, output_size * sizeof(float));
hipMemset(output, 0, output_size * sizeof(float)); // hipMemset takes an int byte value; 0 zeroes the buffer
hipMemset(output_gradient, 0, output_size * sizeof(float));
};
PoolAvgLayer::~PoolAvgLayer() = default;
}
} | 5b5066c5a06767553f39ac719d42d9322425efce.cu | #include "../CUDA/CUDA_func.h"
#include "PoolAvgLayer.h"
using namespace std;
namespace NN {
namespace Layers {
PoolAvgLayer::PoolAvgLayer(std::vector<int> dependencies,
int input_width, int input_height, int input_depth,
int filter_width, int filter_height, int filter_depth) {
this->dependencies = dependencies;
this->input_width = input_width;
this->input_height = input_height;
this->input_depth = input_depth;
this->filter_width = filter_width;
this->filter_height = filter_height;
this->filter_depth = filter_depth;
output_width = input_width / filter_width;
output_height = input_height / filter_height;
output_depth = input_depth / filter_depth;
output_size = output_width * output_height * output_depth;
cudaMallocManaged(&output, output_size * sizeof(float));
cudaMallocManaged(&output_gradient, output_size * sizeof(float));
cudaMemset(output, 0, output_size * sizeof(float)); // cudaMemset takes an int byte value; 0 zeroes the buffer
cudaMemset(output_gradient, 0, output_size * sizeof(float));
}
void PoolAvgLayer::compute() {
int output_size = output_width * output_height * output_depth;
float filter_size = filter_width * filter_height * filter_depth;
int block_size = (output_size + 511) / 512;
NN::CUDA::compute_pool_avg_layer << <block_size, 512 >> > (input, output,
input_width, input_height, input_depth,
filter_width, filter_height, filter_depth,
output_width, output_height, output_depth,
output_size, filter_size);
cudaDeviceSynchronize();
}
void PoolAvgLayer::backpropagate() {
int input_size = input_width * input_height * input_depth;
float filter_size = filter_width * filter_height * filter_depth;
int block_size = (input_size + 511) / 512;
NN::CUDA::backprop_pool_avg_layer << <block_size, 512 >> > (input, input_gradient, output_gradient,
input_width, input_height, input_depth,
filter_width, filter_height, filter_depth,
output_width, output_height, output_depth,
input_size, filter_size);
cudaDeviceSynchronize();
}
int PoolAvgLayer::get_parameters_size() {
return 0;
}
void PoolAvgLayer::update_dependencies(vector<NN::Layers::Layer*> layer_dependencies) {
input = layer_dependencies[0]->get_output_iterator();
input_gradient = layer_dependencies[0]->get_output_gradient_iterator();
}
void PoolAvgLayer::save(NN::File& file) {
int id = 6;
file.save(id);
save_dependencies(file);
file.save(input_width);
file.save(input_height);
file.save(input_depth);
file.save(filter_width);
file.save(filter_height);
file.save(filter_depth);
};
void PoolAvgLayer::load(NN::File& file) {
load_dependencies(file);
file.load(input_width);
file.load(input_height);
file.load(input_depth);
file.load(filter_width);
file.load(filter_height);
file.load(filter_depth);
output_width = input_width / filter_width;
output_height = input_height / filter_height;
output_depth = input_depth / filter_depth;
output_size = output_width * output_height * output_depth;
cudaMallocManaged(&output, output_size * sizeof(float));
cudaMallocManaged(&output_gradient, output_size * sizeof(float));
cudaMemset(output, 0.0f, output_size * sizeof(float));
cudaMemset(output_gradient, 0.0f, output_size * sizeof(float));
};
PoolAvgLayer::~PoolAvgLayer() = default;
}
} |
a03d1053d7bb62ef8dd28ed4553b0adca5971f81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Macro.h"
#include "CUPOT.h"
#if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 )
#define POT_NXT_F ( PATCH_SIZE+2*POT_GHOST_SIZE )
#define POT_NTHREAD ( RHO_NXT*RHO_NXT*POT_BLOCK_SIZE_Z/2 )
#define POT_USELESS ( POT_GHOST_SIZE%2 )
// for Fermi GPUs, we can save density into shared memory for higher performance
#if ( defined FERMI && !defined FLOAT8 )
#define RHO_SHARED
#endif
// for single precision, we can save coarse-grid potential into shared memory for higher performance
#ifndef FLOAT8
#define CPOT_SHARED
#endif
// variables reside in constant memory
__constant__ real Mp[3];
__constant__ real Mm[3];
//-------------------------------------------------------------------------------------------------------
// Function : CUPOT_PoissonSolver_SOR_10to14cube
// Description : GPU Poisson solver using the SOR scheme
//
// Note : a. Work for POT_GHOST_SIZE = 1, 2, 3 <--> POT_NXT_F = 10, 12, 14
// b. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// c. Each patch requires about 3.1*10^6 FLOPS (including the gravity solver)
// --> 133 GFLOPS is achieved in one C2050 GPU
// d. Reference : Numerical Recipes, Chapter 20.5
//
// Parameter : g_Rho_Array : Global memory array to store the input density
// g_Pot_Array_In : Global memory array storing the input "coarse-grid" potential for
// interpolation
// g_Pot_Array_Out : Global memory array to store the output potential
// Min_Iter : Minimum # of iterations for SOR
// Max_Iter : Maximum # of iterations for SOR
// Omega_6 : Omega / 6
// Const : (Coefficient in front of the RHS in the Poisson eq.) / dh^2
// IntScheme : Interpolation scheme for potential
// --> currently supported schemes include
// INT_CENTRAL : central interpolation
// INT_CQUAD : conservative quadratic interpolation
// INT_QUAD : quadratic interpolation
//---------------------------------------------------------------------------------------------------
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme )
{
const uint bid = blockIdx.x;
const uint tid_x = threadIdx.x;
const uint tid_y = threadIdx.y;
const uint tid_z = threadIdx.z;
const uint bdim_x = blockDim.x;
const uint bdim_y = blockDim.y;
const uint bdim_z = blockDim.z;
const uint ID = __umul24( tid_z, __umul24(bdim_x,bdim_y) ) + __umul24( tid_y, bdim_x ) + tid_x;
const uint dx = 1;
const uint dy = POT_NXT_F;
const uint dz = POT_NXT_F*POT_NXT_F;
const uint PotID0 = __umul24( 1+tid_z, dz ) + __umul24( 1+tid_y, dy ) + ( tid_x << 1 ) + 1;
const uint RhoID0 = __umul24( tid_z, RHO_NXT*RHO_NXT ) + __umul24( tid_y, RHO_NXT )+ ( tid_x << 1 );
const uint DispEven = ( tid_y + tid_z ) & 1;
const uint DispOdd = DispEven^1;
const uint DispFlip = bdim_z & 1;
const uint dPotID = __umul24( bdim_z, POT_NXT_F*POT_NXT_F );
const uint dRhoID = __umul24( bdim_z, RHO_NXT *RHO_NXT );
const uint FloorPow2 = 1<<(31-__clz(POT_NTHREAD) ); // largest power-of-two value not greater than POT_NTHREAD
const uint Remain = POT_NTHREAD - FloorPow2;
uint ip, im, jp, jm, kp, km, t, s_index;
uint PotID, RhoID, DispPotID, DispRhoID, Disp;
real Residual, Residual_Total_Old;
__shared__ real s_Residual_Total[POT_NTHREAD];
__shared__ real s_FPot[ POT_NXT_F*POT_NXT_F*POT_NXT_F ];
# ifdef CPOT_SHARED
__shared__ real s_CPot[ POT_NXT *POT_NXT *POT_NXT ];
# endif
# ifdef RHO_SHARED
__shared__ real s_Rho_Array[ RHO_NXT*RHO_NXT*RHO_NXT ];
# endif
// a1. load the fine-grid density into the shared memory
// -----------------------------------------------------------------------------------------------------------
# ifdef RHO_SHARED
t = ID;
do { s_Rho_Array[t] = g_Rho_Array[bid][t]; t += POT_NTHREAD; } while ( t < RHO_NXT*RHO_NXT*RHO_NXT );
__syncthreads();
# else
const real *s_Rho_Array = g_Rho_Array[bid];
# endif
// a2. load the coarse-grid potential into the shared memory
// -----------------------------------------------------------------------------------------------------------
# ifdef CPOT_SHARED
t = ID;
do { s_CPot[t] = g_Pot_Array_In[bid][t]; t += POT_NTHREAD; } while ( t < POT_NXT*POT_NXT*POT_NXT );
__syncthreads();
# else
const real *s_CPot = g_Pot_Array_In[bid];
# endif
// b. evaluate the "fine-grid" potential by interpolation (as the initial guess and the B.C.)
// -----------------------------------------------------------------------------------------------------------
const int N_CSlice = POT_NTHREAD / ( (POT_NXT-2)*(POT_NXT-2) );
if ( ID < N_CSlice*(POT_NXT-2)*(POT_NXT-2) )
{
const real Const_8 = 1.0/8.0;
const real Const_64 = 1.0/64.0;
const real Const_512 = 1.0/512.0;
const int Cdx = 1;
const int Cdy = POT_NXT;
const int Cdz = POT_NXT*POT_NXT;
const int CIDx = 1 + ID % ( POT_NXT-2 );
const int CIDy = 1 + ( ID % ( (POT_NXT-2)*(POT_NXT-2) ) ) / ( POT_NXT-2 );
const int CIDz = 1 + ID / ( (POT_NXT-2)*(POT_NXT-2) );
int CID = __mul24( CIDz, Cdz ) + __mul24( CIDy, Cdy ) + __mul24( CIDx, Cdx );
const int Fdx = 1;
const int Fdy = POT_NXT_F;
const int Fdz = POT_NXT_F*POT_NXT_F;
const int FIDx = ( (CIDx-1)<<1 ) - POT_USELESS;
const int FIDy = ( (CIDy-1)<<1 ) - POT_USELESS;
int FIDz = ( (CIDz-1)<<1 ) - POT_USELESS;
int FID = __mul24( FIDz, Fdz ) + __mul24( FIDy, Fdy ) + __mul24( FIDx, Fdx );
real TempFPot1, TempFPot2, TempFPot3, TempFPot4, TempFPot5, TempFPot6, TempFPot7, TempFPot8;
real Slope_00, Slope_01, Slope_02, Slope_03, Slope_04, Slope_05, Slope_06, Slope_07;
real Slope_08, Slope_09, Slope_10, Slope_11, Slope_12;
int Idx, Idy, Idz, ii, jj, kk;
for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice)
{
switch ( IntScheme )
{
case INT_CENTRAL :
{
Slope_00 = (real)0.125 * ( s_CPot[CID+Cdx] - s_CPot[CID-Cdx] );
Slope_01 = (real)0.125 * ( s_CPot[CID+Cdy] - s_CPot[CID-Cdy] );
Slope_02 = (real)0.125 * ( s_CPot[CID+Cdz] - s_CPot[CID-Cdz] );
TempFPot1 = s_CPot[CID] - Slope_00 - Slope_01 - Slope_02;
TempFPot2 = s_CPot[CID] + Slope_00 - Slope_01 - Slope_02;
TempFPot3 = s_CPot[CID] - Slope_00 + Slope_01 - Slope_02;
TempFPot4 = s_CPot[CID] + Slope_00 + Slope_01 - Slope_02;
TempFPot5 = s_CPot[CID] - Slope_00 - Slope_01 + Slope_02;
TempFPot6 = s_CPot[CID] + Slope_00 - Slope_01 + Slope_02;
TempFPot7 = s_CPot[CID] - Slope_00 + Slope_01 + Slope_02;
TempFPot8 = s_CPot[CID] + Slope_00 + Slope_01 + Slope_02;
}
break; // INT_CENTRAL
case INT_CQUAD :
{
Slope_00 = Const_8 * ( s_CPot[CID+Cdx ] - s_CPot[CID-Cdx ] );
Slope_01 = Const_8 * ( s_CPot[CID +Cdy ] - s_CPot[CID -Cdy ] );
Slope_02 = Const_8 * ( s_CPot[CID +Cdz] - s_CPot[CID -Cdz] );
Slope_03 = Const_64 * ( s_CPot[CID+Cdx -Cdz] - s_CPot[CID-Cdx -Cdz] );
Slope_04 = Const_64 * ( s_CPot[CID +Cdy-Cdz] - s_CPot[CID -Cdy-Cdz] );
Slope_05 = Const_64 * ( s_CPot[CID+Cdx-Cdy ] - s_CPot[CID-Cdx-Cdy ] );
Slope_06 = Const_64 * ( s_CPot[CID+Cdx+Cdy ] - s_CPot[CID-Cdx+Cdy ] );
Slope_07 = Const_64 * ( s_CPot[CID+Cdx +Cdz] - s_CPot[CID-Cdx +Cdz] );
Slope_08 = Const_64 * ( s_CPot[CID +Cdy+Cdz] - s_CPot[CID -Cdy+Cdz] );
Slope_09 = Const_512 * ( s_CPot[CID+Cdx-Cdy-Cdz] - s_CPot[CID-Cdx-Cdy-Cdz] );
Slope_10 = Const_512 * ( s_CPot[CID+Cdx+Cdy-Cdz] - s_CPot[CID-Cdx+Cdy-Cdz] );
Slope_11 = Const_512 * ( s_CPot[CID+Cdx-Cdy+Cdz] - s_CPot[CID-Cdx-Cdy+Cdz] );
Slope_12 = Const_512 * ( s_CPot[CID+Cdx+Cdy+Cdz] - s_CPot[CID-Cdx+Cdy+Cdz] );
TempFPot1 = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot2 = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot3 = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot4 = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot5 = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot6 = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot7 = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot8 = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
}
break; // INT_CQUAD
case INT_QUAD :
{
TempFPot1 = TempFPot2 = TempFPot3 = TempFPot4 = (real)0.0;
TempFPot5 = TempFPot6 = TempFPot7 = TempFPot8 = (real)0.0;
for (int dk=-1; dk<=1; dk++) { Idz = dk+1; kk = __mul24( dk, Cdz );
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
TempFPot1 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mm[Idx];
TempFPot2 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mp[Idx];
TempFPot3 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mm[Idx];
TempFPot4 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mp[Idx];
TempFPot5 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mm[Idx];
TempFPot6 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mp[Idx];
TempFPot7 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mm[Idx];
TempFPot8 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mp[Idx];
}}}
}
break; // INT_QUAD
} // switch ( IntScheme )
// save data to the shared-memory array.
      // Currently this part is highly divergent. However, since the interpolation takes much less time than the
// SOR iteration does, we have not yet tried to optimize this part
if ( FIDz >= 0 )
{
if ( FIDx >= 0 && FIDy >= 0 ) s_FPot[FID ] = TempFPot1;
if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_FPot[FID+Fdx ] = TempFPot2;
if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_FPot[FID +Fdy ] = TempFPot3;
if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_FPot[FID+Fdx+Fdy ] = TempFPot4;
}
if ( FIDz <= POT_NXT_F-2 )
{
if ( FIDx >= 0 && FIDy >= 0 ) s_FPot[FID +Fdz] = TempFPot5;
if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_FPot[FID+Fdx +Fdz] = TempFPot6;
if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_FPot[FID +Fdy+Fdz] = TempFPot7;
if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_FPot[FID+Fdx+Fdy+Fdz] = TempFPot8;
}
CID += __mul24( N_CSlice, Cdz );
FID += __mul24( 2*N_CSlice, Fdz );
FIDz += 2*N_CSlice;
} // for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice)
} // if ( ID < N_CSlice*(POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// c. use the SOR scheme to evaluate potential
// -----------------------------------------------------------------------------------------------------------
Residual_Total_Old = __FLT_MAX__;
for (uint Iter=0; Iter<Max_Iter; Iter++)
{
// (c1). evaluate residual, update potential
// ==============================================================================
s_Residual_Total[ID] = (real)0.0;
Disp = DispEven;
for (uint pass=0; pass<2; pass++) // pass = (0,1) <--> (even,odd) step
{
PotID = PotID0;
RhoID = RhoID0;
for (uint z=tid_z; z<RHO_NXT; z+=bdim_z)
{
DispPotID = PotID + Disp;
DispRhoID = RhoID + Disp;
ip = DispPotID + dx;
jp = DispPotID + dy;
kp = DispPotID + dz;
im = DispPotID - dx;
jm = DispPotID - dy;
km = DispPotID - dz;
// evaluate the residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*s_FPot[DispPotID] - Const*s_Rho_Array[DispRhoID] );
// update potential
s_FPot[DispPotID] += Omega_6*Residual;
// save residual of each cell into a shared-memory array for evaluating the sum
s_Residual_Total[ID] += FABS( Residual );
PotID += dPotID;
RhoID += dRhoID;
Disp = Disp^DispFlip;
} // for (int ZLoop=0; ZLoop<RHO_NXT; ZLoop+=bdim_z)
Disp = DispOdd;
__syncthreads();
} // for (int pass=0; pass<2; pass++)
// (c2). perform the reduction operation to get the one-norm of residual
// ==============================================================================
      // fold the elements at indices >= FloorPow2 into the front of the array so that the number of remaining elements is a power of two
if ( ID < Remain ) s_Residual_Total[ID] += s_Residual_Total[ ID + FloorPow2 ];
// parallel reduction
# if ( POT_NTHREAD >= 1024 )
# error : ERROR : POT_NTHREAD must < 1024 !!
# endif
# if ( POT_NTHREAD >= 512 )
if ( ID < 256 ) s_Residual_Total[ID] += s_Residual_Total[ ID + 256 ]; __syncthreads();
# endif
# if ( POT_NTHREAD >= 256 )
if ( ID < 128 ) s_Residual_Total[ID] += s_Residual_Total[ ID + 128 ]; __syncthreads();
# endif
# if ( POT_NTHREAD >= 128 )
if ( ID < 64 ) s_Residual_Total[ID] += s_Residual_Total[ ID + 64 ]; __syncthreads();
# endif
// adopting warp-synchronous mechanism
if ( ID < 32 )
{
// declare volatile pointer to ensure that the operations are not reordered
volatile real *s_Sum = s_Residual_Total;
s_Sum[ID] += s_Sum[ID+32]; // here we have assumed that POT_NTHREAD >= 64
s_Sum[ID] += s_Sum[ID+16];
s_Sum[ID] += s_Sum[ID+ 8];
s_Sum[ID] += s_Sum[ID+ 4];
s_Sum[ID] += s_Sum[ID+ 2];
s_Sum[ID] += s_Sum[ID+ 1];
}
__syncthreads();
// (c3). termination criterion
// ==============================================================================
if ( Iter+1 >= Min_Iter && s_Residual_Total[0] > Residual_Total_Old ) break;
Residual_Total_Old = s_Residual_Total[0];
__syncthreads();
} // for (int Iter=0; Iter<Max_Iter; Iter++)
// d. store potential back to the global memory
// -----------------------------------------------------------------------------------------------------------
t = ID;
do
{
s_index = __umul24( t/(GRA_NXT*GRA_NXT) + POT_GHOST_SIZE - GRA_GHOST_SIZE, dz )
+ __umul24( t%(GRA_NXT*GRA_NXT)/GRA_NXT + POT_GHOST_SIZE - GRA_GHOST_SIZE, dy )
+ t%(GRA_NXT ) + POT_GHOST_SIZE - GRA_GHOST_SIZE;
g_Pot_Array_Out[bid][t] = s_FPot[s_index];
t += POT_NTHREAD;
}
while ( t < GRA_NXT*GRA_NXT*GRA_NXT );
} // FUNCTION : CUPOT_PoissonSolver_SOR_10to14cube
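//-------------------------------------------------------------------------------------------------------
// Illustrative sketch added for clarity (not part of the original solver) : the per-cell operation that
// the kernel above applies during each red/black half-sweep, written out with plain doubles. The six
// Pot_* neighbors, the central potential Pot_c, the density Rho_c, and Omega_6 / Const carry exactly
// the meanings documented in the function header above.
//-------------------------------------------------------------------------------------------------------
static inline double SOR_UpdateCell_Sketch( const double Pot_xm, const double Pot_xp,
                                            const double Pot_ym, const double Pot_yp,
                                            const double Pot_zm, const double Pot_zp,
                                            const double Pot_c,  const double Rho_c,
                                            const double Omega_6, const double Const )
{
   const double Residual = Pot_xm + Pot_xp + Pot_ym + Pot_yp + Pot_zm + Pot_zp
                           - 6.0*Pot_c - Const*Rho_c;    // discrete Laplacian minus the scaled RHS
   return Pot_c + Omega_6*Residual;                      // over-relaxed Gauss-Seidel (SOR) update
}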
#endif // #if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 )
| a03d1053d7bb62ef8dd28ed4553b0adca5971f81.cu |
#include "Macro.h"
#include "CUPOT.h"
#if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 )
#define POT_NXT_F ( PATCH_SIZE+2*POT_GHOST_SIZE )
#define POT_NTHREAD ( RHO_NXT*RHO_NXT*POT_BLOCK_SIZE_Z/2 )
#define POT_USELESS ( POT_GHOST_SIZE%2 )
// for Fermi GPUs, we can save density into shared memory for higher performance
#if ( defined FERMI && !defined FLOAT8 )
#define RHO_SHARED
#endif
// for single precision, we can save coarse-grid potential into shared memory for higher performance
#ifndef FLOAT8
#define CPOT_SHARED
#endif
// variables reside in constant memory
__constant__ real Mp[3];
__constant__ real Mm[3];
//-------------------------------------------------------------------------------------------------------
// Function : CUPOT_PoissonSolver_SOR_10to14cube
// Description : GPU Poisson solver using the SOR scheme
//
// Note : a. Work for POT_GHOST_SIZE = 1, 2, 3 <--> POT_NXT_F = 10, 12, 14
// b. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// c. Each patch requires about 3.1*10^6 FLOPS (including the gravity solver)
// --> 133 GFLOPS is achieved in one C2050 GPU
// d. Reference : Numerical Recipes, Chapter 20.5
//
// Parameter : g_Rho_Array : Global memory array to store the input density
// g_Pot_Array_In : Global memory array storing the input "coarse-grid" potential for
// interpolation
// g_Pot_Array_Out : Global memory array to store the output potential
// Min_Iter : Minimum # of iterations for SOR
// Max_Iter : Maximum # of iterations for SOR
// Omega_6 : Omega / 6
// Const : (Coefficient in front of the RHS in the Poisson eq.) / dh^2
// IntScheme : Interpolation scheme for potential
// --> currently supported schemes include
// INT_CENTRAL : central interpolation
// INT_CQUAD : conservative quadratic interpolation
// INT_QUAD : quadratic interpolation
//---------------------------------------------------------------------------------------------------
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme )
{
const uint bid = blockIdx.x;
const uint tid_x = threadIdx.x;
const uint tid_y = threadIdx.y;
const uint tid_z = threadIdx.z;
const uint bdim_x = blockDim.x;
const uint bdim_y = blockDim.y;
const uint bdim_z = blockDim.z;
const uint ID = __umul24( tid_z, __umul24(bdim_x,bdim_y) ) + __umul24( tid_y, bdim_x ) + tid_x;
const uint dx = 1;
const uint dy = POT_NXT_F;
const uint dz = POT_NXT_F*POT_NXT_F;
const uint PotID0 = __umul24( 1+tid_z, dz ) + __umul24( 1+tid_y, dy ) + ( tid_x << 1 ) + 1;
const uint RhoID0 = __umul24( tid_z, RHO_NXT*RHO_NXT ) + __umul24( tid_y, RHO_NXT )+ ( tid_x << 1 );
const uint DispEven = ( tid_y + tid_z ) & 1;
const uint DispOdd = DispEven^1;
const uint DispFlip = bdim_z & 1;
const uint dPotID = __umul24( bdim_z, POT_NXT_F*POT_NXT_F );
const uint dRhoID = __umul24( bdim_z, RHO_NXT *RHO_NXT );
const uint FloorPow2 = 1<<(31-__clz(POT_NTHREAD) ); // largest power-of-two value not greater than POT_NTHREAD
const uint Remain = POT_NTHREAD - FloorPow2;
uint ip, im, jp, jm, kp, km, t, s_index;
uint PotID, RhoID, DispPotID, DispRhoID, Disp;
real Residual, Residual_Total_Old;
__shared__ real s_Residual_Total[POT_NTHREAD];
__shared__ real s_FPot[ POT_NXT_F*POT_NXT_F*POT_NXT_F ];
# ifdef CPOT_SHARED
__shared__ real s_CPot[ POT_NXT *POT_NXT *POT_NXT ];
# endif
# ifdef RHO_SHARED
__shared__ real s_Rho_Array[ RHO_NXT*RHO_NXT*RHO_NXT ];
# endif
// a1. load the fine-grid density into the shared memory
// -----------------------------------------------------------------------------------------------------------
# ifdef RHO_SHARED
t = ID;
do { s_Rho_Array[t] = g_Rho_Array[bid][t]; t += POT_NTHREAD; } while ( t < RHO_NXT*RHO_NXT*RHO_NXT );
__syncthreads();
# else
const real *s_Rho_Array = g_Rho_Array[bid];
# endif
// a2. load the coarse-grid potential into the shared memory
// -----------------------------------------------------------------------------------------------------------
# ifdef CPOT_SHARED
t = ID;
do { s_CPot[t] = g_Pot_Array_In[bid][t]; t += POT_NTHREAD; } while ( t < POT_NXT*POT_NXT*POT_NXT );
__syncthreads();
# else
const real *s_CPot = g_Pot_Array_In[bid];
# endif
// b. evaluate the "fine-grid" potential by interpolation (as the initial guess and the B.C.)
// -----------------------------------------------------------------------------------------------------------
const int N_CSlice = POT_NTHREAD / ( (POT_NXT-2)*(POT_NXT-2) );
if ( ID < N_CSlice*(POT_NXT-2)*(POT_NXT-2) )
{
const real Const_8 = 1.0/8.0;
const real Const_64 = 1.0/64.0;
const real Const_512 = 1.0/512.0;
const int Cdx = 1;
const int Cdy = POT_NXT;
const int Cdz = POT_NXT*POT_NXT;
const int CIDx = 1 + ID % ( POT_NXT-2 );
const int CIDy = 1 + ( ID % ( (POT_NXT-2)*(POT_NXT-2) ) ) / ( POT_NXT-2 );
const int CIDz = 1 + ID / ( (POT_NXT-2)*(POT_NXT-2) );
int CID = __mul24( CIDz, Cdz ) + __mul24( CIDy, Cdy ) + __mul24( CIDx, Cdx );
const int Fdx = 1;
const int Fdy = POT_NXT_F;
const int Fdz = POT_NXT_F*POT_NXT_F;
const int FIDx = ( (CIDx-1)<<1 ) - POT_USELESS;
const int FIDy = ( (CIDy-1)<<1 ) - POT_USELESS;
int FIDz = ( (CIDz-1)<<1 ) - POT_USELESS;
int FID = __mul24( FIDz, Fdz ) + __mul24( FIDy, Fdy ) + __mul24( FIDx, Fdx );
real TempFPot1, TempFPot2, TempFPot3, TempFPot4, TempFPot5, TempFPot6, TempFPot7, TempFPot8;
real Slope_00, Slope_01, Slope_02, Slope_03, Slope_04, Slope_05, Slope_06, Slope_07;
real Slope_08, Slope_09, Slope_10, Slope_11, Slope_12;
int Idx, Idy, Idz, ii, jj, kk;
for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice)
{
switch ( IntScheme )
{
case INT_CENTRAL :
{
Slope_00 = (real)0.125 * ( s_CPot[CID+Cdx] - s_CPot[CID-Cdx] );
Slope_01 = (real)0.125 * ( s_CPot[CID+Cdy] - s_CPot[CID-Cdy] );
Slope_02 = (real)0.125 * ( s_CPot[CID+Cdz] - s_CPot[CID-Cdz] );
TempFPot1 = s_CPot[CID] - Slope_00 - Slope_01 - Slope_02;
TempFPot2 = s_CPot[CID] + Slope_00 - Slope_01 - Slope_02;
TempFPot3 = s_CPot[CID] - Slope_00 + Slope_01 - Slope_02;
TempFPot4 = s_CPot[CID] + Slope_00 + Slope_01 - Slope_02;
TempFPot5 = s_CPot[CID] - Slope_00 - Slope_01 + Slope_02;
TempFPot6 = s_CPot[CID] + Slope_00 - Slope_01 + Slope_02;
TempFPot7 = s_CPot[CID] - Slope_00 + Slope_01 + Slope_02;
TempFPot8 = s_CPot[CID] + Slope_00 + Slope_01 + Slope_02;
}
break; // INT_CENTRAL
case INT_CQUAD :
{
Slope_00 = Const_8 * ( s_CPot[CID+Cdx ] - s_CPot[CID-Cdx ] );
Slope_01 = Const_8 * ( s_CPot[CID +Cdy ] - s_CPot[CID -Cdy ] );
Slope_02 = Const_8 * ( s_CPot[CID +Cdz] - s_CPot[CID -Cdz] );
Slope_03 = Const_64 * ( s_CPot[CID+Cdx -Cdz] - s_CPot[CID-Cdx -Cdz] );
Slope_04 = Const_64 * ( s_CPot[CID +Cdy-Cdz] - s_CPot[CID -Cdy-Cdz] );
Slope_05 = Const_64 * ( s_CPot[CID+Cdx-Cdy ] - s_CPot[CID-Cdx-Cdy ] );
Slope_06 = Const_64 * ( s_CPot[CID+Cdx+Cdy ] - s_CPot[CID-Cdx+Cdy ] );
Slope_07 = Const_64 * ( s_CPot[CID+Cdx +Cdz] - s_CPot[CID-Cdx +Cdz] );
Slope_08 = Const_64 * ( s_CPot[CID +Cdy+Cdz] - s_CPot[CID -Cdy+Cdz] );
Slope_09 = Const_512 * ( s_CPot[CID+Cdx-Cdy-Cdz] - s_CPot[CID-Cdx-Cdy-Cdz] );
Slope_10 = Const_512 * ( s_CPot[CID+Cdx+Cdy-Cdz] - s_CPot[CID-Cdx+Cdy-Cdz] );
Slope_11 = Const_512 * ( s_CPot[CID+Cdx-Cdy+Cdz] - s_CPot[CID-Cdx-Cdy+Cdz] );
Slope_12 = Const_512 * ( s_CPot[CID+Cdx+Cdy+Cdz] - s_CPot[CID-Cdx+Cdy+Cdz] );
TempFPot1 = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot2 = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot3 = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot4 = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot5 = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot6 = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot7 = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot8 = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
}
break; // INT_CQUAD
case INT_QUAD :
{
TempFPot1 = TempFPot2 = TempFPot3 = TempFPot4 = (real)0.0;
TempFPot5 = TempFPot6 = TempFPot7 = TempFPot8 = (real)0.0;
for (int dk=-1; dk<=1; dk++) { Idz = dk+1; kk = __mul24( dk, Cdz );
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
TempFPot1 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mm[Idx];
TempFPot2 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mm[Idy] * Mp[Idx];
TempFPot3 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mm[Idx];
TempFPot4 += s_CPot[CID+kk+jj+ii] * Mm[Idz] * Mp[Idy] * Mp[Idx];
TempFPot5 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mm[Idx];
TempFPot6 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mm[Idy] * Mp[Idx];
TempFPot7 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mm[Idx];
TempFPot8 += s_CPot[CID+kk+jj+ii] * Mp[Idz] * Mp[Idy] * Mp[Idx];
}}}
}
break; // INT_QUAD
} // switch ( IntScheme )
// save data to the shared-memory array.
      // Currently this part is highly divergent. However, since the interpolation takes much less time than the
// SOR iteration does, we have not yet tried to optimize this part
if ( FIDz >= 0 )
{
if ( FIDx >= 0 && FIDy >= 0 ) s_FPot[FID ] = TempFPot1;
if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_FPot[FID+Fdx ] = TempFPot2;
if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_FPot[FID +Fdy ] = TempFPot3;
if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_FPot[FID+Fdx+Fdy ] = TempFPot4;
}
if ( FIDz <= POT_NXT_F-2 )
{
if ( FIDx >= 0 && FIDy >= 0 ) s_FPot[FID +Fdz] = TempFPot5;
if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_FPot[FID+Fdx +Fdz] = TempFPot6;
if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_FPot[FID +Fdy+Fdz] = TempFPot7;
if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_FPot[FID+Fdx+Fdy+Fdz] = TempFPot8;
}
CID += __mul24( N_CSlice, Cdz );
FID += __mul24( 2*N_CSlice, Fdz );
FIDz += 2*N_CSlice;
} // for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice)
} // if ( ID < N_CSlice*(POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// c. use the SOR scheme to evaluate potential
// -----------------------------------------------------------------------------------------------------------
Residual_Total_Old = __FLT_MAX__;
for (uint Iter=0; Iter<Max_Iter; Iter++)
{
// (c1). evaluate residual, update potential
// ==============================================================================
s_Residual_Total[ID] = (real)0.0;
Disp = DispEven;
for (uint pass=0; pass<2; pass++) // pass = (0,1) <--> (even,odd) step
{
PotID = PotID0;
RhoID = RhoID0;
for (uint z=tid_z; z<RHO_NXT; z+=bdim_z)
{
DispPotID = PotID + Disp;
DispRhoID = RhoID + Disp;
ip = DispPotID + dx;
jp = DispPotID + dy;
kp = DispPotID + dz;
im = DispPotID - dx;
jm = DispPotID - dy;
km = DispPotID - dz;
// evaluate the residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*s_FPot[DispPotID] - Const*s_Rho_Array[DispRhoID] );
// update potential
s_FPot[DispPotID] += Omega_6*Residual;
// save residual of each cell into a shared-memory array for evaluating the sum
s_Residual_Total[ID] += FABS( Residual );
PotID += dPotID;
RhoID += dRhoID;
Disp = Disp^DispFlip;
} // for (int ZLoop=0; ZLoop<RHO_NXT; ZLoop+=bdim_z)
Disp = DispOdd;
__syncthreads();
} // for (int pass=0; pass<2; pass++)
// (c2). perform the reduction operation to get the one-norm of residual
// ==============================================================================
      // fold the elements at indices >= FloorPow2 into the front of the array so that the number of remaining elements is a power of two
if ( ID < Remain ) s_Residual_Total[ID] += s_Residual_Total[ ID + FloorPow2 ];
// parallel reduction
# if ( POT_NTHREAD >= 1024 )
# error : ERROR : POT_NTHREAD must < 1024 !!
# endif
# if ( POT_NTHREAD >= 512 )
if ( ID < 256 ) s_Residual_Total[ID] += s_Residual_Total[ ID + 256 ]; __syncthreads();
# endif
# if ( POT_NTHREAD >= 256 )
if ( ID < 128 ) s_Residual_Total[ID] += s_Residual_Total[ ID + 128 ]; __syncthreads();
# endif
# if ( POT_NTHREAD >= 128 )
if ( ID < 64 ) s_Residual_Total[ID] += s_Residual_Total[ ID + 64 ]; __syncthreads();
# endif
// adopting warp-synchronous mechanism
if ( ID < 32 )
{
// declare volatile pointer to ensure that the operations are not reordered
volatile real *s_Sum = s_Residual_Total;
s_Sum[ID] += s_Sum[ID+32]; // here we have assumed that POT_NTHREAD >= 64
s_Sum[ID] += s_Sum[ID+16];
s_Sum[ID] += s_Sum[ID+ 8];
s_Sum[ID] += s_Sum[ID+ 4];
s_Sum[ID] += s_Sum[ID+ 2];
s_Sum[ID] += s_Sum[ID+ 1];
}
__syncthreads();
// (c3). termination criterion
// ==============================================================================
if ( Iter+1 >= Min_Iter && s_Residual_Total[0] > Residual_Total_Old ) break;
Residual_Total_Old = s_Residual_Total[0];
__syncthreads();
} // for (int Iter=0; Iter<Max_Iter; Iter++)
// d. store potential back to the global memory
// -----------------------------------------------------------------------------------------------------------
t = ID;
do
{
s_index = __umul24( t/(GRA_NXT*GRA_NXT) + POT_GHOST_SIZE - GRA_GHOST_SIZE, dz )
+ __umul24( t%(GRA_NXT*GRA_NXT)/GRA_NXT + POT_GHOST_SIZE - GRA_GHOST_SIZE, dy )
+ t%(GRA_NXT ) + POT_GHOST_SIZE - GRA_GHOST_SIZE;
g_Pot_Array_Out[bid][t] = s_FPot[s_index];
t += POT_NTHREAD;
}
while ( t < GRA_NXT*GRA_NXT*GRA_NXT );
} // FUNCTION : CUPOT_PoissonSolver_SOR_10to14cube
#endif // #if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 )
|
c73e17d6057c365d4eec18f0a79783750ed58d84.hip | // !!! This is a file automatically generated by hipify!!!
// In Part 3, we'll use the stream compaction implementation we built
// in Part 2 to create a simple schedule for the second round of
// Black-Scholes jobs from Part 1. Since subsequent rounds of our options
// pricing are very sparse, a naive scheduling of this job will
// cause the majority of CUDA threads to idle while their active warp
// neighbors work on stocks from the first round which meet our criteria. A better
// scheduling should compact all of the data which meets or exceeds the
// threshold for subsequent rounds to the front of the input, and launch only as many
// threads as there is work to do. This will ensure that all of the CUDA threads
// launched in subsequent rounds will be active (modulo the few inactive threads
// at the end of the last block). This compaction scheme eliminates the warp
// divergence penalty and increases the throughput of the second round of Black-Scholes.
// Note that your stream compaction implementation may come with significant overhead.
// How many sparse rounds of Black-Scholes does it take before the cost becomes worth
// it? It may take quite a few when computing in single precision. For double precision,
// it requires just a handful. You can control whether the "real" type is "float"
// (single precision) or "double" (double precision) by editing the Makefile.
// If you've made it this far, Part 3 should be trivial. The only thing you need
// to do is launch kernels and, as always, allocate & deallocate device storage in
// mp3-part3.cu and implement the body of compact_options in compact.cu. This is mostly
// a copy and paste job from compact_even_elements, but this time, there is more data
// to move around. Search for the lines marked TODO: and get hacking!
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <limits>
#include <numeric>
#include "black_scholes.h"
#include "compact.h"
#include "mp3-util.h"
bool allocate_device_storage(real *&d_first_round_call_result, real *&d_first_round_put_result,
real *&d_subsequent_round_call_result, real *&d_subsequent_round_put_result,
real *&d_stock_price, real *&d_option_strike,
real *&d_option_years,
real *&d_compacted_stock_price,
real *&d_compacted_option_strike,
real *&d_compacted_option_years,
const size_t n)
{
// TODO: your device memory allocations here
// TODO: don't forget to check for CUDA errors!
// TODO: return true to indicate successful memory allocation
return false;
}
void deallocate_device_storage(real *d_first_round_call_result, real *d_first_round_put_result,
real *d_subsequent_round_call_result, real *d_subsequent_round_put_result,
real *d_stock_price, real *d_option_strike,
real *d_option_years,
real *d_compacted_stock_price,
real *d_compacted_option_strike,
real *d_compacted_option_years)
{
// TODO: your device memory deallocations here
// TODO: don't forget to check for CUDA errors!
}
int main(void)
{
event_pair timer;
const size_t num_subsequent_rounds = 5;
float compaction_time = 0;
std::vector<float> gpu_time(1 + num_subsequent_rounds);
std::vector<float> cpu_time(1 + num_subsequent_rounds);
// create arrays for 4M options
size_t num_options = 1<<22;
// allocate host storage
std::vector<real> h_first_round_call_result(num_options,0);
std::vector<real> h_first_round_put_result(num_options, 0);
std::vector<real> h_subsequent_round_call_result(num_options,0);
std::vector<real> h_subsequent_round_put_result(num_options, 0);
std::vector<real> h_stock_price(num_options);
std::vector<real> h_option_strike(num_options);
std::vector<real> h_option_years(num_options);
// generate options set
srand(5347);
for(int i = 0; i < num_options; ++i)
{
h_stock_price[i] = random_real(5.0, 30.0);
h_option_strike[i] = random_real(1.0, 100.0);
h_option_years[i] = random_real(0.25, 10.0);
}
// some pointers to the data set which will live in device memory
real *d_first_round_call_result = 0;
real *d_first_round_put_result = 0;
real *d_subsequent_round_call_result = 0;
real *d_subsequent_round_put_result = 0;
real *d_stock_price = 0;
real *d_option_strike = 0;
real *d_option_years = 0;
real *d_compacted_stock_price = 0;
real *d_compacted_option_strike = 0;
real *d_compacted_option_years = 0;
// allocate device storage
if(!allocate_device_storage(d_first_round_call_result, d_first_round_put_result,
d_subsequent_round_call_result, d_subsequent_round_put_result,
d_stock_price, d_option_strike, d_option_years,
d_compacted_stock_price,
d_compacted_option_strike,
d_compacted_option_years,
num_options))
{
std::cerr << "Error allocating device memory!" << std::endl;
exit(-1);
}
// fill the result arrays with 0
hipMemset(d_first_round_call_result, 0, sizeof(real) * num_options);
hipMemset(d_first_round_put_result, 0, sizeof(real) * num_options);
hipMemset(d_subsequent_round_call_result, 0, sizeof(real) * num_options);
hipMemset(d_subsequent_round_put_result, 0, sizeof(real) * num_options);
// copy input to GPU
start_timer(&timer);
// TODO: your host to device copies here
stop_timer(&timer, "host to device copy of input");
// BEGIN ROUND 0
  // we will use the following two parameters
  // for the first round of the Black-Scholes algorithm
const real first_round_riskless_rate = 0.02;
const real first_round_volatility = 0.30;
// do one round of Black-Scholes using our parameters
start_timer(&timer);
// TODO: your black_scholes_kernel launch here
gpu_time[0] = stop_timer(&timer, "GPU Black-Scholes round 0");
check_cuda_error("GPU Black-Scholes round 0", __FILE__, __LINE__);
// do round 0 of Black-Scholes on the host
start_timer(&timer);
black_scholes_host(&h_stock_price[0],
&h_option_strike[0],
&h_option_years[0],
&h_first_round_call_result[0],
&h_first_round_put_result[0],
first_round_riskless_rate,
first_round_volatility,
num_options);
cpu_time[0] = stop_timer(&timer, "CPU Black-Scholes round 0");
// validate gpu results from round 0
std::vector<real> h_validate_me(num_options);
hipMemcpy(&h_validate_me[0], d_first_round_call_result, sizeof(real) * num_options, hipMemcpyDeviceToHost);
// pass true as a final optional argument to fuzzy_validate for verbose output
if(!fuzzy_validate(&h_validate_me[0], &h_first_round_call_result[0], num_options))
{
std::cerr << "Error: round 0 of call results don't match!" << std::endl;
exit(-1);
}
hipMemcpy(&h_validate_me[0], d_first_round_put_result, sizeof(real) * num_options, hipMemcpyDeviceToHost);
if(!fuzzy_validate(&h_validate_me[0], &h_first_round_put_result[0], num_options))
{
std::cerr << "Error: round 0 of put results don't match!" << std::endl;
exit(-1);
}
// BEGIN COMPACTION
// in subsequent rounds, select the stocks whose call & put prices from the first round
// meet or exceed these thresholds
const real min_call_threshold = 2.0;
const real min_put_threshold = 4.0;
// compact the options, copying those that meet our call & put thresholds
// to the arrays for round 2
start_timer(&timer);
size_t num_compacted_options = 0;
// TODO: your call to compact_options here
compaction_time = stop_timer(&timer, "GPU Compaction");
// BEGIN SUBSEQUENT ROUNDS
size_t num_compacted_options_reference = 0;
for(int round = 1; round < num_subsequent_rounds + 1; ++round)
{
// change the parameters of the model in each subsequent round
const real riskless_rate = random_real(0.03, 0.04);
const real volatility = random_real(0.50, 0.60);
    // do a round of Black-Scholes using the new parameters on the device
start_timer(&timer);
// TODO: your black_scholes_kernel launch here
char message[256];
sprintf(message, "GPU Black-Scholes round %d", round);
gpu_time[round] = stop_timer(&timer, message);
check_cuda_error(message, __FILE__, __LINE__);
// do a round of Black-Scholes on the host using new parameters
// filter the set of options to compute given the results of the last round,
// but compact the output
start_timer(&timer);
num_compacted_options_reference =
compacted_black_scholes_host(&h_stock_price[0],
&h_option_strike[0],
&h_option_years[0],
&h_first_round_call_result[0],
&h_first_round_put_result[0],
&h_subsequent_round_call_result[0],
&h_subsequent_round_put_result[0],
min_call_threshold,
min_put_threshold,
riskless_rate,
volatility,
num_options);
sprintf(message, "CPU Black-Scholes round %d", round);
cpu_time[round] = stop_timer(&timer, message);
if(num_compacted_options_reference != num_compacted_options)
{
std::cerr << "Error: round " << round << " num_compacted_options (" << num_compacted_options << ") doesn't match num_compacted_options_reference (" << num_compacted_options_reference << ")" << std::endl;
exit(-1);
}
// validate gpu results from this round
hipMemcpy(&h_validate_me[0], d_subsequent_round_call_result, sizeof(real) * num_compacted_options_reference, hipMemcpyDeviceToHost);
if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_call_result[0], num_compacted_options_reference))
{
std::cerr << "Error: round " << round << " of call results don't match!" << std::endl;
exit(-1);
}
hipMemcpy(&h_validate_me[0], d_subsequent_round_put_result, sizeof(real) * num_compacted_options_reference, hipMemcpyDeviceToHost);
if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_put_result[0], num_compacted_options_reference))
{
std::cerr << "Error: round " << round << " of put results don't match!" << std::endl;
exit(-1);
}
} // end for subsequent round
deallocate_device_storage(d_first_round_call_result, d_first_round_put_result,
d_subsequent_round_call_result, d_subsequent_round_put_result,
d_stock_price, d_option_strike,
d_option_years,
d_compacted_stock_price,
d_compacted_option_strike,
d_compacted_option_years);
// output a report
std::cout << std::endl;
real first_round_gpu_throughput = static_cast<real>(num_options) / (gpu_time[0] / 1000.0f);
real first_round_cpu_throughput = static_cast<real>(num_options) / (cpu_time[0] / 1000.0f);
std::cout << "Round 0: " << num_options << " options" << std::endl;
std::cout << "Throughput of GPU Black-Scholes Round 0: " << (first_round_gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Throughput of CPU Black-Scholes Round 0: " << (first_round_cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Speedup of Round 0: " << first_round_gpu_throughput / first_round_cpu_throughput << "x" << std::endl << std::endl;
for(int i = 1; i < gpu_time.size(); ++i)
{
real gpu_throughput = static_cast<real>(num_compacted_options_reference) / (gpu_time[i] / 1000.0f);
real cpu_throughput = static_cast<real>(num_compacted_options_reference) / (cpu_time[i] / 1000.0f);
std::cout << "Round " << i << ": " << num_compacted_options_reference << " options" << std::endl;
std::cout << "Throughput of GPU Black-Scholes Round " << i << ": " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Throughput of CPU Black-Scholes Round " << i << ": " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Speedup of Round " << i << ": " << gpu_throughput / cpu_throughput << "x" << std::endl << std::endl;
}
// report overall performance
real total_gpu_time = compaction_time + std::accumulate(gpu_time.begin(), gpu_time.end(), 0.0);
real total_cpu_time = std::accumulate(cpu_time.begin(), cpu_time.end(), 0.0);
real gpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_compacted_options_reference) / ((total_gpu_time) / 1000.0f);
real cpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_compacted_options_reference) / ((total_cpu_time) / 1000.0f);
std::cout << "Overall GPU throughput: " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Overall CPU throughput: " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl << std::endl;
std::cout << "Overall speedup: " << gpu_throughput / cpu_throughput << "x" << std::endl;
return 0;
}
| c73e17d6057c365d4eec18f0a79783750ed58d84.cu | // In Part 3, we'll use the stream compaction implementation we built
// in Part 2 to create a simple schedule for the second round of
// Black-Scholes jobs from Part 1. Since subsequent rounds of our options
// pricing are very sparse, a naive scheduling of this job will
// cause the majority of CUDA threads to idle while their active warp
// neighbors work on stocks from the first round which meet our criteria. A better
// scheduling should compact all of the data which meets or exceeds the
// threshold for subsequent rounds to the front of the input, and launch only as many
// threads as there is work to do. This will ensure that all of the CUDA threads
// launched in subsequent rounds will be active (modulo the few inactive threads
// at the end of the last block). This compaction scheme eliminates the warp
// divergence penalty and increases the throughput of the second round of Black-Scholes.
// Note that your stream compaction implementation may come with significant overhead.
// How many sparse rounds of Black-Scholes does it take before the cost becomes worth
// it? It may take quite a few when computing in single precision. For double precision,
// it requires just a handful. You can control whether the "real" type is "float"
// (single precision) or "double" (double precision) by editing the Makefile.
// If you've made it this far, Part 3 should be trivial. The only thing you need
// to do is launch kernels and, as always, allocate & deallocate device storage in
// mp3-part3.cu and implement the body of compact_options in compact.cu. This is mostly
// a copy and paste job from compact_even_elements, but this time, there is more data
// to move around. Search for the lines marked TODO: and get hacking!
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <limits>
#include <numeric>
#include "black_scholes.h"
#include "compact.h"
#include "mp3-util.h"
bool allocate_device_storage(real *&d_first_round_call_result, real *&d_first_round_put_result,
real *&d_subsequent_round_call_result, real *&d_subsequent_round_put_result,
real *&d_stock_price, real *&d_option_strike,
real *&d_option_years,
real *&d_compacted_stock_price,
real *&d_compacted_option_strike,
real *&d_compacted_option_years,
const size_t n)
{
// TODO: your device memory allocations here
// TODO: don't forget to check for CUDA errors!
// TODO: return true to indicate successful memory allocation
return false;
}
void deallocate_device_storage(real *d_first_round_call_result, real *d_first_round_put_result,
real *d_subsequent_round_call_result, real *d_subsequent_round_put_result,
real *d_stock_price, real *d_option_strike,
real *d_option_years,
real *d_compacted_stock_price,
real *d_compacted_option_strike,
real *d_compacted_option_years)
{
// TODO: your device memory deallocations here
// TODO: don't forget to check for CUDA errors!
}
int main(void)
{
event_pair timer;
const size_t num_subsequent_rounds = 5;
float compaction_time = 0;
std::vector<float> gpu_time(1 + num_subsequent_rounds);
std::vector<float> cpu_time(1 + num_subsequent_rounds);
// create arrays for 4M options
size_t num_options = 1<<22;
// allocate host storage
std::vector<real> h_first_round_call_result(num_options,0);
std::vector<real> h_first_round_put_result(num_options, 0);
std::vector<real> h_subsequent_round_call_result(num_options,0);
std::vector<real> h_subsequent_round_put_result(num_options, 0);
std::vector<real> h_stock_price(num_options);
std::vector<real> h_option_strike(num_options);
std::vector<real> h_option_years(num_options);
// generate options set
srand(5347);
for(int i = 0; i < num_options; ++i)
{
h_stock_price[i] = random_real(5.0, 30.0);
h_option_strike[i] = random_real(1.0, 100.0);
h_option_years[i] = random_real(0.25, 10.0);
}
// some pointers to the data set which will live in device memory
real *d_first_round_call_result = 0;
real *d_first_round_put_result = 0;
real *d_subsequent_round_call_result = 0;
real *d_subsequent_round_put_result = 0;
real *d_stock_price = 0;
real *d_option_strike = 0;
real *d_option_years = 0;
real *d_compacted_stock_price = 0;
real *d_compacted_option_strike = 0;
real *d_compacted_option_years = 0;
// allocate device storage
if(!allocate_device_storage(d_first_round_call_result, d_first_round_put_result,
d_subsequent_round_call_result, d_subsequent_round_put_result,
d_stock_price, d_option_strike, d_option_years,
d_compacted_stock_price,
d_compacted_option_strike,
d_compacted_option_years,
num_options))
{
std::cerr << "Error allocating device memory!" << std::endl;
exit(-1);
}
// fill the result arrays with 0
cudaMemset(d_first_round_call_result, 0, sizeof(real) * num_options);
cudaMemset(d_first_round_put_result, 0, sizeof(real) * num_options);
cudaMemset(d_subsequent_round_call_result, 0, sizeof(real) * num_options);
cudaMemset(d_subsequent_round_put_result, 0, sizeof(real) * num_options);
// copy input to GPU
start_timer(&timer);
// TODO: your host to device copies here
stop_timer(&timer, "host to device copy of input");
// BEGIN ROUND 0
  // we will use the following two parameters
  // for the first round of the Black-Scholes algorithm
const real first_round_riskless_rate = 0.02;
const real first_round_volatility = 0.30;
// do one round of Black-Scholes using our parameters
start_timer(&timer);
// TODO: your black_scholes_kernel launch here
gpu_time[0] = stop_timer(&timer, "GPU Black-Scholes round 0");
check_cuda_error("GPU Black-Scholes round 0", __FILE__, __LINE__);
// do round 0 of Black-Scholes on the host
start_timer(&timer);
black_scholes_host(&h_stock_price[0],
&h_option_strike[0],
&h_option_years[0],
&h_first_round_call_result[0],
&h_first_round_put_result[0],
first_round_riskless_rate,
first_round_volatility,
num_options);
cpu_time[0] = stop_timer(&timer, "CPU Black-Scholes round 0");
// validate gpu results from round 0
std::vector<real> h_validate_me(num_options);
cudaMemcpy(&h_validate_me[0], d_first_round_call_result, sizeof(real) * num_options, cudaMemcpyDeviceToHost);
// pass true as a final optional argument to fuzzy_validate for verbose output
if(!fuzzy_validate(&h_validate_me[0], &h_first_round_call_result[0], num_options))
{
std::cerr << "Error: round 0 of call results don't match!" << std::endl;
exit(-1);
}
cudaMemcpy(&h_validate_me[0], d_first_round_put_result, sizeof(real) * num_options, cudaMemcpyDeviceToHost);
if(!fuzzy_validate(&h_validate_me[0], &h_first_round_put_result[0], num_options))
{
std::cerr << "Error: round 0 of put results don't match!" << std::endl;
exit(-1);
}
// BEGIN COMPACTION
// in subsequent rounds, select the stocks whose call & put prices from the first round
// meet or exceed these thresholds
const real min_call_threshold = 2.0;
const real min_put_threshold = 4.0;
// compact the options, copying those that meet our call & put thresholds
// to the arrays for round 2
start_timer(&timer);
size_t num_compacted_options = 0;
// TODO: your call to compact_options here
compaction_time = stop_timer(&timer, "GPU Compaction");
// BEGIN SUBSEQUENT ROUNDS
size_t num_compacted_options_reference = 0;
for(int round = 1; round < num_subsequent_rounds + 1; ++round)
{
// change the parameters of the model in each subsequent round
const real riskless_rate = random_real(0.03, 0.04);
const real volatility = random_real(0.50, 0.60);
    // do a round of Black-Scholes using the new parameters on the device
start_timer(&timer);
// TODO: your black_scholes_kernel launch here
char message[256];
sprintf(message, "GPU Black-Scholes round %d", round);
gpu_time[round] = stop_timer(&timer, message);
check_cuda_error(message, __FILE__, __LINE__);
// do a round of Black-Scholes on the host using new parameters
// filter the set of options to compute given the results of the last round,
// but compact the output
start_timer(&timer);
num_compacted_options_reference =
compacted_black_scholes_host(&h_stock_price[0],
&h_option_strike[0],
&h_option_years[0],
&h_first_round_call_result[0],
&h_first_round_put_result[0],
&h_subsequent_round_call_result[0],
&h_subsequent_round_put_result[0],
min_call_threshold,
min_put_threshold,
riskless_rate,
volatility,
num_options);
sprintf(message, "CPU Black-Scholes round %d", round);
cpu_time[round] = stop_timer(&timer, message);
if(num_compacted_options_reference != num_compacted_options)
{
std::cerr << "Error: round " << round << " num_compacted_options (" << num_compacted_options << ") doesn't match num_compacted_options_reference (" << num_compacted_options_reference << ")" << std::endl;
exit(-1);
}
// validate gpu results from this round
cudaMemcpy(&h_validate_me[0], d_subsequent_round_call_result, sizeof(real) * num_compacted_options_reference, cudaMemcpyDeviceToHost);
if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_call_result[0], num_compacted_options_reference))
{
std::cerr << "Error: round " << round << " of call results don't match!" << std::endl;
exit(-1);
}
cudaMemcpy(&h_validate_me[0], d_subsequent_round_put_result, sizeof(real) * num_compacted_options_reference, cudaMemcpyDeviceToHost);
if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_put_result[0], num_compacted_options_reference))
{
std::cerr << "Error: round " << round << " of put results don't match!" << std::endl;
exit(-1);
}
} // end for subsequent round
deallocate_device_storage(d_first_round_call_result, d_first_round_put_result,
d_subsequent_round_call_result, d_subsequent_round_put_result,
d_stock_price, d_option_strike,
d_option_years,
d_compacted_stock_price,
d_compacted_option_strike,
d_compacted_option_years);
// output a report
std::cout << std::endl;
real first_round_gpu_throughput = static_cast<real>(num_options) / (gpu_time[0] / 1000.0f);
real first_round_cpu_throughput = static_cast<real>(num_options) / (cpu_time[0] / 1000.0f);
std::cout << "Round 0: " << num_options << " options" << std::endl;
std::cout << "Throughput of GPU Black-Scholes Round 0: " << (first_round_gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Throughput of CPU Black-Scholes Round 0: " << (first_round_cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Speedup of Round 0: " << first_round_gpu_throughput / first_round_cpu_throughput << "x" << std::endl << std::endl;
for(int i = 1; i < gpu_time.size(); ++i)
{
real gpu_throughput = static_cast<real>(num_compacted_options_reference) / (gpu_time[i] / 1000.0f);
real cpu_throughput = static_cast<real>(num_compacted_options_reference) / (cpu_time[i] / 1000.0f);
std::cout << "Round " << i << ": " << num_compacted_options_reference << " options" << std::endl;
std::cout << "Throughput of GPU Black-Scholes Round " << i << ": " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Throughput of CPU Black-Scholes Round " << i << ": " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Speedup of Round " << i << ": " << gpu_throughput / cpu_throughput << "x" << std::endl << std::endl;
}
// report overall performance
real total_gpu_time = compaction_time + std::accumulate(gpu_time.begin(), gpu_time.end(), 0.0);
real total_cpu_time = std::accumulate(cpu_time.begin(), cpu_time.end(), 0.0);
real gpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_compacted_options_reference) / ((total_gpu_time) / 1000.0f);
real cpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_compacted_options_reference) / ((total_cpu_time) / 1000.0f);
std::cout << "Overall GPU throughput: " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl;
std::cout << "Overall CPU throughput: " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl << std::endl;
std::cout << "Overall speedup: " << gpu_throughput / cpu_throughput << "x" << std::endl;
return 0;
}
|
343cd5ccda4259111d3372a3abe7482357346d14.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define SIZEM 100
__global__
void matrixAddKernel(float* A, float* B, float* C, int n){
    /* A is the output matrix (A = B + C); B and C are the inputs; n is the matrix dimension (n x n) */
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < (n*n)){
A[i] = B[i] + C[i];
}
}
__global__
void matrixAddKernelRow(float* A, float* B, float* C, int n){
    /* A is the output matrix (A = B + C); B and C are the inputs; n is the matrix dimension (n x n) */
int j = threadIdx.x + blockDim.x * blockIdx.x;
if(j < n)
for (int i = 0; i < n; ++i){
A[ ( j * n ) + i ] = B[ ( j * n ) + i ] + C[ ( j * n ) + i ];
}
}
__global__
void matrixAddKernelColumn(float* A, float* B, float* C, int n){
    /* A is the output matrix (A = B + C); B and C are the inputs; n is the matrix dimension (n x n) */
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < n)
for (int j = 0; j < n; ++j){
A[ ( j * n ) + i ] = B[ ( j * n ) + i ] + C[ ( j * n ) + i ];
}
}
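/* Note added for clarity (not part of the original file): the three kernels above expect different
   launch sizes. The per-element kernel needs one thread per matrix entry (n*n threads in total),
   while the row and column kernels only need one thread per row or column (n threads); extra threads
   simply fail the bounds check and exit, which is why the element-sized launch in matrixAdd below
   still gives correct results. A sketch of matching 1D grids, assuming 256-thread blocks: */
static inline dim3 gridForElementKernel(int n) { return dim3((n * n + 255) / 256); }
static inline dim3 gridForRowOrColumnKernel(int n) { return dim3((n + 255) / 256); }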
__host__
void matrixAdd(float* A, float* B, float* C, int n){
int size = n * n * sizeof(float);
float* d_A;
float* d_B;
float* d_C;
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_C, size);
hipMemcpy(d_C, C, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matrixAddKernelColumn), dim3(ceil((n*n)/256.0)), dim3(256), 0, 0, d_A, d_B, d_C, n);
hipMemcpy(A, d_A, size, hipMemcpyDeviceToHost);
// Free device memory for A, B, C
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
__host__
int main(int argc, char const *argv[]){
/*
float a[SIZEM];
float b[SIZEM*SIZEM];
float c[SIZEM];
*/
float* a;
float* b;
float* c;
a = (float*) malloc(SIZEM*SIZEM*sizeof(float));
b = (float*) malloc(SIZEM*SIZEM*sizeof(float));
c = (float*) malloc(SIZEM*SIZEM*sizeof(float));
for (long int i = 0; i < SIZEM*SIZEM; ++i){
a[i] = 0;
}
for (long int i = 0; i < SIZEM*SIZEM; ++i){
b[i] = 1;
}
for (long int i = 0; i < SIZEM*SIZEM; ++i){
c[i] = 1;
}
matrixAdd(a, b, c, SIZEM);
for (int i = 0; i < SIZEM*SIZEM; ++i){
if (i % SIZEM == 0){
std::cout << '\n';
}
std::cout << a[i] << '\t';
}
std::cout << '\n';
return 0;
}
/*
*/
| 343cd5ccda4259111d3372a3abe7482357346d14.cu | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#define SIZEM 100
__global__
void matrixAddKernel(float* A, float* B, float* C, int n){
    /* A is the output matrix (A = B + C); B and C are the inputs; n is the matrix dimension (n x n) */
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < (n*n)){
A[i] = B[i] + C[i];
}
}
__global__
void matrixAddKernelRow(float* A, float* B, float* C, int n){
    /* A is the output matrix (A = B + C); B and C are the inputs; n is the matrix dimension (n x n) */
int j = threadIdx.x + blockDim.x * blockIdx.x;
if(j < n)
for (int i = 0; i < n; ++i){
A[ ( j * n ) + i ] = B[ ( j * n ) + i ] + C[ ( j * n ) + i ];
}
}
__global__
void matrixAddKernelColumn(float* A, float* B, float* C, int n){
    /* A is the output matrix (A = B + C); B and C are the inputs; n is the matrix dimension (n x n) */
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < n)
for (int j = 0; j < n; ++j){
A[ ( j * n ) + i ] = B[ ( j * n ) + i ] + C[ ( j * n ) + i ];
}
}
__host__
void matrixAdd(float* A, float* B, float* C, int n){
int size = n * n * sizeof(float);
float* d_A;
float* d_B;
float* d_C;
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
cudaMemcpy(d_C, C, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
matrixAddKernelColumn<<<ceil((n*n)/256.0), 256>>>(d_A, d_B, d_C, n);
cudaMemcpy(A, d_A, size, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
__host__
int main(int argc, char const *argv[]){
/*
float a[SIZEM];
float b[SIZEM*SIZEM];
float c[SIZEM];
*/
float* a;
float* b;
float* c;
a = (float*) malloc(SIZEM*SIZEM*sizeof(float));
b = (float*) malloc(SIZEM*SIZEM*sizeof(float));
c = (float*) malloc(SIZEM*SIZEM*sizeof(float));
for (long int i = 0; i < SIZEM*SIZEM; ++i){
a[i] = 0;
}
for (long int i = 0; i < SIZEM*SIZEM; ++i){
b[i] = 1;
}
for (long int i = 0; i < SIZEM*SIZEM; ++i){
c[i] = 1;
}
matrixAdd(a, b, c, SIZEM);
for (int i = 0; i < SIZEM*SIZEM; ++i){
if (i % SIZEM == 0){
std::cout << '\n';
}
std::cout << a[i] << '\t';
}
std::cout << '\n';
return 0;
}
/*
*/
|
cc91c7209aaae537f3a29ee97f05638b1b22f585.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "double_value.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
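// hipFree(0) forces runtime/context initialization; the launch and synchronize below warm up the kernel before timing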
hipFree(0);
hipLaunchKernelGGL((double_value), dim3(gridBlock), dim3(threadBlock), 0, 0, x, y);
hipDeviceSynchronize();
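// 10 untimed warm-up launches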
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((double_value), dim3(gridBlock), dim3(threadBlock), 0, 0, x, y);
}
auto start = steady_clock::now();
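// 1000 timed launches measured with steady_clock; the clock is stopped without a device synchronize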
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((double_value), dim3(gridBlock), dim3(threadBlock), 0, 0, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cc91c7209aaae537f3a29ee97f05638b1b22f585.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "double_value.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
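// cudaFree(0) forces context initialization; the launch and synchronize below warm up the kernel before timing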
cudaFree(0);
double_value<<<gridBlock,threadBlock>>>(x,y);
cudaDeviceSynchronize();
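// 10 untimed warm-up launches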
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
double_value<<<gridBlock,threadBlock>>>(x,y);
}
auto start = steady_clock::now();
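// 1000 timed launches measured with steady_clock; the clock is stopped without a device synchronize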
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
double_value<<<gridBlock,threadBlock>>>(x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0f4607ba0b3d39580d7348726c786c7ec6d061ca.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
double* h_A;
double* h_B;
double* h_C;
double* d_A;
double* d_B;
double* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(double*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const double* A, const double* B, double* C, unsigned iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
double Value1=0;
double Value2=0;
double Value3=0;
double Value=0;
double I1=A[i];
double I2=B[i];
#pragma unroll 1000
// Excessive Addition access
for(unsigned k=0; k<iterations;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
unsigned iterations;
unsigned blocks;
if (argc != 3){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
blocks = atoi(argv[2]);
}
printf("Power Microbenchmarks with iterations %lu\n",iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(double);
// Allocate input vectors h_A and h_B in host memory
h_A = (double*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (double*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (double*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
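// time a single kernel launch with hipEvent start/stop markers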
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(double* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (double)RAND_MAX;
}
}
| 0f4607ba0b3d39580d7348726c786c7ec6d061ca.cu | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 1024
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
double* h_A;
double* h_B;
double* h_C;
double* d_A;
double* d_B;
double* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(double*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const double* A, const double* B, double* C, unsigned iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
double Value1=0;
double Value2=0;
double Value3=0;
double Value=0;
double I1=A[i];
double I2=B[i];
#pragma unroll 1000
// Excessive Addition access
for(unsigned k=0; k<iterations;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
unsigned iterations;
unsigned blocks;
if (argc != 3){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
blocks = atoi(argv[2]);
}
printf("Power Microbenchmarks with iterations %lu\n",iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(double);
// Allocate input vectors h_A and h_B in host memory
h_A = (double*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (double*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (double*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
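// time a single kernel launch with cudaEvent start/stop markers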
checkCudaErrors(cudaEventRecord(start));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(double* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (double)RAND_MAX;
}
}
|
541443a071444ed839242b34719f8d26e0fd97c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "layers/graph_conv.h"
#include "layers/network.h"
#include "functions/activations.h"
#include "initializer/random_normal.h"
#include "functions/loss.h"
#include "optimization/gradient_descent.h"
#include <fstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <iostream>
#include <cstdlib>
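// returns the index of the largest element in data[0..n)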
size_t argmax(float* data, size_t n) {
float m = data[0];
size_t mi = 0;
for (int i = 0; i < n; ++i) {
if (data[i] > m) {
m = data[i]; mi = i;
}
}
return mi;
}
int main() {
hipblasHandle_t handle;
hipblasCreate(&handle);
hipsparseHandle_t sparseHandle;
hipsparseCreate(&sparseHandle);
std::ifstream content("../data/Pubmed-Diabetes/data/Pubmed-Diabetes.NODE.paper.tab");
const int numPapers = 19717;
const int numWords = 500;
const int numEdges = 44338;
std::vector<std::string> ids(numPapers);
std::unordered_map<std::string, size_t> id_map;
std::string label;
std::unordered_map<std::string, int> label_map = {
{"Agents", 0},
{"AI", 1},
{"DB", 2},
{"IR", 3},
{"ML", 4},
{"HCI", 5}
};
float* data = new float[numPapers*numWords];
float* labels = new float[numPapers*3];
float* labels2 = new float[numPapers*3];
int count[3];
for (int i =0; i < 3; ++i) {
count[i] = 0;
}
for (int i = 0; i < numPapers*3; ++i) {
labels[i] = 0;
labels2[i] = 0;
}
std::unordered_map<std::string, int> w_map;
std::vector<size_t> test;
std::string temp;
getline(content, temp);
content >> temp;
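// vocabulary header: for each word token, keep the field between the first two ':' separators as the word key and record its column index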
for (int i = 0; i < numWords; ++i) {
content >> temp;
std::cout << temp << std::endl;
int i1 = 0, i2 = 0;
while (i1 < temp.length() && temp[i1] != ':') {
++i1;
}
i2 = i1+1;
while (i2 < temp.length() && temp[i2] != ':') {
++i2;
}
std::cout << i1 << " " << i2 << std::endl;
temp = temp.substr(i1+1, i2-(i1+1));
w_map[temp] = i;
}
std::string temp2;
getline(content, temp2);
for (int i = 0; i < numPapers; ++i) {
content >> ids[i];
content >> label;
std::cout << ids[i] << std::endl;
std::cout << label << std::endl;
int cindex = 0;
while (cindex < label.length() && !isdigit(label[cindex])) {
cindex++;
}
label = label.substr(cindex, label.length()-cindex);
id_map[ids[i]] = i;
float sum = 0.0f;
content >> temp;
while(temp.length() > 0 && temp[0] != 's') {
std::cout << temp;
cindex = 0;
while (cindex < temp.length() && !isdigit(temp[cindex])) {
cindex++;
}
std::cout << temp << std::endl;
temp2 = temp.substr(cindex, temp.length()-cindex);
temp = temp.substr(0, cindex-1);
std::cout << temp << std::endl;
std::cout << temp2 << std::endl;
data[w_map[temp]*numPapers+i] = atof(temp2.c_str());
sum += data[w_map[temp]*numPapers+i];
content >> temp;
}
std::cout << "blargh" << std::endl;
for (int j = 0; j < numWords; ++j) {
data[j*numPapers+i] /= sum;
}
int li = atoi(label.c_str()) - 1;
std::cout << label << std::endl;
std::cout << li << std::endl;
std::cout << "blargh 2" << std::endl;
if (count[li] < 20) {
labels[li*numPapers+i] = 1.0f;
++count[li];
//test.push_back(i);
}
else {
labels2[li*numPapers+i] = 1.0f;
test.push_back(i);
}
//getline(content, temp);
//std::cin.get();
}
content.close();
std::cin.get();
std::vector<std::vector<size_t>> adj_list(numPapers);
std::vector<std::unordered_set<size_t>> adj_set(numPapers);
std::ifstream cites("../data/Pubmed-Diabetes/data/Pubmed-Diabetes.DIRECTED.cites.tab");
getline(cites, temp);
getline(cites, temp);
std::string id1;
std::string id2;
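// edge list: each endpoint id is the second of two tokens read; strip the prefix up to ':' and add the edge in both directions (adj_set prevents duplicates)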
for (int i = 0; i < numEdges; ++i) {
cites >> id1;
cites >> id1;
cites >> id2;
cites >> id2;
std::cout << id1 << std::endl;
std::cout << id2 << std::endl;
int cindex = 0;
while (cindex < id1.length() && id1[cindex] != ':') {
++cindex;
}
++cindex;
id1 = id1.substr(cindex, id1.length()-cindex);
cindex = 0;
while (cindex < id2.length() && id2[cindex] != ':') {
++cindex;
}
++cindex;
id2 = id2.substr(cindex, id2.length()-cindex);
int i1 = id_map[id1];
int i2 = id_map[id2];
if (adj_set[i1].find(i2) == adj_set[i1].end()) {
adj_list[i1].push_back(i2);
adj_set[i1].insert(i2);
}
if (adj_set[i2].find(i1) == adj_set[i2].end()) {
adj_list[i2].push_back(i1);
adj_set[i2].insert(i1);
}
}
cites.close();
Matrix<float> features(numPapers, numWords);
features.setValues(data);
Graph<float> g(adj_list, sparseHandle);
std::cin.get();
GCNLayer<random_normal_init, relu> layer1("l1", numPapers, numWords, 32, relu(),
random_normal_init(0, 0.01));
GCNLayer<random_normal_init, softmax> layer2("l2", numPapers, 32, 3, softmax(),
random_normal_init(0, 0.1), -1);
Network<cross_entropy_with_logits, adam, GCNLayer<random_normal_init, relu>, GCNLayer<random_normal_init, softmax>> network(numPapers, 3, {}, adam(), handle, sparseHandle, layer1, layer2);
network.setGraph(&g);
network.setLabels(labels);
float* result = new float[numPapers*3];
try {
hipMemcpy(result, network.result(features).getData(), sizeof(float)*3*numPapers, hipMemcpyDeviceToHost);
} catch(int i) { std::cout << "Error " << i << std::endl; }
for (int i = 0; i < numPapers; ++i) {
std::cout << ids[i] << " ";
for (int j = 0; j < 3; j++) {
std::cout << result[j*numPapers+i] << " ";
}
std::cout << std::endl;
}
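// evaluation/training loop: compute accuracy on the held-out nodes (labels2), print the loss under both label sets, then run 10 training steps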
for (int i = 0; i < 20; ++i) {
hipMemcpy(result, network.result(features, false).getData(), sizeof(float)*3*numPapers, hipMemcpyDeviceToHost);
float l[3];
float l_[3];
float total = 0.0f;
float correct = 0.0f;
for (auto& t:test) {
for (int j = 0; j < 3; ++j) {
l[j] = labels2[j*numPapers+t];
//l[j] = labels[j*numPapers+t];
l_[j] = result[j*numPapers+t];
}
if (argmax(l, 3) == argmax(l_, 3)) {
correct += 1;
}
total += 1;
}
std::cout << network.getLoss() << std::endl;
network.setLabels(labels2);
std::cout << network.getLoss() << std::endl;
network.setLabels(labels);
std::cout << "Acc: " << (correct/total) << std::endl;
std::cin.get();
network.train(10, features);
}
delete[] data;
delete[] labels;
delete[] result;
hipblasDestroy(handle);
hipsparseDestroy(sparseHandle);
}
| 541443a071444ed839242b34719f8d26e0fd97c7.cu | #include "layers/graph_conv.h"
#include "layers/network.h"
#include "functions/activations.h"
#include "initializer/random_normal.h"
#include "functions/loss.h"
#include "optimization/gradient_descent.h"
#include <fstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <iostream>
#include <cstdlib>
size_t argmax(float* data, size_t n) {
float m = data[0];
size_t mi = 0;
for (int i = 0; i < n; ++i) {
if (data[i] > m) {
m = data[i]; mi = i;
}
}
return mi;
}
int main() {
cublasHandle_t handle;
cublasCreate(&handle);
cusparseHandle_t sparseHandle;
cusparseCreate(&sparseHandle);
std::ifstream content("../data/Pubmed-Diabetes/data/Pubmed-Diabetes.NODE.paper.tab");
const int numPapers = 19717;
const int numWords = 500;
const int numEdges = 44338;
std::vector<std::string> ids(numPapers);
std::unordered_map<std::string, size_t> id_map;
std::string label;
std::unordered_map<std::string, int> label_map = {
{"Agents", 0},
{"AI", 1},
{"DB", 2},
{"IR", 3},
{"ML", 4},
{"HCI", 5}
};
float* data = new float[numPapers*numWords];
float* labels = new float[numPapers*3];
float* labels2 = new float[numPapers*3];
int count[3];
for (int i =0; i < 3; ++i) {
count[i] = 0;
}
for (int i = 0; i < numPapers*3; ++i) {
labels[i] = 0;
labels2[i] = 0;
}
std::unordered_map<std::string, int> w_map;
std::vector<size_t> test;
std::string temp;
getline(content, temp);
content >> temp;
for (int i = 0; i < numWords; ++i) {
content >> temp;
std::cout << temp << std::endl;
int i1 = 0, i2 = 0;
while (i1 < temp.length() && temp[i1] != ':') {
++i1;
}
i2 = i1+1;
while (i2 < temp.length() && temp[i2] != ':') {
++i2;
}
std::cout << i1 << " " << i2 << std::endl;
temp = temp.substr(i1+1, i2-(i1+1));
w_map[temp] = i;
}
std::string temp2;
getline(content, temp2);
for (int i = 0; i < numPapers; ++i) {
content >> ids[i];
content >> label;
std::cout << ids[i] << std::endl;
std::cout << label << std::endl;
int cindex = 0;
while (cindex < label.length() && !isdigit(label[cindex])) {
cindex++;
}
label = label.substr(cindex, label.length()-cindex);
id_map[ids[i]] = i;
float sum = 0.0f;
content >> temp;
while(temp.length() > 0 && temp[0] != 's') {
std::cout << temp;
cindex = 0;
while (cindex < temp.length() && !isdigit(temp[cindex])) {
cindex++;
}
std::cout << temp << std::endl;
temp2 = temp.substr(cindex, temp.length()-cindex);
temp = temp.substr(0, cindex-1);
std::cout << temp << std::endl;
std::cout << temp2 << std::endl;
data[w_map[temp]*numPapers+i] = atof(temp2.c_str());
sum += data[w_map[temp]*numPapers+i];
content >> temp;
}
std::cout << "blargh" << std::endl;
for (int j = 0; j < numWords; ++j) {
data[j*numPapers+i] /= sum;
}
int li = atoi(label.c_str()) - 1;
std::cout << label << std::endl;
std::cout << li << std::endl;
std::cout << "blargh 2" << std::endl;
if (count[li] < 20) {
labels[li*numPapers+i] = 1.0f;
++count[li];
//test.push_back(i);
}
else {
labels2[li*numPapers+i] = 1.0f;
test.push_back(i);
}
//getline(content, temp);
//std::cin.get();
}
content.close();
std::cin.get();
std::vector<std::vector<size_t>> adj_list(numPapers);
std::vector<std::unordered_set<size_t>> adj_set(numPapers);
std::ifstream cites("../data/Pubmed-Diabetes/data/Pubmed-Diabetes.DIRECTED.cites.tab");
getline(cites, temp);
getline(cites, temp);
std::string id1;
std::string id2;
for (int i = 0; i < numEdges; ++i) {
cites >> id1;
cites >> id1;
cites >> id2;
cites >> id2;
std::cout << id1 << std::endl;
std::cout << id2 << std::endl;
int cindex = 0;
while (cindex < id1.length() && id1[cindex] != ':') {
++cindex;
}
++cindex;
id1 = id1.substr(cindex, id1.length()-cindex);
cindex = 0;
while (cindex < id2.length() && id2[cindex] != ':') {
++cindex;
}
++cindex;
id2 = id2.substr(cindex, id2.length()-cindex);
int i1 = id_map[id1];
int i2 = id_map[id2];
if (adj_set[i1].find(i2) == adj_set[i1].end()) {
adj_list[i1].push_back(i2);
adj_set[i1].insert(i2);
}
if (adj_set[i2].find(i1) == adj_set[i2].end()) {
adj_list[i2].push_back(i1);
adj_set[i2].insert(i1);
}
}
cites.close();
Matrix<float> features(numPapers, numWords);
features.setValues(data);
Graph<float> g(adj_list, sparseHandle);
std::cin.get();
GCNLayer<random_normal_init, relu> layer1("l1", numPapers, numWords, 32, relu(),
random_normal_init(0, 0.01));
GCNLayer<random_normal_init, softmax> layer2("l2", numPapers, 32, 3, softmax(),
random_normal_init(0, 0.1), -1);
Network<cross_entropy_with_logits, adam, GCNLayer<random_normal_init, relu>, GCNLayer<random_normal_init, softmax>> network(numPapers, 3, {}, adam(), handle, sparseHandle, layer1, layer2);
network.setGraph(&g);
network.setLabels(labels);
float* result = new float[numPapers*3];
try {
cudaMemcpy(result, network.result(features).getData(), sizeof(float)*3*numPapers, cudaMemcpyDeviceToHost);
} catch(int i) { std::cout << "Error " << i << std::endl; }
for (int i = 0; i < numPapers; ++i) {
std::cout << ids[i] << " ";
for (int j = 0; j < 3; j++) {
std::cout << result[j*numPapers+i] << " ";
}
std::cout << std::endl;
}
for (int i = 0; i < 20; ++i) {
cudaMemcpy(result, network.result(features, false).getData(), sizeof(float)*3*numPapers, cudaMemcpyDeviceToHost);
float l[3];
float l_[3];
float total = 0.0f;
float correct = 0.0f;
for (auto& t:test) {
for (int j = 0; j < 3; ++j) {
l[j] = labels2[j*numPapers+t];
//l[j] = labels[j*numPapers+t];
l_[j] = result[j*numPapers+t];
}
if (argmax(l, 3) == argmax(l_, 3)) {
correct += 1;
}
total += 1;
}
std::cout << network.getLoss() << std::endl;
network.setLabels(labels2);
std::cout << network.getLoss() << std::endl;
network.setLabels(labels);
std::cout << "Acc: " << (correct/total) << std::endl;
std::cin.get();
network.train(10, features);
}
delete[] data;
delete[] labels;
delete[] result;
cublasDestroy(handle);
cusparseDestroy(sparseHandle);
}
|
4566021d070fca1279f0e465c78232de7bffc727.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
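// Illustrative, compile-time restatement of the banner above (for exposition
// only; it assumes nothing beyond the shapes named there and is not part of the
// generated sweep): the suffix _64x64x8_32x32x1_8x4_4x8_2x2 decomposes per
// dimension as
//   warp tile        (32x32) = elements/thread (8x4) * threads/warp (4x8)
//   threadblock tile (64x64) = warp tile (32x32)     * warps/block  (2x2)
static_assert(8 * 4 == 32 && 4 * 8 == 32,
    "warp tile = elements per thread * threads per warp, per dimension");
static_assert(32 * 2 == 64 && 32 * 2 == 64,
    "threadblock tile = warp tile * warps per block, per dimension");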
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
| 4566021d070fca1279f0e465c78232de7bffc727.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_multiplies_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
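////////////////////////////////////////////////////////////////////////////////
// Note (illustrative; not part of the generated benchmark source): the
// BENCHMARK(...) registrations above are collected and run by Google
// Benchmark's standard driver. If no other translation unit in the build
// provides main(), the stock macro
//
// BENCHMARK_MAIN();
//
// would serve as a minimal entry point; the cuASR benchmark build is assumed
// to supply this elsewhere.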
22a865ed391c96a64359d2d453f916d3c41ba1af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zhemv_mgpu.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
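/***************************************************************************
Illustration (added; not part of the original MAGMA source):
block columns are distributed cyclically over the GPUs. With ngpu = 2 and
block row blk = 5, GPU 0 (my_gpu_id = 0) multiplies the off-diagonal tiles
A_{5,0}, A_{5,2}, A_{5,4}, while GPU 1 multiplies A_{5,1}, A_{5,3} and the
diagonal tile A_{5,5}: the jj loop below starts at my_gpu_id and advances
by ngpu, and the diagonal block is processed only when
blk % ngpu == my_gpu_id. Each GPU accumulates its partial products into its
own column blk of work; the per-GPU workspaces are combined outside this
kernel.
****************************************************************************/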
__global__ void
ssymv_kernel_L_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+2) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
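// note: the same NB_X x NB_Y = 64x4 = 256 threads are re-indexed here as a
// 32x8 grid (tx2, ty2), and further below as a 16x16 grid (tx4, ty4)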
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
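// e.g. (illustration): with NB_X = 64, block_offset = 0, and n = 150, the last
// block row has partial = 150 % 64 = 22 valid rows; earlier block rows have
// partial = 0, i.e. all 64 rows valid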
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in ssymv (single GPU); why?
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_S_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L_mgpu
/**************************************************************
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
********************************************************************/
__global__ void
ssymv_kernel_L_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
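// A minimal host-side sketch of the ownership rule used above (not part of MAGMA; the
// helper name is illustrative): for renumbered GPU `my_gpu_id` and block row `blk`, it
// returns how many columns of `work` contribute to the row sum formed by the kernel.
static inline int ssymv_mgpu_sum_ncols_sketch( int blk, int my_gpu_id, int ngpu, int blocks )
{
    if ( blk < my_gpu_id )
        return 0;                // this GPU holds no data for block row blk
    if ( blk % ngpu == my_gpu_id )
        return blocks - blk;     // owner of the diagonal block: columns blk, ..., blocks-1
    return 1;                    // otherwise only the transposed block in column blk
}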
/**
Purpose
-------
magmablas_ssymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a REAL array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise
performance will deteriorate because the memory accesses
will not be fully coalesced.
@param[in]
x REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) REAL array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= n*ngpu.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a REAL array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_sblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
float alpha,
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
float const *x, magma_int_t incx,
float beta, // unused, see magmablas_ssymv_mgpu_sync
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
int upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
float const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
float *dx_dev = dwork[dev];
float *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_ssetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
hipLaunchKernelGGL(( ssymv_kernel_U_mgpu), dim3(grid), dim3(threads), 0, queues[dev] ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( ssymv_kernel_U_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev] ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
hipLaunchKernelGGL(( ssymv_kernel_L_mgpu), dim3(grid), dim3(threads), 0, queues[dev] ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( ssymv_kernel_L_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev] ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
float *dx_dev = dwork[dev];
magma_sgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_ssymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
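// A host-side sizing sketch (not part of MAGMA; the helper name is illustrative) showing
// how the workspace minima checked above can be derived from n, offset, ldda, ngpu and
// nb == NB_X before calling magmablas_ssymv_mgpu.
static inline void ssymv_mgpu_workspace_sizes_sketch(
    magma_int_t n, magma_int_t offset, magma_int_t ldda,
    magma_int_t ngpu, magma_int_t nb,
    magma_int_t *lhwork, magma_int_t *ldwork )
{
    magma_int_t block_offset = offset % nb;
    magma_int_t blocks = (n + block_offset + nb - 1) / nb;  // same as magma_ceildiv
    *lhwork = n*ngpu;              // hwork holds n partial results per GPU
    *ldwork = ldda*(blocks + 1);   // dwork[dev]: one column to broadcast x, plus per-block partial sums
}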
/**
Synchronizes and accumulates the final ssymv result.
For convenience, the parameters are identical to magmablas_ssymv_mgpu
(though some are unused here).
@see magmablas_ssymv_mgpu
@ingroup magma_sblas2
********************************************************************/
extern "C" magma_int_t
magmablas_ssymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_ssymv_mgpu
magma_int_t n,
float alpha, // unused
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
float const *x, magma_int_t incx, // unused
float beta,
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const float c_one = MAGMA_S_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_sscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_saxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
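// A plain-C reference sketch (illustration only, assuming incy == 1 and that all queues
// have already been synchronized) of the reduction magmablas_ssymv_mgpu_sync performs
// with sscal/saxpy: y = beta*y + sum over GPUs of the per-GPU partial results in hwork.
static inline void ssymv_mgpu_sync_reference(
    magma_int_t n, float beta, float *y,
    float const *hwork, magma_int_t ngpu )
{
    for (magma_int_t i = 0; i < n; ++i) {
        float yi = beta * y[i];
        for (magma_int_t dev = 0; dev < ngpu; ++dev) {
            yi += hwork[dev*n + i];   // hwork already contains the alpha*A*x contributions
        }
        y[i] = yi;
    }
}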
| 22a865ed391c96a64359d2d453f916d3c41ba1af.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zhemv_mgpu.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero; thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
__global__ void
ssymv_kernel_L_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in ssymv (single GPU); why?
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_S_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L_mgpu
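// A minimal host-side sketch (not part of MAGMA; the helper name is illustrative) of the
// launch geometry implied by the kernel above: one NB_X x NB_Y thread block per NB_X block
// row, with `partial` valid rows in the last block when n + block_offset is not a multiple
// of NB_X.
static inline void ssymv_mgpu_launch_geometry_sketch(
    int n, int block_offset, int *blocks, int *partial, dim3 *grid, dim3 *threads )
{
    *blocks  = (n + block_offset + NB_X - 1) / NB_X;
    *partial = (n + block_offset) % NB_X;     // 0 means the last block row is full
    *grid    = dim3( *blocks, 1 );
    *threads = dim3( NB_X, NB_Y );
}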
/**************************************************************
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
********************************************************************/
__global__ void
ssymv_kernel_L_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
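// A host-side reference sketch (illustration only; the helper name is not part of MAGMA)
// of the accumulation in ssymv_kernel_L_mgpu_sum: for a valid row index `ind` of block row
// `blk` it sums the contributing columns of a host copy of `work` and scales by alpha.
static inline float ssymv_mgpu_sum_row_reference(
    float const *work, int lda, float alpha,
    int ind, int blk, int blocks, int my_gpu_id, int ngpu )
{
    float Ax = 0.f;
    if ( blk >= my_gpu_id ) {
        int last = (blk % ngpu == my_gpu_id) ? blocks - 1 : blk;
        for (int j = blk; j <= last; ++j) {
            Ax += work[ind + j*lda];
        }
    }
    return alpha * Ax;
}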
/**
Purpose
-------
magmablas_ssymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a REAL array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise
performance will deteriorate because the memory accesses
will not be fully coalesced.
@param[in]
x REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) REAL array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= n*ngpu.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a REAL array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_sblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
float alpha,
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
float const *x, magma_int_t incx,
float beta, // unused, see magmablas_ssymv_mgpu_sync
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
int upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
float const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
float *dx_dev = dwork[dev];
float *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_ssetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
ssymv_kernel_U_mgpu<<< grid, threads, 0, queues[dev] >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
ssymv_kernel_U_mgpu_sum<<< grid, threads_sum, 0, queues[dev] >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
ssymv_kernel_L_mgpu<<< grid, threads, 0, queues[dev] >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
ssymv_kernel_L_mgpu_sum<<< grid, threads_sum, 0, queues[dev] >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
float *dx_dev = dwork[dev];
magma_sgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_ssymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
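// A host-side sketch (illustration only; the helper name is not part of MAGMA) of the
// nlocal formula quoted in the documentation above: the number of columns of the
// block-column distributed matrix stored on device `dev`, ignoring any row/column offset.
static inline magma_int_t ssymv_mgpu_nlocal_sketch(
    magma_int_t n, magma_int_t nb, magma_int_t ngpu, magma_int_t dev )
{
    magma_int_t nblocks   = n / nb;               // number of full blocks, floor(n/nb)
    magma_int_t base      = (nblocks / ngpu)*nb;  // floor(n/nb/ngpu)*nb
    magma_int_t extra_dev = nblocks % ngpu;       // device holding the final partial block
    if ( dev < extra_dev )
        return base + nb;
    else if ( dev == extra_dev )
        return base + n % nb;
    else
        return base;
}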
/**
Synchronizes and accumulates the final ssymv result.
For convenience, the parameters are identical to magmablas_ssymv_mgpu
(though some are unused here).
@see magmablas_ssymv_mgpu
@ingroup magma_sblas2
********************************************************************/
extern "C" magma_int_t
magmablas_ssymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_ssymv_mgpu
magma_int_t n,
float alpha, // unused
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
float const *x, magma_int_t incx, // unused
float beta,
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const float c_one = MAGMA_S_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_sscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_saxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
2f129f9738ad21c4cc20ad2967b3a5b5c5680b11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TW 10
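// Tiled matrix multiply: c = a * b with a of size X x Y, b of size Y x Z and c of size
// X x Z, all stored row-major. Each TW x TW thread block computes one TW x TW tile of c,
// staging TW-wide strips of a and b through shared memory; out-of-range elements are
// loaded as zero so arbitrary X, Y, Z are handled. The launch must use blockDim = (TW, TW).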
__global__ void x_dot_w(float *a, float *b, float *c, const unsigned int X, const unsigned int Y, const unsigned int Z)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
float temp = 0;
__shared__ float S_X[TW][TW];
__shared__ float S_Y[TW][TW];
for (int t = 0; t < (Y-1)/TW + 1; t++) {
if(row < X && (t* TW +tx) < Y )
S_X[ty][tx] = a[row * Y + t*TW + tx];
else
S_X[ty][tx] = 0.0;
if ( (t* TW + ty) < Y && col < Z )
S_Y[ty][tx] = b[(t*TW + ty)* Z + col];
else
S_Y[ty][tx] = 0.0;
__syncthreads();
for (int k = 0; k < TW; k++) {
temp+= S_X[ty][k] * S_Y[k][tx];
}
__syncthreads();
}
if(row < X && col <Z) {
c[row * Z + col] = temp;
}
}
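// A minimal host-side launch sketch (not part of the original file): d_a, d_b and d_c are
// assumed to be device buffers of X*Y, Y*Z and X*Z floats that the caller has already
// allocated and filled.
static void launch_x_dot_w(float *d_a, float *d_b, float *d_c,
                           unsigned int X, unsigned int Y, unsigned int Z)
{
    dim3 block(TW, TW);
    dim3 grid((Z + TW - 1) / TW, (X + TW - 1) / TW);   // x covers columns of c, y covers rows
    hipLaunchKernelGGL(x_dot_w, grid, block, 0, 0, d_a, d_b, d_c, X, Y, Z);
    hipDeviceSynchronize();
}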
| 2f129f9738ad21c4cc20ad2967b3a5b5c5680b11.cu | #define TW 10
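// Tiled matrix multiply: c = a * b with a of size X x Y, b of size Y x Z and c of size
// X x Z, all row-major. Each TW x TW thread block produces one TW x TW tile of c from
// shared-memory strips of a and b, padding out-of-range loads with zero; launch with
// blockDim = (TW, TW).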
__global__ void x_dot_w(float *a, float *b, float *c, const unsigned int X, const unsigned int Y, const unsigned int Z)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
float temp = 0;
__shared__ float S_X [10][TW];
__shared__ float S_Y [10][TW];
for (int t = 0; t < (Y-1)/TW + 1; t++) {
if(row < X && (t* TW +tx) < Y )
S_X[ty][tx] = a[row * Y + t*TW + tx];
else
S_X[ty][tx] = 0.0;
if ( (t* TW + ty) < Y && col < Z )
S_Y[ty][tx] = b[(t*TW + ty)* Z + col];
else
S_Y[ty][tx] = 0.0;
__syncthreads();
for (int k = 0; k < TW; k++) {
temp+= S_X[ty][k] * S_Y[k][tx];
}
__syncthreads();
}
if(row < X && col <Z) {
c[row * Z + col] = temp;
}
}
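// A minimal host-side launch sketch (not part of the original file): d_a, d_b and d_c are
// assumed to be device allocations of X*Y, Y*Z and X*Z floats.
static void launch_x_dot_w(float *d_a, float *d_b, float *d_c,
                           unsigned int X, unsigned int Y, unsigned int Z)
{
    dim3 block(TW, TW);
    dim3 grid((Z + TW - 1) / TW, (X + TW - 1) / TW);   // x covers columns of c, y covers rows
    x_dot_w<<<grid, block>>>(d_a, d_b, d_c, X, Y, Z);
    cudaDeviceSynchronize();
}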
|
3c9a37bac7b3f043fafc26295639a3c2f05874d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/RandomNumbers.h"
namespace hpmc
{
namespace gpu
{
namespace kernel
{
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
\param ngpu Number of active devices
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const unsigned int ngpu)
{
// compute the output cell
unsigned int my_cell = 0;
my_cell = blockDim.x * blockIdx.x + threadIdx.x;
if (my_cell >= ci.getNumElements())
return;
unsigned int my_cell_size = 0;
// loop over neighboring cells and build up the expanded cell list
for (unsigned int offset = 0; offset < cadji.getW(); offset++)
{
unsigned int neigh_cell = d_cell_adj[cadji(offset, my_cell)];
// iterate over per-device cell lists
for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
{
unsigned int neigh_cell_size = d_cell_size[neigh_cell+igpu*ci.getNumElements()];
for (unsigned int k = 0; k < neigh_cell_size; k++)
{
// read in the index of the new particle to add to our cell
unsigned int new_idx = d_cell_idx[cli(k, neigh_cell)+igpu*cli.getNumElements()];
d_excell_idx[excli(my_cell_size, my_cell)] = new_idx;
my_cell_size++;
}
}
}
// write out the final size
d_excell_size[my_cell] = my_cell_size;
}
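// A host-side reference sketch of the same gather (illustration only, not part of HOOMD):
// it can be used to validate d_excell_idx/d_excell_size for one cell against host copies
// of the cell-list arrays; the h_* names are assumed host copies provided by the caller.
inline void hpmc_excell_cell_reference(unsigned int my_cell,
                                       unsigned int *h_excell_idx,
                                       unsigned int *h_excell_size,
                                       const Index2D& excli,
                                       const unsigned int *h_cell_idx,
                                       const unsigned int *h_cell_size,
                                       const unsigned int *h_cell_adj,
                                       const Index3D& ci,
                                       const Index2D& cli,
                                       const Index2D& cadji,
                                       const unsigned int ngpu)
    {
    unsigned int my_cell_size = 0;
    for (unsigned int offset = 0; offset < cadji.getW(); offset++)
        {
        unsigned int neigh_cell = h_cell_adj[cadji(offset, my_cell)];
        for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
            {
            unsigned int neigh_cell_size = h_cell_size[neigh_cell+igpu*ci.getNumElements()];
            for (unsigned int k = 0; k < neigh_cell_size; k++)
                {
                h_excell_idx[excli(my_cell_size, my_cell)] = h_cell_idx[cli(k, neigh_cell)+igpu*cli.getNumElements()];
                my_cell_size++;
                }
            }
        }
    h_excell_size[my_cell] = my_cell_size;
    }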
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim box,
const Scalar3 shift)
{
// identify the active cell that this thread handles
unsigned int my_pidx = blockIdx.x * blockDim.x + threadIdx.x;
// this thread is inactive if it indexes past the end of the particle list
if (my_pidx >= N)
return;
// pull in the current position
Scalar4 postype = d_postype[my_pidx];
// shift the position
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
pos += shift;
// wrap the particle back into the box
int3 image = d_image[my_pidx];
box.wrap(pos, image);
// write out the new position and orientation
d_postype[my_pidx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[my_pidx] = image;
}
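// A host-side sketch of the shift-and-wrap above (illustration only, not part of HOOMD),
// simplified to an orthorhombic, fully periodic box of edge lengths L centered at the
// origin, and assuming the shift is smaller than half a box length; BoxDim::wrap
// additionally handles triclinic tilt and per-direction periodicity.
inline void shift_wrap_reference(Scalar3& pos, int3& image, const Scalar3 shift, const Scalar3 L)
    {
    pos += shift;
    // x direction; y and z are handled the same way
    if (pos.x >= Scalar(0.5)*L.x)
        {
        pos.x -= L.x;
        image.x++;
        }
    else if (pos.x < Scalar(-0.5)*L.x)
        {
        pos.x += L.x;
        image.x--;
        }
    }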
//!< Kernel to accept/reject
__global__ void hpmc_accept(const unsigned int *d_update_order_by_ptl,
const unsigned int *d_trial_move_type,
const unsigned int *d_reject_out_of_cell,
unsigned int *d_reject,
unsigned int *d_reject_out,
const unsigned int *d_nneigh,
const unsigned int *d_nlist,
const unsigned int N_old,
const unsigned int N,
const unsigned int nwork,
const unsigned work_offset,
const unsigned int maxn,
bool patch,
const unsigned int *d_nlist_patch_old,
const unsigned int *d_nlist_patch_new,
const unsigned int *d_nneigh_patch_old,
const unsigned int *d_nneigh_patch_new,
const float *d_energy_old,
const float *d_energy_new,
const unsigned int maxn_patch,
unsigned int *d_condition,
const unsigned int seed,
const unsigned int select,
const unsigned int timestep)
{
unsigned offset = threadIdx.x;
unsigned int group_size = blockDim.x;
unsigned int group = threadIdx.y;
unsigned int n_groups = blockDim.y;
bool master = offset == 0;
// the particle we are handling
unsigned int i = blockIdx.x*n_groups + group;
bool active = true;
if (i >= nwork)
active = false;
i += work_offset;
extern __shared__ char sdata[];
float *s_energy_old = (float *) sdata;
float *s_energy_new = (float *) (s_energy_old + n_groups);
unsigned int *s_reject = (unsigned int *) (s_energy_new + n_groups);
bool move_active = false;
if (active && master)
{
s_reject[group] = d_reject_out_of_cell[i];
s_energy_old[group] = 0.0f;
s_energy_new[group] = 0.0f;
}
if (active)
{
move_active = d_trial_move_type[i] > 0;
}
__syncthreads();
if (active && move_active)
{
unsigned int update_order_i = d_update_order_by_ptl[i];
// iterate over overlapping neighbors in old configuration
unsigned int nneigh = d_nneigh[i];
bool accept = true;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist[cur_neigh+maxn*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
// acceptance, reject if current configuration of particle overlaps
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
accept = false;
break;
}
} // end loop over neighbors
if (!accept)
{
atomicMax(&s_reject[group], 1);
}
if (patch)
{
// iterate over overlapping neighbors in old configuration
float energy_old = 0.0f;
unsigned int nneigh = d_nneigh_patch_old[i];
bool evaluated = false;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist_patch_old[cur_neigh+maxn_patch*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
energy_old += d_energy_old[cur_neigh+maxn_patch*i];
evaluated = true;
}
} // end loop over neighbors
if (evaluated)
atomicAdd(&s_energy_old[group], energy_old);
// iterate over overlapping neighbors in new configuration
float energy_new = 0.0f;
nneigh = d_nneigh_patch_new[i];
evaluated = false;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist_patch_new[cur_neigh+maxn_patch*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
energy_new += d_energy_new[cur_neigh+maxn_patch*i];
evaluated = true;
}
} // end loop over neighbors
if (evaluated)
atomicAdd(&s_energy_new[group], energy_new);
}
} // end if (active && move_active)
__syncthreads();
if (master && active && move_active)
{
float delta_U = s_energy_new[group] - s_energy_old[group];
// Metropolis-Hastings
hoomd::RandomGenerator rng_i(hoomd::RNGIdentifier::HPMCMonoAccept, seed, i, select, timestep);
bool accept = !s_reject[group] && (!patch || (hoomd::detail::generate_canonical<double>(rng_i) < slow::exp(-delta_U)));
if ((accept && d_reject[i]) || (!accept && !d_reject[i]))
{
// flag that we're not done yet (a trivial race condition upon write)
*d_condition = 1;
}
// write out to device memory
d_reject_out[i] = accept ? 0 : 1;
}
}
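// A scalar sketch of the acceptance rule applied above (illustration only, not part of
// HOOMD): `u` stands for the canonical uniform variate drawn from the per-particle RNG
// stream, and delta_U is the patch energy difference (new - old).
inline bool metropolis_accept_sketch(bool overlap_reject, float delta_U, double u)
    {
    if (overlap_reject)
        return false;                        // a hard overlap always rejects the move
    return u < exp(-(double)delta_U);        // downhill moves always accepted, uphill with Boltzmann weight
    }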
} // end namespace kernel
//! Driver for kernel::hpmc_excell()
void hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D& excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const unsigned int ngpu,
const unsigned int block_size)
{
assert(d_excell_idx);
assert(d_excell_size);
assert(d_cell_idx);
assert(d_cell_size);
assert(d_cell_adj);
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_excell));
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
dim3 threads(run_block_size, 1, 1);
dim3 grid(ci.getNumElements() / run_block_size + 1, 1, 1);
hipLaunchKernelGGL(kernel::hpmc_excell, dim3(grid), dim3(threads), 0, 0, d_excell_idx,
d_excell_size,
excli,
d_cell_idx,
d_cell_size,
d_cell_adj,
ci,
cli,
cadji,
ngpu);
}
//! Kernel driver for kernel::hpmc_shift()
void hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim& box,
const Scalar3 shift,
const unsigned int block_size)
{
assert(d_postype);
assert(d_image);
// setup the grid to run the kernel
dim3 threads_shift(block_size, 1, 1);
dim3 grid_shift(N / block_size + 1, 1, 1);
hipLaunchKernelGGL(kernel::hpmc_shift, dim3(grid_shift), dim3(threads_shift), 0, 0, d_postype,
d_image,
N,
box,
shift);
// after this kernel we return control of cuda managed memory to the host
hipDeviceSynchronize();
}
void hpmc_accept(const unsigned int *d_update_order_by_ptl,
const unsigned int *d_trial_move_type,
const unsigned int *d_reject_out_of_cell,
unsigned int *d_reject,
unsigned int *d_reject_out,
const unsigned int *d_nneigh,
const unsigned int *d_nlist,
const unsigned int N_old,
const unsigned int N,
const GPUPartition& gpu_partition,
const unsigned int maxn,
bool patch,
const unsigned int *d_nlist_patch_old,
const unsigned int *d_nlist_patch_new,
const unsigned int *d_nneigh_patch_old,
const unsigned int *d_nneigh_patch_new,
const float *d_energy_old,
const float *d_energy_new,
const unsigned int maxn_patch,
unsigned int *d_condition,
const unsigned int seed,
const unsigned int select,
const unsigned int timestep,
const unsigned int block_size,
const unsigned int tpp)
{
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_accept));
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
// threads per particle
unsigned int cur_tpp = min(run_block_size,tpp);
while (run_block_size % cur_tpp != 0)
cur_tpp--;
unsigned int n_groups = run_block_size/cur_tpp;
dim3 threads(cur_tpp, n_groups, 1);
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
const unsigned int num_blocks = (nwork + n_groups - 1)/n_groups;
dim3 grid(num_blocks, 1, 1);
unsigned int shared_bytes = n_groups * (2*sizeof(float) + sizeof(unsigned int));
hipLaunchKernelGGL(kernel::hpmc_accept, grid, threads, shared_bytes, 0,
d_update_order_by_ptl,
d_trial_move_type,
d_reject_out_of_cell,
d_reject,
d_reject_out,
d_nneigh,
d_nlist,
N_old,
N,
nwork,
range.first,
maxn,
patch,
d_nlist_patch_old,
d_nlist_patch_new,
d_nneigh_patch_old,
d_nneigh_patch_new,
d_energy_old,
d_energy_new,
maxn_patch,
d_condition,
seed,
select,
timestep);
}
}
} // end namespace gpu
} // end namespace hpmc
| 3c9a37bac7b3f043fafc26295639a3c2f05874d4.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/RandomNumbers.h"
namespace hpmc
{
namespace gpu
{
namespace kernel
{
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
\param ngpu Number of active devices
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const unsigned int ngpu)
{
// compute the output cell
unsigned int my_cell = 0;
my_cell = blockDim.x * blockIdx.x + threadIdx.x;
if (my_cell >= ci.getNumElements())
return;
unsigned int my_cell_size = 0;
// loop over neighboring cells and build up the expanded cell list
for (unsigned int offset = 0; offset < cadji.getW(); offset++)
{
unsigned int neigh_cell = d_cell_adj[cadji(offset, my_cell)];
// iterate over per-device cell lists
for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
{
unsigned int neigh_cell_size = d_cell_size[neigh_cell+igpu*ci.getNumElements()];
for (unsigned int k = 0; k < neigh_cell_size; k++)
{
// read in the index of the new particle to add to our cell
unsigned int new_idx = d_cell_idx[cli(k, neigh_cell)+igpu*cli.getNumElements()];
d_excell_idx[excli(my_cell_size, my_cell)] = new_idx;
my_cell_size++;
}
}
}
// write out the final size
d_excell_size[my_cell] = my_cell_size;
}
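// A host-side sizing sketch (illustration only, not part of HOOMD): an upper bound on the
// number of entries a single expanded cell can receive from the gather above, where Nmax
// is assumed to be the largest per-device cell population found in d_cell_size.
inline unsigned int excell_capacity_bound(const Index2D& cadji, unsigned int Nmax, unsigned int ngpu)
    {
    // each of the cadji.getW() adjacent cells contributes at most Nmax indices per device list
    return cadji.getW() * Nmax * ngpu;
    }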
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim box,
const Scalar3 shift)
{
// identify the active cell that this thread handles
unsigned int my_pidx = blockIdx.x * blockDim.x + threadIdx.x;
// this thread is inactive if it indexes past the end of the particle list
if (my_pidx >= N)
return;
// pull in the current position
Scalar4 postype = d_postype[my_pidx];
// shift the position
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
pos += shift;
// wrap the particle back into the box
int3 image = d_image[my_pidx];
box.wrap(pos, image);
// write out the new position and orientation
d_postype[my_pidx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[my_pidx] = image;
}
//!< Kernel to accept/reject
__global__ void hpmc_accept(const unsigned int *d_update_order_by_ptl,
const unsigned int *d_trial_move_type,
const unsigned int *d_reject_out_of_cell,
unsigned int *d_reject,
unsigned int *d_reject_out,
const unsigned int *d_nneigh,
const unsigned int *d_nlist,
const unsigned int N_old,
const unsigned int N,
const unsigned int nwork,
const unsigned work_offset,
const unsigned int maxn,
bool patch,
const unsigned int *d_nlist_patch_old,
const unsigned int *d_nlist_patch_new,
const unsigned int *d_nneigh_patch_old,
const unsigned int *d_nneigh_patch_new,
const float *d_energy_old,
const float *d_energy_new,
const unsigned int maxn_patch,
unsigned int *d_condition,
const unsigned int seed,
const unsigned int select,
const unsigned int timestep)
{
unsigned offset = threadIdx.x;
unsigned int group_size = blockDim.x;
unsigned int group = threadIdx.y;
unsigned int n_groups = blockDim.y;
bool master = offset == 0;
// the particle we are handling
unsigned int i = blockIdx.x*n_groups + group;
bool active = true;
if (i >= nwork)
active = false;
i += work_offset;
extern __shared__ char sdata[];
float *s_energy_old = (float *) sdata;
float *s_energy_new = (float *) (s_energy_old + n_groups);
unsigned int *s_reject = (unsigned int *) (s_energy_new + n_groups);
bool move_active = false;
if (active && master)
{
s_reject[group] = d_reject_out_of_cell[i];
s_energy_old[group] = 0.0f;
s_energy_new[group] = 0.0f;
}
if (active)
{
move_active = d_trial_move_type[i] > 0;
}
__syncthreads();
if (active && move_active)
{
unsigned int update_order_i = d_update_order_by_ptl[i];
// iterate over overlapping neighbors in old configuration
unsigned int nneigh = d_nneigh[i];
bool accept = true;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist[cur_neigh+maxn*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
// acceptance, reject if current configuration of particle overlaps
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
accept = false;
break;
}
} // end loop over neighbors
if (!accept)
{
atomicMax(&s_reject[group], 1);
}
if (patch)
{
// iterate over overlapping neighbors in old configuration
float energy_old = 0.0f;
unsigned int nneigh = d_nneigh_patch_old[i];
bool evaluated = false;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist_patch_old[cur_neigh+maxn_patch*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
energy_old += d_energy_old[cur_neigh+maxn_patch*i];
evaluated = true;
}
} // end loop over neighbors
if (evaluated)
atomicAdd(&s_energy_old[group], energy_old);
// iterate over overlapping neighbors in new configuration
float energy_new = 0.0f;
nneigh = d_nneigh_patch_new[i];
evaluated = false;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist_patch_new[cur_neigh+maxn_patch*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
energy_new += d_energy_new[cur_neigh+maxn_patch*i];
evaluated = true;
}
} // end loop over neighbors
if (evaluated)
atomicAdd(&s_energy_new[group], energy_new);
}
} // end if (active && move_active)
__syncthreads();
if (master && active && move_active)
{
float delta_U = s_energy_new[group] - s_energy_old[group];
// Metropolis-Hastings
hoomd::RandomGenerator rng_i(hoomd::RNGIdentifier::HPMCMonoAccept, seed, i, select, timestep);
bool accept = !s_reject[group] && (!patch || (hoomd::detail::generate_canonical<double>(rng_i) < slow::exp(-delta_U)));
if ((accept && d_reject[i]) || (!accept && !d_reject[i]))
{
// flag that we're not done yet (a trivial race condition upon write)
*d_condition = 1;
}
// write out to device memory
d_reject_out[i] = accept ? 0 : 1;
}
}
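// Implementation note: each (block, group) pair of hpmc_accept handles one particle i, with the
// threads of the group striding through its neighbor list in parallel. A trial move is rejected
// when any neighbor overlaps i in that neighbor's current configuration (old if the neighbor has
// not yet been updated this sweep, new otherwise). When patch interactions are enabled the same
// bookkeeping accumulates old/new pair energies and the final Metropolis test accepts with
// probability min(1, exp(-delta_U)). Writing *d_condition signals that some particle flipped its
// accept/reject state, so the caller is expected to re-run this kernel until no flags change.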
} // end namespace kernel
//! Driver for kernel::hpmc_excell()
void hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D& excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const unsigned int ngpu,
const unsigned int block_size)
{
assert(d_excell_idx);
assert(d_excell_size);
assert(d_cell_idx);
assert(d_cell_size);
assert(d_cell_adj);
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_excell));
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
dim3 threads(run_block_size, 1, 1);
dim3 grid(ci.getNumElements() / run_block_size + 1, 1, 1);
hipLaunchKernelGGL(kernel::hpmc_excell, dim3(grid), dim3(threads), 0, 0, d_excell_idx,
d_excell_size,
excli,
d_cell_idx,
d_cell_size,
d_cell_adj,
ci,
cli,
cadji,
ngpu);
}
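// Note: the drivers in this file follow the usual pattern of querying a kernel's maximum block
// size once through hipFuncGetAttributes, caching it in a function-local static, and clamping
// the caller's requested block size against it, e.g.
//     hipFuncAttributes attr;
//     hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_excell));
//     unsigned int run_block_size = min(block_size, (unsigned int)attr.maxThreadsPerBlock);
// hpmc_shift below skips the query and simply trusts the block size passed by the caller.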
//! Kernel driver for kernel::hpmc_shift()
void hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim& box,
const Scalar3 shift,
const unsigned int block_size)
{
assert(d_postype);
assert(d_image);
// setup the grid to run the kernel
dim3 threads_shift(block_size, 1, 1);
dim3 grid_shift(N / block_size + 1, 1, 1);
hipLaunchKernelGGL(kernel::hpmc_shift, dim3(grid_shift), dim3(threads_shift), 0, 0, d_postype,
d_image,
N,
box,
shift);
// after this kernel we return control of cuda managed memory to the host
hipDeviceSynchronize();
}
void hpmc_accept(const unsigned int *d_update_order_by_ptl,
const unsigned int *d_trial_move_type,
const unsigned int *d_reject_out_of_cell,
unsigned int *d_reject,
unsigned int *d_reject_out,
const unsigned int *d_nneigh,
const unsigned int *d_nlist,
const unsigned int N_old,
const unsigned int N,
const GPUPartition& gpu_partition,
const unsigned int maxn,
bool patch,
const unsigned int *d_nlist_patch_old,
const unsigned int *d_nlist_patch_new,
const unsigned int *d_nneigh_patch_old,
const unsigned int *d_nneigh_patch_new,
const float *d_energy_old,
const float *d_energy_new,
const unsigned int maxn_patch,
unsigned int *d_condition,
const unsigned int seed,
const unsigned int select,
const unsigned int timestep,
const unsigned int block_size,
const unsigned int tpp)
{
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_accept));
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
// threads per particle
unsigned int cur_tpp = min(run_block_size,tpp);
while (run_block_size % cur_tpp != 0)
cur_tpp--;
unsigned int n_groups = run_block_size/cur_tpp;
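    // Worked example: with a requested block_size of 256 and tpp = 6 (and a kernel that allows at
    // least 256 threads per block), the loop above lowers cur_tpp to 4 -- the largest divisor of
    // 256 not exceeding 6 -- giving n_groups = 64 particles per block and threads = dim3(4, 64, 1).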
dim3 threads(cur_tpp, n_groups, 1);
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
const unsigned int num_blocks = (nwork + n_groups - 1)/n_groups;
dim3 grid(num_blocks, 1, 1);
unsigned int shared_bytes = n_groups * (2*sizeof(float) + sizeof(unsigned int));
hipLaunchKernelGGL(kernel::hpmc_accept, grid, threads, shared_bytes, 0,
d_update_order_by_ptl,
d_trial_move_type,
d_reject_out_of_cell,
d_reject,
d_reject_out,
d_nneigh,
d_nlist,
N_old,
N,
nwork,
range.first,
maxn,
patch,
d_nlist_patch_old,
d_nlist_patch_new,
d_nneigh_patch_old,
d_nneigh_patch_new,
d_energy_old,
d_energy_new,
maxn_patch,
d_condition,
seed,
select,
timestep);
}
}
} // end namespace gpu
} // end namespace hpmc
|
d184cb458793945808a095725216bc351a4e04da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_activation.h"
#include "hip/hip_fp16.h"
namespace anakin{
namespace saber{
template<typename Dtype>
__global__ void ker_relu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype neg_slop,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > Dtype(0) ? in_var : in_var * neg_slop;
}
}
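// Implementation note: every activation kernel in this file decomposes the flat loop index into
// (n, c, h, w) for an NCHW tensor and rebuilds the input/output offsets from explicit strides, so
// non-contiguous tensors work as long as get_stride() reports element strides. For a contiguous
// NCHW tensor the strides are presumably (C*H*W, H*W, W, 1), in which case in_idx == out_idx == tid.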
template<typename Dtype>
__global__ void ker_sigmoid_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = Dtype( Dtype(1) / (Dtype(1)+ expf(-in_var)));
}
}
template<typename Dtype>
__global__ void ker_tanh_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = Dtype( (expf(in_var) - expf(-in_var)) / (expf(in_var)+ expf(-in_var)));
}
}
template<typename Dtype>
__global__ void ker_clipped_relu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype clipped_threadhold,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
in_var = in_var > 0 ? in_var : 0;
out_data[out_idx] = in_var < clipped_threadhold? in_var : clipped_threadhold;
}
}
template<typename Dtype>
__global__ void ker_elu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype coef,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > 0 ? in_var : coef * (expf(in_var)-1);
}
}
template<typename Dtype>
__global__ void ker_prelu_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
const Dtype* slope, bool is_channel_shared,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
if (is_channel_shared) {
out_data[out_idx] = in_var > 0 ? in_var : slope[0] * in_var;
} else {
out_data[out_idx] = in_var > 0 ? in_var : slope[c] * in_var;
}
}
}
template<typename Dtype>
__global__ void ker_prelu_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
const Dtype slope, bool is_channel_shared,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > 0 ? in_var : slope * in_var;
}
}
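// Note: ker_prelu_fwd is overloaded -- the first version takes a device pointer to per-channel
// (or shared) slopes, this second one takes a single scalar slope. The dispatch below selects the
// scalar form when prelu_param.slope is null, reusing param.negative_slope as the slope value,
// and otherwise indexes the slope tensor by channel.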
template <>
SaberStatus SaberActivation<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, \
NCHW, NCHW, NCHW>::dispatch( \
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
ActivationParam<OpTensor>& param) {
Shape in_shape = inputs[0]->valid_shape();
Shape out_shape = outputs[0]->valid_shape();
Shape stride_in = inputs[0]->get_stride();
Shape stride_out = outputs[0]->get_stride();
const InDataType *in_data = (const InDataType*)inputs[0]->data();
OutDataType *out_data = (OutDataType*)outputs[0]->mutable_data();
const int count = inputs[0]->valid_size();
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
InDataType negative_slope = param.negative_slope;
InDataType coef = param.coef;
switch (param.active){
case Active_relu:
hipLaunchKernelGGL(( ker_relu_fwd<InDataType>)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count, negative_slope,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_sigmoid:
hipLaunchKernelGGL(( ker_sigmoid_fwd<InDataType>)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_tanh:
hipLaunchKernelGGL(( ker_tanh_fwd<InDataType>)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_clipped_relu:
hipLaunchKernelGGL(( ker_clipped_relu_fwd<InDataType>)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_elu:
hipLaunchKernelGGL(( ker_elu_fwd<InDataType>)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_prelu:
auto prelu_param = param.prelu_param;
            if (param.prelu_param.slope == nullptr) {
                hipLaunchKernelGGL(( ker_prelu_fwd<InDataType>)
                        , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
                        out_data, in_data, count, param.negative_slope, prelu_param.channel_shared,
                        in_shape[0], in_shape[1], in_shape[2], in_shape[3],
                        stride_in[0], stride_in[1], stride_in[2], stride_in[3],
                        stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
            } else {
                hipLaunchKernelGGL(( ker_prelu_fwd<InDataType>)
                        , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
                        out_data, in_data, count, prelu_param.slope->data(), prelu_param.channel_shared,
                        in_shape[0], in_shape[1], in_shape[2], in_shape[3],
                        stride_in[0], stride_in[1], stride_in[2], stride_in[3],
                        stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
            }
break;
}
CUDA_POST_KERNEL_CHECK;
return SaberSuccess;
}
}
}
| d184cb458793945808a095725216bc351a4e04da.cu | #include "saber/funcs/impl/cuda/saber_activation.h"
#include "cuda_fp16.h"
namespace anakin{
namespace saber{
template<typename Dtype>
__global__ void ker_relu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype neg_slop,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > Dtype(0) ? in_var : in_var * neg_slop;
}
}
template<typename Dtype>
__global__ void ker_sigmoid_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = Dtype( Dtype(1) / (Dtype(1)+ expf(-in_var)));
}
}
template<typename Dtype>
__global__ void ker_tanh_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = Dtype( (expf(in_var) - expf(-in_var)) / (expf(in_var)+ expf(-in_var)));
}
}
template<typename Dtype>
__global__ void ker_clipped_relu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype clipped_threadhold,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
in_var = in_var > 0 ? in_var : 0;
out_data[out_idx] = in_var < clipped_threadhold? in_var : clipped_threadhold;
}
}
template<typename Dtype>
__global__ void ker_elu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype coef,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > 0 ? in_var : coef * (expf(in_var)-1);
}
}
template<typename Dtype>
__global__ void ker_prelu_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
const Dtype* slope, bool is_channel_shared,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
if (is_channel_shared) {
out_data[out_idx] = in_var > 0 ? in_var : slope[0] * in_var;
} else {
out_data[out_idx] = in_var > 0 ? in_var : slope[c] * in_var;
}
}
}
template<typename Dtype>
__global__ void ker_prelu_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
const Dtype slope, bool is_channel_shared,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > 0 ? in_var : slope * in_var;
}
}
template <>
SaberStatus SaberActivation<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, \
NCHW, NCHW, NCHW>::dispatch( \
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
ActivationParam<OpTensor>& param) {
Shape in_shape = inputs[0]->valid_shape();
Shape out_shape = outputs[0]->valid_shape();
Shape stride_in = inputs[0]->get_stride();
Shape stride_out = outputs[0]->get_stride();
const InDataType *in_data = (const InDataType*)inputs[0]->data();
OutDataType *out_data = (OutDataType*)outputs[0]->mutable_data();
const int count = inputs[0]->valid_size();
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
InDataType negative_slope = param.negative_slope;
InDataType coef = param.coef;
switch (param.active){
case Active_relu:
ker_relu_fwd<InDataType>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, negative_slope,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_sigmoid:
ker_sigmoid_fwd<InDataType>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_tanh:
ker_tanh_fwd<InDataType>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_clipped_relu:
ker_clipped_relu_fwd<InDataType>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_elu:
ker_elu_fwd<InDataType>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
case Active_prelu:
auto prelu_param = param.prelu_param;
            if (param.prelu_param.slope == nullptr) {
                ker_prelu_fwd<InDataType>
                        <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
                        out_data, in_data, count, param.negative_slope, prelu_param.channel_shared,
                        in_shape[0], in_shape[1], in_shape[2], in_shape[3],
                        stride_in[0], stride_in[1], stride_in[2], stride_in[3],
                        stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
            } else {
                ker_prelu_fwd<InDataType>
                        <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
                        out_data, in_data, count, prelu_param.slope->data(), prelu_param.channel_shared,
                        in_shape[0], in_shape[1], in_shape[2], in_shape[3],
                        stride_in[0], stride_in[1], stride_in[2], stride_in[3],
                        stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
            }
break;
}
CUDA_POST_KERNEL_CHECK;
return SaberSuccess;
}
}
}
|
dc872a4c786b50488bca5edff4ab45acd54d46cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//STL
#include <iostream>
#include <vector>
#include <time.h>
#include <algorithm>
using std::cout; using std::endl; using namespace std;
unsigned i;
const unsigned N = 2048 * 4, bigN = 1000000;
unsigned gpuThr = 512;
unsigned gpuBl = N / gpuThr;
std::vector < float > inputVec( N );
void hostCalculateDCTPSNR( vector < float > &vec, float & vecMedian );
//=========================== gpu ===========================
__device__ float d_x[ N ], d_Xfp32[ N ], d_ix[ N ], d_rms[ N ];
__constant__ unsigned d_N[ 1 ];
__constant__ float d_median[ 1 ], d_max[ 1 ];
__device__ float d_inOut[ bigN ];
__device__ float d_inOutCopy[ bigN ];
__global__ void dummyCopy()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
d_inOutCopy[ ind ] = d_inOut[ ind ];
}
__global__ void psnr()
{
double acc = 0.0f;
for ( unsigned i = 0; i < d_N[ 0 ]; i++ )
acc += d_rms[ i ];
acc /= float( d_N[ 0 ] );
printf( "GPU PSNR: %f[dB]\n ", 10.0f * log10f( ( d_max[ 0 ] * d_max[ 0 ] ) / ( acc ) ) );
}
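// Note: this matches the usual definition PSNR = 10 * log10( MAX^2 / MSE ), where MAX is the
// largest value of the reference signal and MSE is the mean of the squared errors accumulated
// into d_rms[] by the rms() kernel below.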
__global__ void rms()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
float x1 = d_x[ ind ] + d_median[ 0 ];
float x2 = d_ix[ ind ];
d_rms[ ind ] = ( x1 - x2 ) * ( x1 - x2 );
}
__global__ void printKernel()
{
printf( "======= GPU SIDE: =========\n" );
unsigned resNo = 3;
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_x[%i]: %4f\n", i, d_x[ i ] + d_median[ 0 ] );
/*
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_xNorm[%i]: %.4f\n", i, d_x[ i ] );
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_Xfp32[%i]: %.4f\n", i, d_Xfp32[ i ] );
*/
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_ix[%i]: %.4f\n", i, d_ix[ i ] );
for ( unsigned i = d_N[ 0 ] - 1; i > d_N[ 0 ] - 4; i-- )
printf( "d_x[%i]: %.4f\n", i, d_x[ i ] + d_median[ 0 ] );
for ( unsigned i = d_N[ 0 ] - 1; i > d_N[ 0 ] - 4; i-- )
printf( "d_ix[%i]: %.4f\n", i, d_ix[ i ] );
}
__global__ void idctKernelFloat()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
float constVal = ( float( ind ) + 0.5f ) * 3.14159265f / float( d_N[ 0 ] );
float sqrConst = sqrtf( 2.0f / float( d_N[ 0 ] ) );
float tmpX = sqrtf( 1.0f / float( d_N[ 0 ] ) ) * d_Xfp32[ 0 ];
float accDC = 0.0f, tmpx = 0.0f;
for ( unsigned k = 1; k < N; k++ )
{
tmpx = d_Xfp32[ k ];
tmpX += tmpx * sqrConst * __cosf( constVal * ( float( k ) ) );
accDC += tmpx;
}
d_ix[ ind ] = tmpX + d_median[ 0 ];
}
__global__ void dctKernelFloat()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
float constVal = float( ind ) * 3.14159265f / float( d_N[ 0 ] );
float sqrConst = sqrtf( 2.0f / float( d_N[ 0 ] ) );
float tmpX = 0.0f, accDC = 0.0f, tmpx = 0.0f;
for ( unsigned i = 0; i < N; i++ )
{
tmpx = d_x[ i ];
tmpX += sqrConst * tmpx * __cosf( constVal * ( float( i ) + 0.5f ) );
accDC += tmpx;
}
d_Xfp32[ ind ] = tmpX;
d_Xfp32[ 0 ] = accDC / sqrtf( float( d_N[ 0 ] ) );
}
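// Note: dctKernelFloat / idctKernelFloat implement the orthonormal DCT-II and its inverse
// (DCT-III):
//   X[0] = (1/sqrt(N)) * sum_i x[i]
//   X[k] = sqrt(2/N) * sum_i x[i] * cos(pi * k * (i + 0.5) / N),  k >= 1
//   x[i] = X[0]/sqrt(N) + sqrt(2/N) * sum_{k>=1} X[k] * cos(pi * k * (i + 0.5) / N)
// Every thread of dctKernelFloat recomputes and rewrites X[0] with the same value, which is
// redundant but harmless; idctKernelFloat additionally adds the stored median back into d_ix.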
// subtract the median from the input vector <float> so the values sit closer to zero, which improves float calculation precision
__global__ void dataMedianPreprocess()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
d_x[ ind ] -= d_median[ 0 ];
}
int main( int argc, char* argv[] )
{//memory copying
vector < float > h_vecIn;
for ( i = 0; i < bigN; i++ )
h_vecIn.push_back( rand() % 100 * 0.01f * i );
hipMemcpyToSymbol( d_inOut, &h_vecIn[ 0 ], sizeof( float ) * bigN );
vector < float > h_vecOut( bigN, 0.0f );
hipMemcpyFromSymbol( &h_vecOut[ 0 ], d_inOut, sizeof( float ) * bigN );
double acc = 0.0f; float x1 = 0.0f, x2 = 0.0f;
for ( i = 0; i < bigN; i++ )
{
x1 = h_vecIn[ i ];
x2 = h_vecOut[ i ];
acc += ( x1 - x2 ) * ( x1 - x2 );
}
acc /= double( bigN );
float maxEl = *std::max_element( h_vecIn.begin(), h_vecIn.end() );
printf( "psnr raw HOST2GPU copy: %f[dB]\n", 10.0f * log10( maxEl * maxEl / acc ) );
hipLaunchKernelGGL(( dummyCopy), dim3(bigN / 500), dim3(500) , 0, 0, );
hipMemcpyFromSymbol( &h_vecOut[ 0 ], d_inOutCopy, sizeof( float ) * bigN );
acc = 0.0f; x1 = 0.0f; x2 = 0.0f;
for ( i = 0; i < bigN; i++ )
{
x1 = h_vecIn[ i ];
x2 = h_vecOut[ i ];
acc += ( x1 - x2 ) * ( x1 - x2 );
}
acc /= double( bigN );
maxEl = *std::max_element( h_vecIn.begin(), h_vecIn.end() );
printf( "psnr raw GPU2GPU copy: %f[dB]\n", 10.0f * log10( maxEl * maxEl / acc ) );
hipFree( d_inOut ); hipFree( d_inOutCopy );
    //gpu DCT from definition accuracy
    for ( i = 0; i < (unsigned)inputVec.size(); i++ ) inputVec[ i ] = rand() % 100 * 0.001f * i;
inputVec[ 3 ] = 0.05f;
vector < float > sortVec( inputVec ); sort( sortVec.begin(), sortVec.end() );
float vecMedian = sortVec[ sortVec.size() / 2 ];
hipMemcpyToSymbol( d_x, &inputVec[ 0 ], sizeof( float ) * ( unsigned )inputVec.size() );
hipMemcpyToSymbol( d_N, &N, sizeof( unsigned ) );
hipMemcpyToSymbol( d_median, &vecMedian, sizeof( float ) );
hipMemcpyToSymbol( d_max, &sortVec[ sortVec.size() - 1 ], sizeof( float ) );
hipLaunchKernelGGL(( dataMedianPreprocess), dim3(gpuBl), dim3(gpuThr) , 0, 0, );
clock_t t = clock();
hipLaunchKernelGGL(( dctKernelFloat), dim3(gpuBl), dim3(gpuThr) , 0, 0, );
hipDeviceSynchronize();
cout << "CPU clocks GPU dct float accumulator: " << double( clock() - t ) << endl;
t = clock();
hipLaunchKernelGGL(( idctKernelFloat), dim3(gpuBl), dim3(gpuThr) , 0, 0, );
hipDeviceSynchronize();
cout << "CPU clocks GPU idct float accumulator: " << double( clock() - t ) << endl;
hipLaunchKernelGGL(( printKernel), dim3(1), dim3(1) , 0, 0, );
hipLaunchKernelGGL(( rms), dim3(gpuBl), dim3(gpuThr) , 0, 0, );
hipLaunchKernelGGL(( psnr), dim3(1), dim3(1) , 0, 0, );
//host DCT from definition accuracy
hostCalculateDCTPSNR( inputVec, vecMedian );
hipFree( d_x );
hipFree( d_ix );
hipFree( d_median );
hipFree( d_rms );
hipFree( d_max );
hipFree( d_Xfp32 );
hipFree( d_N );
hipDeviceSynchronize();
hipDeviceReset();
cout << endl << "PSNR - higher = better" << endl;
return 0;
}
void hostCalculateDCTPSNR( vector < float > &vec, float & vecMedian )
{
clock_t t;
unsigned vecSize = ( unsigned )vec.size();
for ( i = 0; i < vecSize; i++ )
vec[ i ] -= vecMedian;
vector < float > vecDCT( vecSize );
vector < float > ix( vecSize );
t = clock();
float dc = 0.0f;
for ( i = 0; i < vecSize; i++ )
dc += vec[ i ];
dc /= sqrt( vecSize );
vecDCT[ 0 ] = dc;
float acDCT = 0.0f, cons = sqrt( 2.0f / vecSize );
float pi = 3.14159265f;
for ( unsigned k = 1; k < vecSize; k++ )
{
acDCT = 0.0f;
for ( i = 0; i < vecSize; i++ )
acDCT += vec[ i ] * cos( pi * k * ( 2 * i + 1 ) / ( 2 * vecSize ) );
vecDCT[ k ] = cons * acDCT;
}
cout << "CPU clocks HOST dct float accumulator: " << double( clock() - t ) << endl;
t = clock();
float dcCons = ( 1.0f / sqrt( vecSize ) ) * vecDCT[ 0 ];
for ( i = 0; i < vecSize; i++ )
{
acDCT = 0.0f;
for ( unsigned k = 1; k < vecSize; k++ )
acDCT += vecDCT[ k ] * cos( pi * k * ( 2 * i + 1 ) / ( 2 * vecSize ) );
ix[ i ] = dcCons + cons * acDCT + vecMedian; //results median addition
}
cout << "CPU clocks HOST idct float accumulator: " << double( clock() - t ) << endl;
for ( i = 0; i < vecSize; i++ )
vec[ i ] += vecMedian;
cout << endl << "======= HOST SIDE: =========" << endl;
for ( i = 0; i < 3; i++ )
cout << "h_x[" << i << "]: " << vec[ i ] << endl;
for ( i = 0; i < 3; i++ )
cout << "h_ix[" << i << "]: " << ix[ i ] << endl;
for ( i = vecSize - 1; i > vecSize - 4; i-- )
cout << "h_x[" << i << "]: " << vec[ i ] << endl;
for ( i = vecSize - 1; i > vecSize - 4; i-- )
cout << "h_ix[" << i << "]: " << ix[ i ] << endl;
double mse = 0.0f;
for ( i = 0; i < vecSize; i++ )
mse += ( vec[ i ] - ix[ i ] ) * ( vec[ i ] - ix[ i ] );
mse /= vecSize;
double maxEl = *std::max_element( vec.begin(), vec.end() );
double psnr = 10.0f * log10( maxEl * maxEl / mse );
cout << "HOST PSNR: " << psnr << "[dB]" << endl << endl;
}
//P.S. PSNR( x1[], x2[] ) = +Inf dB for identical inputs x1[] and x2[]; it falls to a finite (lower) value as x1[] and x2[] diverge; higher = better accuracy to the true/real value
//P.P.S. the float datatype keeps the most mantissa precision for values inside the range [-1; +1]
| dc872a4c786b50488bca5edff4ab45acd54d46cf.cu | //STL
#include <iostream>
#include <vector>
#include <time.h>
#include <algorithm>
using std::cout; using std::endl; using namespace std;
unsigned i;
const unsigned N = 2048 * 4, bigN = 1000000;
unsigned gpuThr = 512;
unsigned gpuBl = N / gpuThr;
std::vector < float > inputVec( N );
void hostCalculateDCTPSNR( vector < float > &vec, float & vecMedian );
//=========================== gpu ===========================
__device__ float d_x[ N ], d_Xfp32[ N ], d_ix[ N ], d_rms[ N ];
__constant__ unsigned d_N[ 1 ];
__constant__ float d_median[ 1 ], d_max[ 1 ];
__device__ float d_inOut[ bigN ];
__device__ float d_inOutCopy[ bigN ];
__global__ void dummyCopy()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
d_inOutCopy[ ind ] = d_inOut[ ind ];
}
__global__ void psnr()
{
double acc = 0.0f;
for ( unsigned i = 0; i < d_N[ 0 ]; i++ )
acc += d_rms[ i ];
acc /= float( d_N[ 0 ] );
printf( "GPU PSNR: %f[dB]\n ", 10.0f * log10f( ( d_max[ 0 ] * d_max[ 0 ] ) / ( acc ) ) );
}
__global__ void rms()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
float x1 = d_x[ ind ] + d_median[ 0 ];
float x2 = d_ix[ ind ];
d_rms[ ind ] = ( x1 - x2 ) * ( x1 - x2 );
}
__global__ void printKernel()
{
printf( "======= GPU SIDE: =========\n" );
unsigned resNo = 3;
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_x[%i]: %4f\n", i, d_x[ i ] + d_median[ 0 ] );
/*
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_xNorm[%i]: %.4f\n", i, d_x[ i ] );
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_Xfp32[%i]: %.4f\n", i, d_Xfp32[ i ] );
*/
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_ix[%i]: %.4f\n", i, d_ix[ i ] );
for ( unsigned i = d_N[ 0 ] - 1; i > d_N[ 0 ] - 4; i-- )
printf( "d_x[%i]: %.4f\n", i, d_x[ i ] + d_median[ 0 ] );
for ( unsigned i = d_N[ 0 ] - 1; i > d_N[ 0 ] - 4; i-- )
printf( "d_ix[%i]: %.4f\n", i, d_ix[ i ] );
}
__global__ void idctKernelFloat()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
float constVal = ( float( ind ) + 0.5f ) * 3.14159265f / float( d_N[ 0 ] );
float sqrConst = sqrtf( 2.0f / float( d_N[ 0 ] ) );
float tmpX = sqrtf( 1.0f / float( d_N[ 0 ] ) ) * d_Xfp32[ 0 ];
float accDC = 0.0f, tmpx = 0.0f;
for ( unsigned k = 1; k < N; k++ )
{
tmpx = d_Xfp32[ k ];
tmpX += tmpx * sqrConst * __cosf( constVal * ( float( k ) ) );
accDC += tmpx;
}
d_ix[ ind ] = tmpX + d_median[ 0 ];
}
__global__ void dctKernelFloat()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
float constVal = float( ind ) * 3.14159265f / float( d_N[ 0 ] );
float sqrConst = sqrtf( 2.0f / float( d_N[ 0 ] ) );
float tmpX = 0.0f, accDC = 0.0f, tmpx = 0.0f;
for ( unsigned i = 0; i < N; i++ )
{
tmpx = d_x[ i ];
tmpX += sqrConst * tmpx * __cosf( constVal * ( float( i ) + 0.5f ) );
accDC += tmpx;
}
d_Xfp32[ ind ] = tmpX;
d_Xfp32[ 0 ] = accDC / sqrtf( float( d_N[ 0 ] ) );
}
// subtract the median from the input vector <float> so the values sit closer to zero, which improves float calculation precision
__global__ void dataMedianPreprocess()
{
unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
d_x[ ind ] -= d_median[ 0 ];
}
int main( int argc, char* argv[] )
{//memory copying
vector < float > h_vecIn;
for ( i = 0; i < bigN; i++ )
h_vecIn.push_back( rand() % 100 * 0.01f * i );
cudaMemcpyToSymbol( d_inOut, &h_vecIn[ 0 ], sizeof( float ) * bigN );
vector < float > h_vecOut( bigN, 0.0f );
cudaMemcpyFromSymbol( &h_vecOut[ 0 ], d_inOut, sizeof( float ) * bigN );
double acc = 0.0f; float x1 = 0.0f, x2 = 0.0f;
for ( i = 0; i < bigN; i++ )
{
x1 = h_vecIn[ i ];
x2 = h_vecOut[ i ];
acc += ( x1 - x2 ) * ( x1 - x2 );
}
acc /= double( bigN );
float maxEl = *std::max_element( h_vecIn.begin(), h_vecIn.end() );
printf( "psnr raw HOST2GPU copy: %f[dB]\n", 10.0f * log10( maxEl * maxEl / acc ) );
dummyCopy<<< bigN / 500, 500 >>>();
cudaMemcpyFromSymbol( &h_vecOut[ 0 ], d_inOutCopy, sizeof( float ) * bigN );
acc = 0.0f; x1 = 0.0f; x2 = 0.0f;
for ( i = 0; i < bigN; i++ )
{
x1 = h_vecIn[ i ];
x2 = h_vecOut[ i ];
acc += ( x1 - x2 ) * ( x1 - x2 );
}
acc /= double( bigN );
maxEl = *std::max_element( h_vecIn.begin(), h_vecIn.end() );
printf( "psnr raw GPU2GPU copy: %f[dB]\n", 10.0f * log10( maxEl * maxEl / acc ) );
cudaFree( d_inOut ); cudaFree( d_inOutCopy );
    //gpu DCT from definition accuracy
    for ( i = 0; i < (unsigned)inputVec.size(); i++ ) inputVec[ i ] = rand() % 100 * 0.001f * i;
inputVec[ 3 ] = 0.05f;
vector < float > sortVec( inputVec ); sort( sortVec.begin(), sortVec.end() );
float vecMedian = sortVec[ sortVec.size() / 2 ];
cudaMemcpyToSymbol( d_x, &inputVec[ 0 ], sizeof( float ) * ( unsigned )inputVec.size() );
cudaMemcpyToSymbol( d_N, &N, sizeof( unsigned ) );
cudaMemcpyToSymbol( d_median, &vecMedian, sizeof( float ) );
cudaMemcpyToSymbol( d_max, &sortVec[ sortVec.size() - 1 ], sizeof( float ) );
dataMedianPreprocess<<< gpuBl, gpuThr >>>();
clock_t t = clock();
dctKernelFloat<<< gpuBl, gpuThr >>>();
cudaDeviceSynchronize();
cout << "CPU clocks GPU dct float accumulator: " << double( clock() - t ) << endl;
t = clock();
idctKernelFloat<<< gpuBl, gpuThr >>>();
cudaDeviceSynchronize();
cout << "CPU clocks GPU idct float accumulator: " << double( clock() - t ) << endl;
printKernel<<< 1, 1 >>>();
rms<<< gpuBl, gpuThr >>>();
psnr<<< 1, 1 >>>();
//host DCT from definition accuracy
hostCalculateDCTPSNR( inputVec, vecMedian );
cudaFree( d_x );
cudaFree( d_ix );
cudaFree( d_median );
cudaFree( d_rms );
cudaFree( d_max );
cudaFree( d_Xfp32 );
cudaFree( d_N );
cudaDeviceSynchronize();
cudaDeviceReset();
cout << endl << "PSNR - higher = better" << endl;
return 0;
}
void hostCalculateDCTPSNR( vector < float > &vec, float & vecMedian )
{
clock_t t;
unsigned vecSize = ( unsigned )vec.size();
for ( i = 0; i < vecSize; i++ )
vec[ i ] -= vecMedian;
vector < float > vecDCT( vecSize );
vector < float > ix( vecSize );
t = clock();
float dc = 0.0f;
for ( i = 0; i < vecSize; i++ )
dc += vec[ i ];
dc /= sqrt( vecSize );
vecDCT[ 0 ] = dc;
float acDCT = 0.0f, cons = sqrt( 2.0f / vecSize );
float pi = 3.14159265f;
for ( unsigned k = 1; k < vecSize; k++ )
{
acDCT = 0.0f;
for ( i = 0; i < vecSize; i++ )
acDCT += vec[ i ] * cos( pi * k * ( 2 * i + 1 ) / ( 2 * vecSize ) );
vecDCT[ k ] = cons * acDCT;
}
cout << "CPU clocks HOST dct float accumulator: " << double( clock() - t ) << endl;
t = clock();
float dcCons = ( 1.0f / sqrt( vecSize ) ) * vecDCT[ 0 ];
for ( i = 0; i < vecSize; i++ )
{
acDCT = 0.0f;
for ( unsigned k = 1; k < vecSize; k++ )
acDCT += vecDCT[ k ] * cos( pi * k * ( 2 * i + 1 ) / ( 2 * vecSize ) );
ix[ i ] = dcCons + cons * acDCT + vecMedian; //results median addition
}
cout << "CPU clocks HOST idct float accumulator: " << double( clock() - t ) << endl;
for ( i = 0; i < vecSize; i++ )
vec[ i ] += vecMedian;
cout << endl << "======= HOST SIDE: =========" << endl;
for ( i = 0; i < 3; i++ )
cout << "h_x[" << i << "]: " << vec[ i ] << endl;
for ( i = 0; i < 3; i++ )
cout << "h_ix[" << i << "]: " << ix[ i ] << endl;
for ( i = vecSize - 1; i > vecSize - 4; i-- )
cout << "h_x[" << i << "]: " << vec[ i ] << endl;
for ( i = vecSize - 1; i > vecSize - 4; i-- )
cout << "h_ix[" << i << "]: " << ix[ i ] << endl;
double mse = 0.0f;
for ( i = 0; i < vecSize; i++ )
mse += ( vec[ i ] - ix[ i ] ) * ( vec[ i ] - ix[ i ] );
mse /= vecSize;
double maxEl = *std::max_element( vec.begin(), vec.end() );
double psnr = 10.0f * log10( maxEl * maxEl / mse );
cout << "HOST PSNR: " << psnr << "[dB]" << endl << endl;
}
//P.S. PSNR( x1[], x2[] ) = +InfdB for identical inputs x1[] and x2[]; PSNR = 0dB for x1[] != x2[]; higher = better accuracy to true/real value
//P.P.S for range [-1; +1] float datatype has biggest mantissa precision
|
12128d8bc3eda900ac7e2c71e0c3d3c96ce1db01.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <fstream>
#include <cmath>
#include <chrono>
#include <memory>
#include <cstring>
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <cudnn.h>
#include <rocblas.h>
#include "NvCaffeParser.h"
#include "NvInferPlugin.h"
#include "ros/ros.h"
#include "cv_bridge/cv_bridge.h"
#include "object_msgs/ObjectsInBoxes.h"
using namespace std;
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace plugin;
#define CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << status; \
abort(); \
} \
}
// stuff we know about the network and the caffe input/output blobs
static const int INPUT_C = 3;
static const int INPUT_H = 375;
static const int INPUT_W = 500;
static const int IM_INFO_SIZE = 3;
static const int OUTPUT_CLS_SIZE = 21;
static const int OUTPUT_BBOX_SIZE = OUTPUT_CLS_SIZE * 4;
const size_t stridesCv[3] = { INPUT_W * INPUT_C, INPUT_C, 1 };
const size_t strides[3] = { INPUT_H * INPUT_W, INPUT_W, 1 };
const float pixelMean[3] = { 102.9801f, 115.9465f, 122.7717f };
const std::string CLASSES[OUTPUT_CLS_SIZE]{ "background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train",
"tvmonitor" };
const char* INPUT_BLOB_NAME0 = "data";
const char* INPUT_BLOB_NAME1 = "im_info";
const char* OUTPUT_BLOB_NAME0 = "bbox_pred";
const char* OUTPUT_BLOB_NAME1 = "cls_prob";
const char* OUTPUT_BLOB_NAME2 = "rois";
const int poolingH = 7;
const int poolingW = 7;
const int featureStride = 16;
const int preNmsTop = 6000;
const int nmsMaxOut = 300;
const int anchorsRatioCount = 3;
const int anchorsScaleCount = 3;
const float iouThreshold = 0.7f;
const float minBoxSize = 16;
const float spatialScale = 0.0625f;
const float anchorsRatios[anchorsRatioCount] = { 0.5f, 1.0f, 2.0f };
const float anchorsScales[anchorsScaleCount] = { 8.0f, 16.0f, 32.0f };
float nms_threshold, score_threshold;
class Logger : public ILogger
{
void log(Severity severity, const char* msg) override
{
if (severity != Severity::kINFO)
ROS_INFO("[[trt_infer.cu]] %s", msg);
}
} gLogger;
template <int OutC>
class Reshape : public IPlugin
{
public:
Reshape()
{
}
Reshape(const void* buffer, size_t size)
{
assert(size == sizeof(mCopySize));
mCopySize = *reinterpret_cast<const size_t*>(buffer);
}
int getNbOutputs() const override
{
return 1;
}
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override
{
assert(nbInputDims == 1);
assert(index == 0);
assert(inputs[index].nbDims == 3);
assert((inputs[0].d[0]) * (inputs[0].d[1]) % OutC == 0);
return DimsCHW(OutC, inputs[0].d[0] * inputs[0].d[1] / OutC, inputs[0].d[2]);
}
int initialize() override
{
return 0;
}
void terminate() override
{
}
size_t getWorkspaceSize(int) const override
{
return 0;
}
// currently it is not possible for a plugin to execute "in place". Therefore we memcpy the data from the input to the
// output buffer
int enqueue(int batchSize, const void* const* inputs, void** outputs, void*, hipStream_t stream) override
{
CHECK(hipMemcpyAsync(outputs[0], inputs[0], mCopySize * batchSize, hipMemcpyDeviceToDevice, stream));
return 0;
}
size_t getSerializationSize() override
{
return sizeof(mCopySize);
}
void serialize(void* buffer) override
{
*reinterpret_cast<size_t*>(buffer) = mCopySize;
}
void configure(const Dims* inputs, int nbInputs, const Dims* outputs, int nbOutputs, int) override
{
mCopySize = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2] * sizeof(float);
}
protected:
size_t mCopySize;
};
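// Note: Reshape<OutC> only reinterprets the CHW shape of its single input as
// (OutC, C*H/OutC, W) and copies the data through unchanged; it backs the "ReshapeCTo2" and
// "ReshapeCTo18" layers that the plugin factory below creates, presumably to reshape the RPN
// objectness scores around their softmax.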
// integration for serialization
class PluginFactory : public nvinfer1::IPluginFactory, public nvcaffeparser1::IPluginFactory
{
public:
// deserialization plugin implementation
virtual nvinfer1::IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights,
int nbWeights) override
{
assert(isPlugin(layerName));
if (!strcmp(layerName, "ReshapeCTo2"))
{
assert(mPluginRshp2 == nullptr);
assert(nbWeights == 0 && weights == nullptr);
mPluginRshp2 = std::unique_ptr<Reshape<2>>(new Reshape<2>());
return mPluginRshp2.get();
}
else if (!strcmp(layerName, "ReshapeCTo18"))
{
assert(mPluginRshp18 == nullptr);
assert(nbWeights == 0 && weights == nullptr);
mPluginRshp18 = std::unique_ptr<Reshape<18>>(new Reshape<18>());
return mPluginRshp18.get();
}
else if (!strcmp(layerName, "RPROIFused"))
{
assert(mPluginRPROI == nullptr);
assert(nbWeights == 0 && weights == nullptr);
mPluginRPROI = std::unique_ptr<INvPlugin, decltype(nvPluginDeleter)>(
createFasterRCNNPlugin(featureStride, preNmsTop, nmsMaxOut, iouThreshold, minBoxSize, spatialScale,
DimsHW(poolingH, poolingW),
Weights{ nvinfer1::DataType::kFLOAT, anchorsRatios, anchorsRatioCount },
Weights{ nvinfer1::DataType::kFLOAT, anchorsScales, anchorsScaleCount }),
nvPluginDeleter);
return mPluginRPROI.get();
}
else
{
assert(0);
return nullptr;
}
}
IPlugin* createPlugin(const char* layerName, const void* serialData, size_t serialLength) override
{
assert(isPlugin(layerName));
if (!strcmp(layerName, "ReshapeCTo2"))
{
assert(mPluginRshp2 == nullptr);
mPluginRshp2 = std::unique_ptr<Reshape<2>>(new Reshape<2>(serialData, serialLength));
return mPluginRshp2.get();
}
else if (!strcmp(layerName, "ReshapeCTo18"))
{
assert(mPluginRshp18 == nullptr);
mPluginRshp18 = std::unique_ptr<Reshape<18>>(new Reshape<18>(serialData, serialLength));
return mPluginRshp18.get();
}
else if (!strcmp(layerName, "RPROIFused"))
{
assert(mPluginRPROI == nullptr);
mPluginRPROI = std::unique_ptr<INvPlugin, decltype(nvPluginDeleter)>(
createFasterRCNNPlugin(serialData, serialLength), nvPluginDeleter);
return mPluginRPROI.get();
}
else
{
assert(0);
return nullptr;
}
}
// caffe parser plugin implementation
bool isPlugin(const char* name) override
{
return (!strcmp(name, "ReshapeCTo2") || !strcmp(name, "ReshapeCTo18") || !strcmp(name, "RPROIFused"));
}
// the application has to destroy the plugin when it knows it's safe to do so
void destroyPlugin()
{
mPluginRshp2.release();
mPluginRshp2 = nullptr;
mPluginRshp18.release();
mPluginRshp18 = nullptr;
mPluginRPROI.release();
mPluginRPROI = nullptr;
}
std::unique_ptr<Reshape<2>> mPluginRshp2{ nullptr };
std::unique_ptr<Reshape<18>> mPluginRshp18{ nullptr };
void (*nvPluginDeleter)(INvPlugin*){ [](INvPlugin* ptr) { ptr->destroy(); } };
std::unique_ptr<INvPlugin, decltype(nvPluginDeleter)> mPluginRPROI{ nullptr, nvPluginDeleter };
};
void bboxTransformInvAndClip(float* rois, float* deltas, float* predBBoxes, float* imInfo, const int N,
const int nmsMaxOut, const int numCls)
{
float width, height, ctr_x, ctr_y;
float dx, dy, dw, dh, pred_ctr_x, pred_ctr_y, pred_w, pred_h;
float *deltas_offset, *predBBoxes_offset, *imInfo_offset;
for (int i = 0; i < N * nmsMaxOut; ++i)
{
width = rois[i * 4 + 2] - rois[i * 4] + 1;
height = rois[i * 4 + 3] - rois[i * 4 + 1] + 1;
ctr_x = rois[i * 4] + 0.5f * width;
ctr_y = rois[i * 4 + 1] + 0.5f * height;
deltas_offset = deltas + i * numCls * 4;
predBBoxes_offset = predBBoxes + i * numCls * 4;
imInfo_offset = imInfo + i / nmsMaxOut * 3;
for (int j = 0; j < numCls; ++j)
{
dx = deltas_offset[j * 4];
dy = deltas_offset[j * 4 + 1];
dw = deltas_offset[j * 4 + 2];
dh = deltas_offset[j * 4 + 3];
pred_ctr_x = dx * width + ctr_x;
pred_ctr_y = dy * height + ctr_y;
pred_w = exp(dw) * width;
pred_h = exp(dh) * height;
predBBoxes_offset[j * 4] = ::max(::min(pred_ctr_x - 0.5f * pred_w, imInfo_offset[1] - 1.f), 0.f);
predBBoxes_offset[j * 4 + 1] = ::max(::min(pred_ctr_y - 0.5f * pred_h, imInfo_offset[0] - 1.f), 0.f);
predBBoxes_offset[j * 4 + 2] = ::max(::min(pred_ctr_x + 0.5f * pred_w, imInfo_offset[1] - 1.f), 0.f);
predBBoxes_offset[j * 4 + 3] = ::max(::min(pred_ctr_y + 0.5f * pred_h, imInfo_offset[0] - 1.f), 0.f);
}
}
}
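// Note: this is the standard Faster R-CNN box decoding. Each ROI (x1, y1, x2, y2) is converted to
// center/size form, the per-class deltas (dx, dy, dw, dh) are applied as
//   pred_ctr = d * size + ctr,   pred_size = exp(d) * size,
// and the decoded corners are clipped to the image extent carried in imInfo (height, width, scale).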
std::vector<int> nms(std::vector<std::pair<float, int>>& score_index, float* bbox, const int classNum,
const int numClasses, const float nms_threshold)
{
auto overlap1D = [](float x1min, float x1max, float x2min, float x2max) -> float {
if (x1min > x2min)
{
std::swap(x1min, x2min);
std::swap(x1max, x2max);
}
return x1max < x2min ? 0 : ::min(x1max, x2max) - x2min;
};
auto computeIoU = [&overlap1D](float* bbox1, float* bbox2) -> float {
float overlapX = overlap1D(bbox1[0], bbox1[2], bbox2[0], bbox2[2]);
float overlapY = overlap1D(bbox1[1], bbox1[3], bbox2[1], bbox2[3]);
float area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]);
float area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]);
float overlap2D = overlapX * overlapY;
float u = area1 + area2 - overlap2D;
return u == 0 ? 0 : overlap2D / u;
};
std::vector<int> indices;
for (auto i : score_index)
{
const int idx = i.second;
bool keep = true;
for (unsigned k = 0; k < indices.size(); ++k)
{
if (keep)
{
const int kept_idx = indices[k];
float overlap =
computeIoU(&bbox[(idx * numClasses + classNum) * 4], &bbox[(kept_idx * numClasses + classNum) * 4]);
keep = overlap <= nms_threshold;
}
else
break;
}
if (keep)
indices.push_back(idx);
}
return indices;
}
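// Note: greedy per-class NMS. score_index is expected to arrive sorted by descending score, and a
// candidate box is kept only while its IoU with every previously kept box of the same class stays
// at or below nms_threshold.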
IRuntime* runtime;
ICudaEngine* engine;
IExecutionContext* context;
PluginFactory pluginFactory;
hipStream_t stream;
int inputIndex0, inputIndex1, outputIndex0, outputIndex1, outputIndex2;
void* buffers[5];
bool is_initialized = false;
void setup(std::string planFilename, float nms_th, float score_th)
{
nms_threshold = nms_th;
score_threshold = score_th;
ifstream planFile(planFilename.c_str());
if (!planFile.is_open())
{
ROS_INFO("Plan Not Found!!!");
is_initialized = false;
}
else
{
ROS_INFO("Begin loading plan...");
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
ROS_INFO("*** deserializing");
runtime = createInferRuntime(gLogger);
assert(runtime != nullptr);
engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), &pluginFactory);
assert(engine != nullptr);
context = engine->createExecutionContext();
assert(context != nullptr);
ROS_INFO("End loading plan...");
// In order to bind the buffers, we need to know the names of the input and output tensors.
// note that indices are guaranteed to be less than IEngine::getNbBindings()
inputIndex0 = engine->getBindingIndex(INPUT_BLOB_NAME0);
inputIndex1 = engine->getBindingIndex(INPUT_BLOB_NAME1);
outputIndex0 = engine->getBindingIndex(OUTPUT_BLOB_NAME0);
outputIndex1 = engine->getBindingIndex(OUTPUT_BLOB_NAME1);
outputIndex2 = engine->getBindingIndex(OUTPUT_BLOB_NAME2);
// create GPU buffers and a stream
CHECK(hipMalloc(&buffers[inputIndex0], INPUT_C * INPUT_H * INPUT_W * sizeof(float))); // data
CHECK(hipMalloc(&buffers[inputIndex1], IM_INFO_SIZE * sizeof(float))); // im_info
CHECK(hipMalloc(&buffers[outputIndex0], nmsMaxOut * OUTPUT_BBOX_SIZE * sizeof(float))); // bbox_pred
CHECK(hipMalloc(&buffers[outputIndex1], nmsMaxOut * OUTPUT_CLS_SIZE * sizeof(float))); // cls_prob
CHECK(hipMalloc(&buffers[outputIndex2], nmsMaxOut * 4 * sizeof(float))); // rois
CHECK(hipStreamCreate(&stream));
is_initialized = true;
}
}
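// Usage sketch (illustrative only): a ROS node built around this translation unit would typically
// call setup(plan_path, nms_th, score_th) once at startup, run
//     object_msgs::ObjectsInBoxes boxes = infer(image_msg);
// from its image callback (publishing the result), and call destroy() on shutdown. The names
// plan_path, image_msg and the publishing step are assumptions, not part of this file.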
void destroy(void)
{
if (is_initialized)
{
runtime->destroy();
engine->destroy();
context->destroy();
pluginFactory.destroyPlugin();
// Release the stream and the buffers
hipStreamDestroy(stream);
CHECK(hipFree(buffers[inputIndex0]));
CHECK(hipFree(buffers[inputIndex1]));
CHECK(hipFree(buffers[outputIndex0]));
CHECK(hipFree(buffers[outputIndex1]));
CHECK(hipFree(buffers[outputIndex2]));
}
is_initialized = false;
}
object_msgs::ObjectsInBoxes infer(const sensor_msgs::ImageConstPtr& color_msg)
{
object_msgs::ObjectsInBoxes bboxes;
// preprocessing
cv::Mat image = cv_bridge::toCvShare(color_msg, "bgr8")->image;
cv::Size imsize = image.size();
float inputImInfo[3]{ float(imsize.height), float(imsize.width), 1 };
cv::resize(image, image, cv::Size(INPUT_W, INPUT_H));
float* inputData = new float[INPUT_C * INPUT_H * INPUT_W];
for (int i = 0; i < INPUT_H; i++)
{
for (int j = 0; j < INPUT_W; j++)
{
for (int k = 0; k < INPUT_C; k++)
{
const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2];
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
inputData[offset] = (float)image.data[offsetCv] - pixelMean[k];
}
}
}
// DMA the input to the GPU, execute the batch asynchronously, and DMA it back:
auto t_start = chrono::high_resolution_clock::now();
CHECK(hipMemcpyAsync(buffers[inputIndex0], inputData, INPUT_C * INPUT_H * INPUT_W * sizeof(float),
hipMemcpyHostToDevice, stream));
CHECK(
hipMemcpyAsync(buffers[inputIndex1], inputImInfo, IM_INFO_SIZE * sizeof(float), hipMemcpyHostToDevice, stream));
context->enqueue(1, buffers, stream, nullptr);
// host memory for outputs
float* outputRois = new float[nmsMaxOut * 4];
float* outputBboxPred = new float[nmsMaxOut * OUTPUT_BBOX_SIZE];
float* outputClsProb = new float[nmsMaxOut * OUTPUT_CLS_SIZE];
CHECK(hipMemcpyAsync(outputBboxPred, buffers[outputIndex0], nmsMaxOut * OUTPUT_BBOX_SIZE * sizeof(float),
hipMemcpyDeviceToHost, stream));
CHECK(hipMemcpyAsync(outputClsProb, buffers[outputIndex1], nmsMaxOut * OUTPUT_CLS_SIZE * sizeof(float),
hipMemcpyDeviceToHost, stream));
CHECK(hipMemcpyAsync(outputRois, buffers[outputIndex2], nmsMaxOut * 4 * sizeof(float), hipMemcpyDeviceToHost,
stream));
hipStreamSynchronize(stream);
// predicted bounding boxes
float* predBBoxes = new float[nmsMaxOut * OUTPUT_BBOX_SIZE];
bboxTransformInvAndClip(outputRois, outputBboxPred, predBBoxes, inputImInfo, 1, nmsMaxOut, OUTPUT_CLS_SIZE);
  // with a batch of one image, the results for image 0 start at the beginning of the host buffers
  float* bbox = predBBoxes;
  float* scores = outputClsProb;
for (int c = 1; c < OUTPUT_CLS_SIZE; ++c) // skip the background
{
std::vector<std::pair<float, int>> score_index;
for (int r = 0; r < nmsMaxOut; ++r)
{
if (scores[r * OUTPUT_CLS_SIZE + c] > score_threshold)
{
score_index.push_back(std::make_pair(scores[r * OUTPUT_CLS_SIZE + c], r));
std::stable_sort(score_index.begin(), score_index.end(),
[](const std::pair<float, int>& pair1, const std::pair<float, int>& pair2) {
return pair1.first > pair2.first;
});
}
}
// apply NMS algorithm
std::vector<int> indices = nms(score_index, bbox, c, OUTPUT_CLS_SIZE, nms_threshold);
auto t_end = chrono::high_resolution_clock::now();
float total = chrono::duration<float, milli>(t_end - t_start).count();
for (unsigned k = 0; k < indices.size(); ++k)
{
object_msgs::ObjectInBox BBox;
BBox.object.object_name = CLASSES[c];
BBox.object.probability = scores[indices[k] * OUTPUT_CLS_SIZE + c];
BBox.roi.x_offset = bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4] * imsize.width;
BBox.roi.y_offset = bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 1] * imsize.height;
BBox.roi.width = (bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 2] - bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4]) *
imsize.width;
BBox.roi.height =
(bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 3] - bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 1]) *
imsize.height;
BBox.roi.do_rectify = false;
bboxes.objects_vector.push_back(BBox);
bboxes.inference_time_ms = total;
}
}
  // free the per-frame host buffers so repeated image callbacks do not leak memory
  delete[] inputData;
  delete[] outputRois;
  delete[] outputBboxPred;
  delete[] outputClsProb;
  delete[] predBBoxes;
  bboxes.header = color_msg->header;
  return bboxes;
} | 12128d8bc3eda900ac7e2c71e0c3d3c96ce1db01.cu | #include <cassert>
#include <fstream>
#include <cmath>
#include <chrono>
#include <memory>
#include <cstring>
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cudnn.h>
#include <cublas_v2.h>
#include "NvCaffeParser.h"
#include "NvInferPlugin.h"
#include "ros/ros.h"
#include "cv_bridge/cv_bridge.h"
#include "object_msgs/ObjectsInBoxes.h"
using namespace std;
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace plugin;
#define CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << status; \
abort(); \
} \
}
// stuff we know about the network and the caffe input/output blobs
static const int INPUT_C = 3;
static const int INPUT_H = 375;
static const int INPUT_W = 500;
static const int IM_INFO_SIZE = 3;
static const int OUTPUT_CLS_SIZE = 21;
static const int OUTPUT_BBOX_SIZE = OUTPUT_CLS_SIZE * 4;
const size_t stridesCv[3] = { INPUT_W * INPUT_C, INPUT_C, 1 };
const size_t strides[3] = { INPUT_H * INPUT_W, INPUT_W, 1 };
const float pixelMean[3] = { 102.9801f, 115.9465f, 122.7717f };
const std::string CLASSES[OUTPUT_CLS_SIZE]{ "background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train",
"tvmonitor" };
const char* INPUT_BLOB_NAME0 = "data";
const char* INPUT_BLOB_NAME1 = "im_info";
const char* OUTPUT_BLOB_NAME0 = "bbox_pred";
const char* OUTPUT_BLOB_NAME1 = "cls_prob";
const char* OUTPUT_BLOB_NAME2 = "rois";
const int poolingH = 7;
const int poolingW = 7;
const int featureStride = 16;
const int preNmsTop = 6000;
const int nmsMaxOut = 300;
const int anchorsRatioCount = 3;
const int anchorsScaleCount = 3;
const float iouThreshold = 0.7f;
const float minBoxSize = 16;
const float spatialScale = 0.0625f;
const float anchorsRatios[anchorsRatioCount] = { 0.5f, 1.0f, 2.0f };
const float anchorsScales[anchorsScaleCount] = { 8.0f, 16.0f, 32.0f };
float nms_threshold, score_threshold;
class Logger : public ILogger
{
void log(Severity severity, const char* msg) override
{
if (severity != Severity::kINFO)
ROS_INFO("[[trt_infer.cu]] %s", msg);
}
} gLogger;
template <int OutC>
class Reshape : public IPlugin
{
public:
Reshape()
{
}
Reshape(const void* buffer, size_t size)
{
assert(size == sizeof(mCopySize));
mCopySize = *reinterpret_cast<const size_t*>(buffer);
}
int getNbOutputs() const override
{
return 1;
}
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override
{
assert(nbInputDims == 1);
assert(index == 0);
assert(inputs[index].nbDims == 3);
assert((inputs[0].d[0]) * (inputs[0].d[1]) % OutC == 0);
return DimsCHW(OutC, inputs[0].d[0] * inputs[0].d[1] / OutC, inputs[0].d[2]);
}
int initialize() override
{
return 0;
}
void terminate() override
{
}
size_t getWorkspaceSize(int) const override
{
return 0;
}
// currently it is not possible for a plugin to execute "in place". Therefore we memcpy the data from the input to the
// output buffer
int enqueue(int batchSize, const void* const* inputs, void** outputs, void*, cudaStream_t stream) override
{
CHECK(cudaMemcpyAsync(outputs[0], inputs[0], mCopySize * batchSize, cudaMemcpyDeviceToDevice, stream));
return 0;
}
size_t getSerializationSize() override
{
return sizeof(mCopySize);
}
void serialize(void* buffer) override
{
*reinterpret_cast<size_t*>(buffer) = mCopySize;
}
void configure(const Dims* inputs, int nbInputs, const Dims* outputs, int nbOutputs, int) override
{
mCopySize = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2] * sizeof(float);
}
protected:
size_t mCopySize;
};
// integration for serialization
class PluginFactory : public nvinfer1::IPluginFactory, public nvcaffeparser1::IPluginFactory
{
public:
// deserialization plugin implementation
virtual nvinfer1::IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights,
int nbWeights) override
{
assert(isPlugin(layerName));
if (!strcmp(layerName, "ReshapeCTo2"))
{
assert(mPluginRshp2 == nullptr);
assert(nbWeights == 0 && weights == nullptr);
mPluginRshp2 = std::unique_ptr<Reshape<2>>(new Reshape<2>());
return mPluginRshp2.get();
}
else if (!strcmp(layerName, "ReshapeCTo18"))
{
assert(mPluginRshp18 == nullptr);
assert(nbWeights == 0 && weights == nullptr);
mPluginRshp18 = std::unique_ptr<Reshape<18>>(new Reshape<18>());
return mPluginRshp18.get();
}
else if (!strcmp(layerName, "RPROIFused"))
{
assert(mPluginRPROI == nullptr);
assert(nbWeights == 0 && weights == nullptr);
mPluginRPROI = std::unique_ptr<INvPlugin, decltype(nvPluginDeleter)>(
createFasterRCNNPlugin(featureStride, preNmsTop, nmsMaxOut, iouThreshold, minBoxSize, spatialScale,
DimsHW(poolingH, poolingW),
Weights{ nvinfer1::DataType::kFLOAT, anchorsRatios, anchorsRatioCount },
Weights{ nvinfer1::DataType::kFLOAT, anchorsScales, anchorsScaleCount }),
nvPluginDeleter);
return mPluginRPROI.get();
}
else
{
assert(0);
return nullptr;
}
}
IPlugin* createPlugin(const char* layerName, const void* serialData, size_t serialLength) override
{
assert(isPlugin(layerName));
if (!strcmp(layerName, "ReshapeCTo2"))
{
assert(mPluginRshp2 == nullptr);
mPluginRshp2 = std::unique_ptr<Reshape<2>>(new Reshape<2>(serialData, serialLength));
return mPluginRshp2.get();
}
else if (!strcmp(layerName, "ReshapeCTo18"))
{
assert(mPluginRshp18 == nullptr);
mPluginRshp18 = std::unique_ptr<Reshape<18>>(new Reshape<18>(serialData, serialLength));
return mPluginRshp18.get();
}
else if (!strcmp(layerName, "RPROIFused"))
{
assert(mPluginRPROI == nullptr);
mPluginRPROI = std::unique_ptr<INvPlugin, decltype(nvPluginDeleter)>(
createFasterRCNNPlugin(serialData, serialLength), nvPluginDeleter);
return mPluginRPROI.get();
}
else
{
assert(0);
return nullptr;
}
}
// caffe parser plugin implementation
bool isPlugin(const char* name) override
{
return (!strcmp(name, "ReshapeCTo2") || !strcmp(name, "ReshapeCTo18") || !strcmp(name, "RPROIFused"));
}
// the application has to destroy the plugin when it knows it's safe to do so
void destroyPlugin()
{
mPluginRshp2.release();
mPluginRshp2 = nullptr;
mPluginRshp18.release();
mPluginRshp18 = nullptr;
mPluginRPROI.release();
mPluginRPROI = nullptr;
}
std::unique_ptr<Reshape<2>> mPluginRshp2{ nullptr };
std::unique_ptr<Reshape<18>> mPluginRshp18{ nullptr };
void (*nvPluginDeleter)(INvPlugin*){ [](INvPlugin* ptr) { ptr->destroy(); } };
std::unique_ptr<INvPlugin, decltype(nvPluginDeleter)> mPluginRPROI{ nullptr, nvPluginDeleter };
};
void bboxTransformInvAndClip(float* rois, float* deltas, float* predBBoxes, float* imInfo, const int N,
const int nmsMaxOut, const int numCls)
{
float width, height, ctr_x, ctr_y;
float dx, dy, dw, dh, pred_ctr_x, pred_ctr_y, pred_w, pred_h;
float *deltas_offset, *predBBoxes_offset, *imInfo_offset;
for (int i = 0; i < N * nmsMaxOut; ++i)
{
width = rois[i * 4 + 2] - rois[i * 4] + 1;
height = rois[i * 4 + 3] - rois[i * 4 + 1] + 1;
ctr_x = rois[i * 4] + 0.5f * width;
ctr_y = rois[i * 4 + 1] + 0.5f * height;
deltas_offset = deltas + i * numCls * 4;
predBBoxes_offset = predBBoxes + i * numCls * 4;
imInfo_offset = imInfo + i / nmsMaxOut * 3;
for (int j = 0; j < numCls; ++j)
{
dx = deltas_offset[j * 4];
dy = deltas_offset[j * 4 + 1];
dw = deltas_offset[j * 4 + 2];
dh = deltas_offset[j * 4 + 3];
pred_ctr_x = dx * width + ctr_x;
pred_ctr_y = dy * height + ctr_y;
pred_w = exp(dw) * width;
pred_h = exp(dh) * height;
predBBoxes_offset[j * 4] = std::max(std::min(pred_ctr_x - 0.5f * pred_w, imInfo_offset[1] - 1.f), 0.f);
predBBoxes_offset[j * 4 + 1] = std::max(std::min(pred_ctr_y - 0.5f * pred_h, imInfo_offset[0] - 1.f), 0.f);
predBBoxes_offset[j * 4 + 2] = std::max(std::min(pred_ctr_x + 0.5f * pred_w, imInfo_offset[1] - 1.f), 0.f);
predBBoxes_offset[j * 4 + 3] = std::max(std::min(pred_ctr_y + 0.5f * pred_h, imInfo_offset[0] - 1.f), 0.f);
}
}
}
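// [Editor's note] Illustrative sketch only, not part of the original source:
// the same delta-to-box math as bboxTransformInvAndClip above, written out for
// a single ROI/delta pair. The helper name applyBoxDelta is hypothetical and
// is not used elsewhere in this file.
static inline void applyBoxDelta(const float roi[4], const float delta[4],
                                 float imH, float imW, float out[4])
{
  const float w = roi[2] - roi[0] + 1.f, h = roi[3] - roi[1] + 1.f;
  const float cx = roi[0] + 0.5f * w, cy = roi[1] + 0.5f * h;
  const float pcx = delta[0] * w + cx, pcy = delta[1] * h + cy;          // shift the center
  const float pw = std::exp(delta[2]) * w, ph = std::exp(delta[3]) * h;  // rescale width/height
  out[0] = std::max(std::min(pcx - 0.5f * pw, imW - 1.f), 0.f);  // x1, clipped to the image
  out[1] = std::max(std::min(pcy - 0.5f * ph, imH - 1.f), 0.f);  // y1
  out[2] = std::max(std::min(pcx + 0.5f * pw, imW - 1.f), 0.f);  // x2
  out[3] = std::max(std::min(pcy + 0.5f * ph, imH - 1.f), 0.f);  // y2
}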
std::vector<int> nms(std::vector<std::pair<float, int>>& score_index, float* bbox, const int classNum,
const int numClasses, const float nms_threshold)
{
auto overlap1D = [](float x1min, float x1max, float x2min, float x2max) -> float {
if (x1min > x2min)
{
std::swap(x1min, x2min);
std::swap(x1max, x2max);
}
return x1max < x2min ? 0 : std::min(x1max, x2max) - x2min;
};
auto computeIoU = [&overlap1D](float* bbox1, float* bbox2) -> float {
float overlapX = overlap1D(bbox1[0], bbox1[2], bbox2[0], bbox2[2]);
float overlapY = overlap1D(bbox1[1], bbox1[3], bbox2[1], bbox2[3]);
float area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]);
float area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]);
float overlap2D = overlapX * overlapY;
float u = area1 + area2 - overlap2D;
return u == 0 ? 0 : overlap2D / u;
};
std::vector<int> indices;
for (auto i : score_index)
{
const int idx = i.second;
bool keep = true;
for (unsigned k = 0; k < indices.size(); ++k)
{
if (keep)
{
const int kept_idx = indices[k];
float overlap =
computeIoU(&bbox[(idx * numClasses + classNum) * 4], &bbox[(kept_idx * numClasses + classNum) * 4]);
keep = overlap <= nms_threshold;
}
else
break;
}
if (keep)
indices.push_back(idx);
}
return indices;
}
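// [Editor's note] Worked example with hypothetical numbers, not part of the
// original source: boxes [0,0,10,10] and [5,5,15,15] overlap in a 5x5 patch,
// so IoU = 25 / (100 + 100 - 25) = 1/7, roughly 0.14. With nms_threshold = 0.3
// the lower-scoring box is kept (overlap <= threshold); with
// nms_threshold = 0.1 it is suppressed.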
IRuntime* runtime;
ICudaEngine* engine;
IExecutionContext* context;
PluginFactory pluginFactory;
cudaStream_t stream;
int inputIndex0, inputIndex1, outputIndex0, outputIndex1, outputIndex2;
void* buffers[5];
bool is_initialized = false;
void setup(std::string planFilename, float nms_th, float score_th)
{
nms_threshold = nms_th;
score_threshold = score_th;
ifstream planFile(planFilename.c_str());
if (!planFile.is_open())
{
ROS_INFO("Plan Not Found!!!");
is_initialized = false;
}
else
{
ROS_INFO("Begin loading plan...");
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
ROS_INFO("*** deserializing");
runtime = createInferRuntime(gLogger);
assert(runtime != nullptr);
engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), &pluginFactory);
assert(engine != nullptr);
context = engine->createExecutionContext();
assert(context != nullptr);
ROS_INFO("End loading plan...");
// In order to bind the buffers, we need to know the names of the input and output tensors.
// note that indices are guaranteed to be less than IEngine::getNbBindings()
inputIndex0 = engine->getBindingIndex(INPUT_BLOB_NAME0);
inputIndex1 = engine->getBindingIndex(INPUT_BLOB_NAME1);
outputIndex0 = engine->getBindingIndex(OUTPUT_BLOB_NAME0);
outputIndex1 = engine->getBindingIndex(OUTPUT_BLOB_NAME1);
outputIndex2 = engine->getBindingIndex(OUTPUT_BLOB_NAME2);
// create GPU buffers and a stream
CHECK(cudaMalloc(&buffers[inputIndex0], INPUT_C * INPUT_H * INPUT_W * sizeof(float))); // data
CHECK(cudaMalloc(&buffers[inputIndex1], IM_INFO_SIZE * sizeof(float))); // im_info
CHECK(cudaMalloc(&buffers[outputIndex0], nmsMaxOut * OUTPUT_BBOX_SIZE * sizeof(float))); // bbox_pred
CHECK(cudaMalloc(&buffers[outputIndex1], nmsMaxOut * OUTPUT_CLS_SIZE * sizeof(float))); // cls_prob
CHECK(cudaMalloc(&buffers[outputIndex2], nmsMaxOut * 4 * sizeof(float))); // rois
CHECK(cudaStreamCreate(&stream));
is_initialized = true;
}
}
void destroy(void)
{
if (is_initialized)
{
runtime->destroy();
engine->destroy();
context->destroy();
pluginFactory.destroyPlugin();
// Release the stream and the buffers
cudaStreamDestroy(stream);
CHECK(cudaFree(buffers[inputIndex0]));
CHECK(cudaFree(buffers[inputIndex1]));
CHECK(cudaFree(buffers[outputIndex0]));
CHECK(cudaFree(buffers[outputIndex1]));
CHECK(cudaFree(buffers[outputIndex2]));
}
is_initialized = false;
}
object_msgs::ObjectsInBoxes infer(const sensor_msgs::ImageConstPtr& color_msg)
{
object_msgs::ObjectsInBoxes bboxes;
// preprocessing
cv::Mat image = cv_bridge::toCvShare(color_msg, "bgr8")->image;
cv::Size imsize = image.size();
float inputImInfo[3]{ float(imsize.height), float(imsize.width), 1 };
cv::resize(image, image, cv::Size(INPUT_W, INPUT_H));
float* inputData = new float[INPUT_C * INPUT_H * INPUT_W];
for (int i = 0; i < INPUT_H; i++)
{
for (int j = 0; j < INPUT_W; j++)
{
for (int k = 0; k < INPUT_C; k++)
{
const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2];
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
inputData[offset] = (float)image.data[offsetCv] - pixelMean[k];
}
}
}
// DMA the input to the GPU, execute the batch asynchronously, and DMA it back:
auto t_start = chrono::high_resolution_clock::now();
CHECK(cudaMemcpyAsync(buffers[inputIndex0], inputData, INPUT_C * INPUT_H * INPUT_W * sizeof(float),
cudaMemcpyHostToDevice, stream));
CHECK(
cudaMemcpyAsync(buffers[inputIndex1], inputImInfo, IM_INFO_SIZE * sizeof(float), cudaMemcpyHostToDevice, stream));
context->enqueue(1, buffers, stream, nullptr);
// host memory for outputs
float* outputRois = new float[nmsMaxOut * 4];
float* outputBboxPred = new float[nmsMaxOut * OUTPUT_BBOX_SIZE];
float* outputClsProb = new float[nmsMaxOut * OUTPUT_CLS_SIZE];
CHECK(cudaMemcpyAsync(outputBboxPred, buffers[outputIndex0], nmsMaxOut * OUTPUT_BBOX_SIZE * sizeof(float),
cudaMemcpyDeviceToHost, stream));
CHECK(cudaMemcpyAsync(outputClsProb, buffers[outputIndex1], nmsMaxOut * OUTPUT_CLS_SIZE * sizeof(float),
cudaMemcpyDeviceToHost, stream));
CHECK(cudaMemcpyAsync(outputRois, buffers[outputIndex2], nmsMaxOut * 4 * sizeof(float), cudaMemcpyDeviceToHost,
stream));
cudaStreamSynchronize(stream);
// predicted bounding boxes
float* predBBoxes = new float[nmsMaxOut * OUTPUT_BBOX_SIZE];
bboxTransformInvAndClip(outputRois, outputBboxPred, predBBoxes, inputImInfo, 1, nmsMaxOut, OUTPUT_CLS_SIZE);
  // single batch (N = 1), so the box deltas and class scores start at offset 0
  float* bbox = predBBoxes;
  float* scores = outputClsProb;
for (int c = 1; c < OUTPUT_CLS_SIZE; ++c) // skip the background
{
std::vector<std::pair<float, int>> score_index;
for (int r = 0; r < nmsMaxOut; ++r)
{
if (scores[r * OUTPUT_CLS_SIZE + c] > score_threshold)
{
score_index.push_back(std::make_pair(scores[r * OUTPUT_CLS_SIZE + c], r));
std::stable_sort(score_index.begin(), score_index.end(),
[](const std::pair<float, int>& pair1, const std::pair<float, int>& pair2) {
return pair1.first > pair2.first;
});
}
}
// apply NMS algorithm
std::vector<int> indices = nms(score_index, bbox, c, OUTPUT_CLS_SIZE, nms_threshold);
auto t_end = chrono::high_resolution_clock::now();
float total = chrono::duration<float, milli>(t_end - t_start).count();
for (unsigned k = 0; k < indices.size(); ++k)
{
object_msgs::ObjectInBox BBox;
BBox.object.object_name = CLASSES[c];
BBox.object.probability = scores[indices[k] * OUTPUT_CLS_SIZE + c];
BBox.roi.x_offset = bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4] * imsize.width;
BBox.roi.y_offset = bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 1] * imsize.height;
BBox.roi.width = (bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 2] - bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4]) *
imsize.width;
BBox.roi.height =
(bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 3] - bbox[indices[k] * OUTPUT_BBOX_SIZE + c * 4 + 1]) *
imsize.height;
BBox.roi.do_rectify = false;
bboxes.objects_vector.push_back(BBox);
bboxes.inference_time_ms = total;
}
}
  // release the per-frame host buffers so repeated callbacks do not leak
  delete[] inputData;
  delete[] outputRois;
  delete[] outputBboxPred;
  delete[] outputClsProb;
  delete[] predBBoxes;
  bboxes.header = color_msg->header;
  return bboxes;
} |
791814262a7dcbd240dad71c1da8c1c853d031b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/elementwise_mul_op.h"
#include <algorithm>
#include <functional>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_ops_utils.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename TGrad, typename TIn, int D>
__global__ void ComputeMulGradientCUDAKernel(
const int outer_size,
const int inner_size,
const SimpleArray<int, D> Y_dims,
const SimpleArray<int, D> Y_strides,
const SimpleArray<int, D> W_strides,
const SimpleArray<int, D> X_dims,
const TGrad* dY,
const TIn* W,
TGrad* dX) {
__shared__ typename BlockReduce<TGrad>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
TGrad sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int X_index = i * inner_size + j;
int Y_index = 0;
int X_index_val = X_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
Y_index += (X_index_val % X_dims.data[d]) * Y_strides.data[d];
X_index_val /= X_dims.data[d];
}
int W_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
W_index += W_strides.data[d] == 0
? 0
: (Y_index_val % Y_dims.data[d]) * W_strides.data[d];
Y_index_val /= Y_dims.data[d];
}
#if __CUDA_ARCH__ >= 350
sum += __ldg(dY + Y_index) * __ldg(W + W_index);
#else
sum += dY[Y_index] * W[W_index];
#endif
}
sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, hipcub::Sum());
if (threadIdx.x == 0) {
dX[i] = sum;
}
__syncthreads();
}
}
template <typename TGrad, typename TIn, int D>
void ComputeMulGradientCUDAImpl(
const int outer_size,
const int inner_size,
const int* Y_dims,
const int* W_dims,
const int* X_axes,
const TGrad* dY,
const TIn* W,
TGrad* dX,
CUDAContext* context) {
SimpleArray<int, D> Y_dims_arr;
SimpleArray<int, D> Y_strides_arr;
SimpleArray<int, D> W_strides_arr;
SimpleArray<int, D> X_dims_arr;
std::copy_n(Y_dims, D, Y_dims_arr.data);
math::utils::ComputeTransposedStrides(D, Y_dims, X_axes, Y_strides_arr.data);
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
W_strides_arr.data[i] = W_dims[i] == 1 ? 0 : cur_stride;
cur_stride *= W_dims[i];
}
for (int i = 0; i < D; ++i) {
X_dims_arr.data[i] = Y_dims[X_axes[i]];
}
hipLaunchKernelGGL(( ComputeMulGradientCUDAKernel<TGrad, TIn, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
Y_dims_arr,
Y_strides_arr,
W_strides_arr,
X_dims_arr,
dY,
W,
dX);
}
template <typename TGrad, typename TIn>
void ComputeMulGradientCUDA(
const std::vector<int>& Y_dims,
const std::vector<int>& W_dims,
const std::vector<int>& X_axes,
const TGrad* dY,
const TIn* W,
TGrad* dX,
CUDAContext* context) {
CAFFE_ENFORCE_EQ(Y_dims.size(), W_dims.size());
const int ndim = Y_dims.size();
std::vector<int> X_transpose_axes(ndim);
math::utils::ComputeTransposeAxesForReduceOp(
ndim, X_axes.size(), X_axes.data(), X_transpose_axes.data());
const int pivot = ndim - X_axes.size();
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= Y_dims[X_transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < ndim; ++i) {
inner_size *= Y_dims[X_transpose_axes[i]];
}
if (outer_size > 0) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
ndim,
ComputeMulGradientCUDAImpl,
TGrad,
TIn,
outer_size,
inner_size,
Y_dims.data(),
W_dims.data(),
X_transpose_axes.data(),
dY,
W,
dX,
context);
}
}
} // namespace
template <>
template <typename TGrad, typename TIn, typename TOut>
bool MulFunctor<CUDAContext>::Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC,
const TIn* A,
const TIn* B,
const TOut* /* C */,
TGrad* dA,
TGrad* dB,
CUDAContext* context) const {
if (A_dims == B_dims) {
const int size = std::accumulate(
A_dims.cbegin(), A_dims.cend(), 1, std::multiplies<int>());
math::Mul(size, dC, B, dA, context);
math::Mul(size, dC, A, dB, context);
return true;
}
const int ndim = ::max(A_dims.size(), B_dims.size());
std::vector<int> A_broadcast_dims(ndim);
std::vector<int> B_broadcast_dims(ndim);
std::vector<int> C_broadcast_dims(ndim);
math::utils::ComputeBroadcastBinaryOpDims(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A_broadcast_dims.data(),
B_broadcast_dims.data(),
C_broadcast_dims.data());
std::vector<int> A_axes;
std::vector<int> B_axes;
elementwise_ops_utils::ComputeBinaryBroadcastBackwardAxes(
A_dims, B_dims, &A_axes, &B_axes);
ComputeMulGradientCUDA<TGrad, TIn>(
C_broadcast_dims, B_broadcast_dims, A_axes, dC, B, dA, context);
ComputeMulGradientCUDA<TGrad, TIn>(
C_broadcast_dims, A_broadcast_dims, B_axes, dC, A, dB, context);
return true;
}
REGISTER_CUDA_OPERATOR(
Mul,
BinaryElementwiseOp<NumericTypes, CUDAContext, MulFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
MulGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CUDAContext,
MulFunctor<CUDAContext>>);
} // namespace caffe2
| 791814262a7dcbd240dad71c1da8c1c853d031b3.cu | #include "caffe2/operators/elementwise_mul_op.h"
#include <algorithm>
#include <functional>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_ops_utils.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename TGrad, typename TIn, int D>
__global__ void ComputeMulGradientCUDAKernel(
const int outer_size,
const int inner_size,
const SimpleArray<int, D> Y_dims,
const SimpleArray<int, D> Y_strides,
const SimpleArray<int, D> W_strides,
const SimpleArray<int, D> X_dims,
const TGrad* dY,
const TIn* W,
TGrad* dX) {
__shared__ typename BlockReduce<TGrad>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
TGrad sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int X_index = i * inner_size + j;
int Y_index = 0;
int X_index_val = X_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
Y_index += (X_index_val % X_dims.data[d]) * Y_strides.data[d];
X_index_val /= X_dims.data[d];
}
int W_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
W_index += W_strides.data[d] == 0
? 0
: (Y_index_val % Y_dims.data[d]) * W_strides.data[d];
Y_index_val /= Y_dims.data[d];
}
#if __CUDA_ARCH__ >= 350
sum += __ldg(dY + Y_index) * __ldg(W + W_index);
#else
sum += dY[Y_index] * W[W_index];
#endif
}
sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, cub::Sum());
if (threadIdx.x == 0) {
dX[i] = sum;
}
__syncthreads();
}
}
template <typename TGrad, typename TIn, int D>
void ComputeMulGradientCUDAImpl(
const int outer_size,
const int inner_size,
const int* Y_dims,
const int* W_dims,
const int* X_axes,
const TGrad* dY,
const TIn* W,
TGrad* dX,
CUDAContext* context) {
SimpleArray<int, D> Y_dims_arr;
SimpleArray<int, D> Y_strides_arr;
SimpleArray<int, D> W_strides_arr;
SimpleArray<int, D> X_dims_arr;
std::copy_n(Y_dims, D, Y_dims_arr.data);
math::utils::ComputeTransposedStrides(D, Y_dims, X_axes, Y_strides_arr.data);
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
W_strides_arr.data[i] = W_dims[i] == 1 ? 0 : cur_stride;
cur_stride *= W_dims[i];
}
for (int i = 0; i < D; ++i) {
X_dims_arr.data[i] = Y_dims[X_axes[i]];
}
ComputeMulGradientCUDAKernel<TGrad, TIn, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
Y_dims_arr,
Y_strides_arr,
W_strides_arr,
X_dims_arr,
dY,
W,
dX);
}
template <typename TGrad, typename TIn>
void ComputeMulGradientCUDA(
const std::vector<int>& Y_dims,
const std::vector<int>& W_dims,
const std::vector<int>& X_axes,
const TGrad* dY,
const TIn* W,
TGrad* dX,
CUDAContext* context) {
CAFFE_ENFORCE_EQ(Y_dims.size(), W_dims.size());
const int ndim = Y_dims.size();
std::vector<int> X_transpose_axes(ndim);
math::utils::ComputeTransposeAxesForReduceOp(
ndim, X_axes.size(), X_axes.data(), X_transpose_axes.data());
const int pivot = ndim - X_axes.size();
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= Y_dims[X_transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < ndim; ++i) {
inner_size *= Y_dims[X_transpose_axes[i]];
}
if (outer_size > 0) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
ndim,
ComputeMulGradientCUDAImpl,
TGrad,
TIn,
outer_size,
inner_size,
Y_dims.data(),
W_dims.data(),
X_transpose_axes.data(),
dY,
W,
dX,
context);
}
}
} // namespace
template <>
template <typename TGrad, typename TIn, typename TOut>
bool MulFunctor<CUDAContext>::Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC,
const TIn* A,
const TIn* B,
const TOut* /* C */,
TGrad* dA,
TGrad* dB,
CUDAContext* context) const {
if (A_dims == B_dims) {
const int size = std::accumulate(
A_dims.cbegin(), A_dims.cend(), 1, std::multiplies<int>());
math::Mul(size, dC, B, dA, context);
math::Mul(size, dC, A, dB, context);
return true;
}
const int ndim = std::max(A_dims.size(), B_dims.size());
std::vector<int> A_broadcast_dims(ndim);
std::vector<int> B_broadcast_dims(ndim);
std::vector<int> C_broadcast_dims(ndim);
math::utils::ComputeBroadcastBinaryOpDims(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A_broadcast_dims.data(),
B_broadcast_dims.data(),
C_broadcast_dims.data());
std::vector<int> A_axes;
std::vector<int> B_axes;
elementwise_ops_utils::ComputeBinaryBroadcastBackwardAxes(
A_dims, B_dims, &A_axes, &B_axes);
ComputeMulGradientCUDA<TGrad, TIn>(
C_broadcast_dims, B_broadcast_dims, A_axes, dC, B, dA, context);
ComputeMulGradientCUDA<TGrad, TIn>(
C_broadcast_dims, A_broadcast_dims, B_axes, dC, A, dB, context);
return true;
}
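// [Editor's note] Reference formulas (sketch, not part of the original source):
// for C = A * B with broadcasting, e.g. A of shape [N, D] and B of shape [D],
//   dA[i][j] = dC[i][j] * B[j]
//   dB[j]    = sum_i dC[i][j] * A[i][j]
// ComputeMulGradientCUDA evaluates exactly this elementwise product followed
// by a reduction over the broadcast axes of the respective input.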
REGISTER_CUDA_OPERATOR(
Mul,
BinaryElementwiseOp<NumericTypes, CUDAContext, MulFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
MulGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CUDAContext,
MulFunctor<CUDAContext>>);
} // namespace caffe2
|
23cf13f493e6d1d07193d62cdae1c81b67fe09df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "StereoMatcher.h"
// Author: True Price <jtprice at cs.unc.edu>
//
// BSD License
// Copyright (C) 2017 The University of North Carolina at Chapel Hill
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the original author nor the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
// THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
// NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "helper_math.h"
#include "hipArray/cudaArray2D.h"
#include "hipArray/cudaTexture2D.h"
#include "hipArray/cudaSurface2D.h"
//#include "hipArray/cudaArray3D.h"
#include "hipArray/cudaSurface3D.h"
#include "glog/logging.h"
const float INV_255 = 0.00392156863;
__constant__ StereoMatcher::Options c_options;
//------------------------------------------------------------------------------
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file,
line);
if (abort) exit(code);
}
}
#define CUDA_CHECK_ERROR CUDA_CHECK(hipPeekAtLastError())
// uncomment the bottom two lines to include timing information
#define CUDA_TIMER_START
#define CUDA_TIMER_STOP(var)
//#define CUDA_TIMER_START \
// { \
// hipEvent_t start, stop; \
// hipEventCreate(&start); \
// hipEventCreate(&stop); \
// hipEventRecord(start);
//
//#define CUDA_TIMER_STOP(var) \
// hipEventRecord(stop); \
// hipEventSynchronize(stop); \
// hipEventElapsedTime(&var, start, stop); \
// }
//------------------------------------------------------------------------------
// converts a BGRA color into an intensity value in the range [0, 1]
__device__ float bgra_to_intensity(uchar4 color) {
// the multiplication divides by 255
return (0.2126 * color.z + 0.7152 * color.y + 0.0722 * color.x) * INV_255;
}
//------------------------------------------------------------------------------
struct StereoMatcher::StereoMatcherImpl {
cua::CudaTexture2D<float2> ur_map1, ur_map2; // undistort+rectify maps
cua::CudaTexture2D<unsigned char> orig_image1, orig_image2; // input GPU images
cua::CudaArray2D<float> image1, image2; // undistorted GPU images
// for the window around each pixel, compute the mean and inverse L2 norm
cua::CudaArray2D<float2> image1_mean_inv_norm, image2_mean_inv_norm;
cua::CudaSurface3D<float> raw_shift_scores;
cua::CudaSurface3D<float> shift_scores;
cua::CudaSurface2D<float> depth_map;
// shift: number of shift elements (= max_shift - min_shift)
StereoMatcherImpl(unsigned int width, unsigned int height, unsigned int shift)
: ur_map1(width, height),
ur_map2(width, height),
orig_image1(width, height, hipFilterModeLinear, hipAddressModeBorder,
hipReadModeNormalizedFloat),
orig_image2(width, height, hipFilterModeLinear, hipAddressModeBorder,
hipReadModeNormalizedFloat),
image1(width, height),
image2(width, height),
image1_mean_inv_norm(width, height),
image2_mean_inv_norm(width, height),
raw_shift_scores(width, height, shift),
shift_scores(width, height, shift),
depth_map(width, height) {}
};
//------------------------------------------------------------------------------
// for each input pixel, consider all possible left-image-to-right-image matches
// in the range [-max_shift-1,-min_shift] and select the one with the best NCC
// [threadIdx.x]: equal to the shift radius
// [threadIdx.y]: TODO x-position to consider, offset by the block position
__global__ void StereoMatcher_NCC_kernel(
StereoMatcher::StereoMatcherImpl impl) {
extern __shared__ int shmem[];
//
// local constants
//
const int dx = threadIdx.x; // 0 to (max_shift - min_shift - 1)
const int x = blockIdx.x;
const int y = blockIdx.y;
const int &window_size = c_options.window_size;
const int &min_shift = c_options.min_shift;
const int &max_shift = c_options.max_shift;
const int window_radius = window_size >> 1;
//
// shared memory
//
// intensities for the window in the first image
float *im1 = (float *)shmem;
// intensities for the window in the second image
float *im2 = (float *)&im1[window_size];
//
// begin computation
//
const float2 im1_mean_inv_norm = impl.image1_mean_inv_norm.get(x, y);
const float2 im2_mean_inv_norm =
impl.image2_mean_inv_norm.get(x + dx + min_shift - max_shift + 1, y);
// collect the windows for both images into shared memory
im2[dx] =
impl.image2.get(x + dx + min_shift - max_shift - window_radius + 1, y);
if (dx < window_size) {
im1[dx] = impl.image1.get(x + dx - window_radius, y) - im1_mean_inv_norm.x;
im2[dx + max_shift - min_shift - 1] =
impl.image2.get(x + dx + min_shift - window_radius, y);
}
__syncthreads();
// compute NCC
float score = 0;
for (int i = 0; i < window_size; ++i) {
score += im1[i] * (im2[i + dx] - im2_mean_inv_norm.x);
}
score *= im1_mean_inv_norm.y * im2_mean_inv_norm.y;
impl.raw_shift_scores.set(x, y, dx, score);
}
//------------------------------------------------------------------------------
__global__ void StereoMatcher_Smoothing_kernel(
StereoMatcher::StereoMatcherImpl impl) {
const int x0 = blockIdx.x * blockDim.x - 5;
const int y0 = blockIdx.y * blockDim.y - 5;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
const int step = blockDim.x * blockDim.y;
const int w = (blockDim.x + 10);
const int h = (blockDim.y + 10);
// load the (padded) local window into shared memory
extern __shared__ int shmem[];
float *values = (float *)(shmem + w * h * threadIdx.z);
for (int i = blockDim.x * threadIdx.y + threadIdx.x; i < w * h; i += step) {
const int u = x0 + i % w;
const int v = y0 + i / w;
values[i] = impl.raw_shift_scores.get(u, v, z);
}
__syncthreads();
float score = 0.f;
for (int i = 0; i < 11; ++i) {
for (int j = 0; j < 11; ++j) {
score += values[(threadIdx.y + i) * w + threadIdx.x + j];
}
}
impl.shift_scores.set(x, y, z, score);
}
/*
__global__ void StereoMatcher_Smoothing_kernel(
StereoMatcher::StereoMatcherImpl impl) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
float score = 0.f;
for (int i = -5; i <= 5; ++i) {
for (int j = -5; j <= 5; ++j) {
score += impl.raw_shift_scores.get(x + j, y + i, z);
}
}
impl.shift_scores.set(x, y, z, score);
}
*/
//------------------------------------------------------------------------------
//
// class definitions
//
//------------------------------------------------------------------------------
StereoMatcher::StereoMatcher(const StereoMatcher::Options &options)
: options_(options),
impl_(new StereoMatcherImpl(options.width, options.height,
options.max_shift)) {
if (options_.max_shift < options_.window_size) {
fprintf(stderr, "max shift smaller than window size!");
exit(1);
}
hipMemcpyToSymbol(c_options, &options_, sizeof(StereoMatcher::Options));
}
//------------------------------------------------------------------------------
StereoMatcher::~StereoMatcher() {}
//------------------------------------------------------------------------------
void StereoMatcher::initUndistortRectifyMaps(const void *ur_map1,
const void *ur_map2) {
impl_->ur_map1 = (float2*)ur_map1;
impl_->ur_map2 = (float2*)ur_map2;
}
//------------------------------------------------------------------------------
void StereoMatcher::init_frame(void *image1_cpu, void *image2_cpu) {
auto &impl = *impl_;
impl.orig_image1 = (unsigned char *)image1_cpu; // copy to GPU
impl.orig_image2 = (unsigned char *)image2_cpu; // copy to GPU
// undistort+rectify (using bilinear interpolation), and convert to intensity
impl.image1.apply_op([=] __device__(const size_t x, const size_t y) {
const float2 uv = impl.ur_map1.get(x, y);
return impl.orig_image1.interp<float>(uv.x, uv.y);
});
impl.image2.apply_op([=] __device__(const size_t x, const size_t y) {
const float2 uv = impl.ur_map2.get(x, y);
return impl.orig_image2.interp<float>(uv.x, uv.y);
});
// compute the mean and std. dev. of the window centered around each pixel
impl.image1_mean_inv_norm.apply_op([=] __device__(const size_t x,
const size_t y) {
const int &window_size = c_options.window_size;
const int window_radius = window_size >> 1;
float avg = 0.f;
float inv_norm = 0.f;
for (int i = 0; i < c_options.window_size; ++i) {
avg += impl.image1.get(x + i - window_radius, y);
}
avg /= c_options.window_size;
for (int i = 0; i < window_size; ++i) {
const float intensity = impl.image1.get(x + i - window_radius, y) - avg;
inv_norm += intensity * intensity;
}
inv_norm = rsqrt(inv_norm); // = 1 / sqrt(x)
return make_float2(avg, inv_norm);
});
impl.image2_mean_inv_norm.apply_op([=] __device__(const size_t x,
const size_t y) {
const int &window_size = c_options.window_size;
const int window_radius = window_size >> 1;
float avg = 0.f;
float inv_norm = 0.f;
for (int i = 0; i < c_options.window_size; ++i) {
avg += impl.image2.get(x + i - window_radius, y);
}
avg /= c_options.window_size;
for (int i = 0; i < window_size; ++i) {
const float intensity = impl.image2.get(x + i - window_radius, y) - avg;
inv_norm += intensity * intensity;
}
inv_norm = rsqrt(inv_norm); // = 1 / sqrt(x)
return make_float2(avg, inv_norm);
});
}
//------------------------------------------------------------------------------
void StereoMatcher::download_image1(void *image) const {
impl_->image1.CopyTo((float *)image);
}
void StereoMatcher::download_image2(void *image) const {
impl_->image2.CopyTo((float *)image);
}
void StereoMatcher::download_depth(void *image) const {
impl_->depth_map.CopyTo((float *)image);
}
//------------------------------------------------------------------------------
void StereoMatcher::match() {
float t1 = 0.f, t2 = 0.f, t3 = 0.f;
auto &impl = *impl_;
//
// raw matching
//
{
const dim3 block_dim = dim3(options_.shift());
const dim3 grid_dim = dim3(options_.width, options_.height);
// shared memory breakdown:
// im1: window_size elements
    // im2: (max_shift - min_shift) + window_size - 1 elements
const size_t shmem_size =
(2 * options_.window_size + options_.shift() - 1) * sizeof(float);
CUDA_TIMER_START
hipLaunchKernelGGL(( StereoMatcher_NCC_kernel), dim3(grid_dim), dim3(block_dim), shmem_size, 0, impl);
CUDA_TIMER_STOP(t1)
}
//
// smoothing
//
{
const int w = 32; // pixels per thread block
const int h = 32; // pixels per thread block
const int d = 1; // shift values per thread block
const dim3 block_dim = dim3(w, h, d);
const dim3 grid_dim =
dim3((options_.width + w - 1) / w, (options_.height + h - 1) / h,
(options_.shift() + d - 1) / d);
    // shared memory breakdown: one (w + 10) x (h + 10) float tile per z-slice of the block
const size_t shmem_size = (w + 10) * (h + 10) * d * sizeof(float);
CUDA_TIMER_START
hipLaunchKernelGGL(( StereoMatcher_Smoothing_kernel), dim3(grid_dim), dim3(block_dim), shmem_size, 0, impl);
CUDA_TIMER_STOP(t2)
}
// find the best shift
CUDA_TIMER_START
impl.depth_map.apply_op([=] __device__(const size_t x, const size_t y) {
float best_dx = c_options.max_shift - 1;
float best_score = impl.shift_scores.get(
x, y, c_options.max_shift - c_options.min_shift - 1);
for (int dx = c_options.max_shift - 2; dx >= c_options.min_shift; --dx) {
const float score = impl.shift_scores.get(x, y, dx - c_options.min_shift);
if (score > best_score) {
best_score = score;
best_dx = dx;
}
}
const float disparity = (float)(c_options.max_shift - best_dx);
return disparity;
});
CUDA_TIMER_STOP(t3)
}
//------------------------------------------------------------------------------
// match calculates disparity; this converts to depth values
// TODO (True): this is currently just for visualization purposes; it can really
// be moved into match()
void StereoMatcher::calculate_depth() {
auto &impl = *impl_;
impl.depth_map.apply_op([=] __device__(const size_t x, const size_t y) {
const float depth =
c_options.focal_length * c_options.baseline / impl.depth_map.get(x, y);
return min(max(depth, c_options.min_depth), c_options.max_depth);
});
}
| 23cf13f493e6d1d07193d62cdae1c81b67fe09df.cu | #include "StereoMatcher.h"
// Author: True Price <jtprice at cs.unc.edu>
//
// BSD License
// Copyright (C) 2017 The University of North Carolina at Chapel Hill
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the original author nor the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
// THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
// NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>
#include "helper_math.h"
#include "cudaArray/cudaArray2D.h"
#include "cudaArray/cudaTexture2D.h"
#include "cudaArray/cudaSurface2D.h"
//#include "cudaArray/cudaArray3D.h"
#include "cudaArray/cudaSurface3D.h"
#include "glog/logging.h"
const float INV_255 = 0.00392156863;
__constant__ StereoMatcher::Options c_options;
//------------------------------------------------------------------------------
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
line);
if (abort) exit(code);
}
}
#define CUDA_CHECK_ERROR CUDA_CHECK(cudaPeekAtLastError())
// uncomment the bottom two lines to include timing information
#define CUDA_TIMER_START
#define CUDA_TIMER_STOP(var)
//#define CUDA_TIMER_START \
// { \
// cudaEvent_t start, stop; \
// cudaEventCreate(&start); \
// cudaEventCreate(&stop); \
// cudaEventRecord(start);
//
//#define CUDA_TIMER_STOP(var) \
// cudaEventRecord(stop); \
// cudaEventSynchronize(stop); \
// cudaEventElapsedTime(&var, start, stop); \
// }
//------------------------------------------------------------------------------
// converts a BGRA color into an intensity value in the range [0, 1]
__device__ float bgra_to_intensity(uchar4 color) {
// the multiplication divides by 255
return (0.2126 * color.z + 0.7152 * color.y + 0.0722 * color.x) * INV_255;
}
//------------------------------------------------------------------------------
struct StereoMatcher::StereoMatcherImpl {
cua::CudaTexture2D<float2> ur_map1, ur_map2; // undistort+rectify maps
cua::CudaTexture2D<unsigned char> orig_image1, orig_image2; // input GPU images
cua::CudaArray2D<float> image1, image2; // undistorted GPU images
// for the window around each pixel, compute the mean and inverse L2 norm
cua::CudaArray2D<float2> image1_mean_inv_norm, image2_mean_inv_norm;
cua::CudaSurface3D<float> raw_shift_scores;
cua::CudaSurface3D<float> shift_scores;
cua::CudaSurface2D<float> depth_map;
// shift: number of shift elements (= max_shift - min_shift)
StereoMatcherImpl(unsigned int width, unsigned int height, unsigned int shift)
: ur_map1(width, height),
ur_map2(width, height),
orig_image1(width, height, cudaFilterModeLinear, cudaAddressModeBorder,
cudaReadModeNormalizedFloat),
orig_image2(width, height, cudaFilterModeLinear, cudaAddressModeBorder,
cudaReadModeNormalizedFloat),
image1(width, height),
image2(width, height),
image1_mean_inv_norm(width, height),
image2_mean_inv_norm(width, height),
raw_shift_scores(width, height, shift),
shift_scores(width, height, shift),
depth_map(width, height) {}
};
//------------------------------------------------------------------------------
// for each input pixel, consider all possible left-image-to-right-image matches
// in the range [-max_shift-1,-min_shift] and select the one with the best NCC
// [threadIdx.x]: equal to the shift radius
// [threadIdx.y]: TODO x-position to consider, offset by the block position
__global__ void StereoMatcher_NCC_kernel(
StereoMatcher::StereoMatcherImpl impl) {
extern __shared__ int shmem[];
//
// local constants
//
const int dx = threadIdx.x; // 0 to (max_shift - min_shift - 1)
const int x = blockIdx.x;
const int y = blockIdx.y;
const int &window_size = c_options.window_size;
const int &min_shift = c_options.min_shift;
const int &max_shift = c_options.max_shift;
const int window_radius = window_size >> 1;
//
// shared memory
//
// intensities for the window in the first image
float *im1 = (float *)shmem;
// intensities for the window in the second image
float *im2 = (float *)&im1[window_size];
//
// begin computation
//
const float2 im1_mean_inv_norm = impl.image1_mean_inv_norm.get(x, y);
const float2 im2_mean_inv_norm =
impl.image2_mean_inv_norm.get(x + dx + min_shift - max_shift + 1, y);
// collect the windows for both images into shared memory
im2[dx] =
impl.image2.get(x + dx + min_shift - max_shift - window_radius + 1, y);
if (dx < window_size) {
im1[dx] = impl.image1.get(x + dx - window_radius, y) - im1_mean_inv_norm.x;
im2[dx + max_shift - min_shift - 1] =
impl.image2.get(x + dx + min_shift - window_radius, y);
}
__syncthreads();
// compute NCC
float score = 0;
for (int i = 0; i < window_size; ++i) {
score += im1[i] * (im2[i + dx] - im2_mean_inv_norm.x);
}
score *= im1_mean_inv_norm.y * im2_mean_inv_norm.y;
impl.raw_shift_scores.set(x, y, dx, score);
}
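// [Editor's note] Reference formula (sketch, not part of the original source):
// for a 1-D window of width window_size around x and a candidate shift d, the
// kernel above evaluates
//   NCC(x, d) = [ sum_i (I1[x+i] - mean1) * (I2[x+d+i] - mean2) ]
//               * inv_norm1 * inv_norm2,
// where the per-window means and inverse L2 norms are precomputed in
// init_frame() and stored in image{1,2}_mean_inv_norm.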
//------------------------------------------------------------------------------
__global__ void StereoMatcher_Smoothing_kernel(
StereoMatcher::StereoMatcherImpl impl) {
const int x0 = blockIdx.x * blockDim.x - 5;
const int y0 = blockIdx.y * blockDim.y - 5;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
const int step = blockDim.x * blockDim.y;
const int w = (blockDim.x + 10);
const int h = (blockDim.y + 10);
// load the (padded) local window into shared memory
extern __shared__ int shmem[];
float *values = (float *)(shmem + w * h * threadIdx.z);
for (int i = blockDim.x * threadIdx.y + threadIdx.x; i < w * h; i += step) {
const int u = x0 + i % w;
const int v = y0 + i / w;
values[i] = impl.raw_shift_scores.get(u, v, z);
}
__syncthreads();
float score = 0.f;
for (int i = 0; i < 11; ++i) {
for (int j = 0; j < 11; ++j) {
score += values[(threadIdx.y + i) * w + threadIdx.x + j];
}
}
impl.shift_scores.set(x, y, z, score);
}
/*
__global__ void StereoMatcher_Smoothing_kernel(
StereoMatcher::StereoMatcherImpl impl) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
float score = 0.f;
for (int i = -5; i <= 5; ++i) {
for (int j = -5; j <= 5; ++j) {
score += impl.raw_shift_scores.get(x + j, y + i, z);
}
}
impl.shift_scores.set(x, y, z, score);
}
*/
//------------------------------------------------------------------------------
//
// class definitions
//
//------------------------------------------------------------------------------
StereoMatcher::StereoMatcher(const StereoMatcher::Options &options)
: options_(options),
impl_(new StereoMatcherImpl(options.width, options.height,
options.max_shift)) {
if (options_.max_shift < options_.window_size) {
fprintf(stderr, "max shift smaller than window size!");
exit(1);
}
cudaMemcpyToSymbol(c_options, &options_, sizeof(StereoMatcher::Options));
}
//------------------------------------------------------------------------------
StereoMatcher::~StereoMatcher() {}
//------------------------------------------------------------------------------
void StereoMatcher::initUndistortRectifyMaps(const void *ur_map1,
const void *ur_map2) {
impl_->ur_map1 = (float2*)ur_map1;
impl_->ur_map2 = (float2*)ur_map2;
}
//------------------------------------------------------------------------------
void StereoMatcher::init_frame(void *image1_cpu, void *image2_cpu) {
auto &impl = *impl_;
impl.orig_image1 = (unsigned char *)image1_cpu; // copy to GPU
impl.orig_image2 = (unsigned char *)image2_cpu; // copy to GPU
// undistort+rectify (using bilinear interpolation), and convert to intensity
impl.image1.apply_op([=] __device__(const size_t x, const size_t y) {
const float2 uv = impl.ur_map1.get(x, y);
return impl.orig_image1.interp<float>(uv.x, uv.y);
});
impl.image2.apply_op([=] __device__(const size_t x, const size_t y) {
const float2 uv = impl.ur_map2.get(x, y);
return impl.orig_image2.interp<float>(uv.x, uv.y);
});
// compute the mean and std. dev. of the window centered around each pixel
impl.image1_mean_inv_norm.apply_op([=] __device__(const size_t x,
const size_t y) {
const int &window_size = c_options.window_size;
const int window_radius = window_size >> 1;
float avg = 0.f;
float inv_norm = 0.f;
for (int i = 0; i < c_options.window_size; ++i) {
avg += impl.image1.get(x + i - window_radius, y);
}
avg /= c_options.window_size;
for (int i = 0; i < window_size; ++i) {
const float intensity = impl.image1.get(x + i - window_radius, y) - avg;
inv_norm += intensity * intensity;
}
inv_norm = rsqrt(inv_norm); // = 1 / sqrt(x)
return make_float2(avg, inv_norm);
});
impl.image2_mean_inv_norm.apply_op([=] __device__(const size_t x,
const size_t y) {
const int &window_size = c_options.window_size;
const int window_radius = window_size >> 1;
float avg = 0.f;
float inv_norm = 0.f;
for (int i = 0; i < c_options.window_size; ++i) {
avg += impl.image2.get(x + i - window_radius, y);
}
avg /= c_options.window_size;
for (int i = 0; i < window_size; ++i) {
const float intensity = impl.image2.get(x + i - window_radius, y) - avg;
inv_norm += intensity * intensity;
}
inv_norm = rsqrt(inv_norm); // = 1 / sqrt(x)
return make_float2(avg, inv_norm);
});
}
//------------------------------------------------------------------------------
void StereoMatcher::download_image1(void *image) const {
impl_->image1.CopyTo((float *)image);
}
void StereoMatcher::download_image2(void *image) const {
impl_->image2.CopyTo((float *)image);
}
void StereoMatcher::download_depth(void *image) const {
impl_->depth_map.CopyTo((float *)image);
}
//------------------------------------------------------------------------------
void StereoMatcher::match() {
float t1 = 0.f, t2 = 0.f, t3 = 0.f;
auto &impl = *impl_;
//
// raw matching
//
{
const dim3 block_dim = dim3(options_.shift());
const dim3 grid_dim = dim3(options_.width, options_.height);
// shared memory breakdown:
// im1: window_size elements
    // im2: (max_shift - min_shift) + window_size - 1 elements
const size_t shmem_size =
(2 * options_.window_size + options_.shift() - 1) * sizeof(float);
CUDA_TIMER_START
StereoMatcher_NCC_kernel<<<grid_dim, block_dim, shmem_size>>>(impl);
CUDA_TIMER_STOP(t1)
}
//
// smoothing
//
{
const int w = 32; // pixels per thread block
const int h = 32; // pixels per thread block
const int d = 1; // shift values per thread block
const dim3 block_dim = dim3(w, h, d);
const dim3 grid_dim =
dim3((options_.width + w - 1) / w, (options_.height + h - 1) / h,
(options_.shift() + d - 1) / d);
    // shared memory breakdown: one (w + 10) x (h + 10) float tile per z-slice of the block
const size_t shmem_size = (w + 10) * (h + 10) * d * sizeof(float);
CUDA_TIMER_START
StereoMatcher_Smoothing_kernel<<<grid_dim, block_dim, shmem_size>>>(impl);
CUDA_TIMER_STOP(t2)
}
// find the best shift
CUDA_TIMER_START
impl.depth_map.apply_op([=] __device__(const size_t x, const size_t y) {
float best_dx = c_options.max_shift - 1;
float best_score = impl.shift_scores.get(
x, y, c_options.max_shift - c_options.min_shift - 1);
for (int dx = c_options.max_shift - 2; dx >= c_options.min_shift; --dx) {
const float score = impl.shift_scores.get(x, y, dx - c_options.min_shift);
if (score > best_score) {
best_score = score;
best_dx = dx;
}
}
const float disparity = (float)(c_options.max_shift - best_dx);
return disparity;
});
CUDA_TIMER_STOP(t3)
}
//------------------------------------------------------------------------------
// match calculates disparity; this converts to depth values
// TODO (True): this is currently just for visualization purposes; it can really
// be moved into match()
void StereoMatcher::calculate_depth() {
auto &impl = *impl_;
impl.depth_map.apply_op([=] __device__(const size_t x, const size_t y) {
const float depth =
c_options.focal_length * c_options.baseline / impl.depth_map.get(x, y);
return min(max(depth, c_options.min_depth), c_options.max_depth);
});
}
|
ae58215f40516c641d0e4e469baad4201afe8ecb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#define N 1024 * 1024
#define CHECK_CUDA_ERR(x) { \
err = x; \
if (err != hipSuccess) { \
printf("cuda error with %s in line %d\n",hipGetErrorString(err),__LINE__); \
exit(1); \
} }
int main()
{
hipError_t err;
hipEvent_t start_event,stop_event;
CHECK_CUDA_ERR ( hipEventCreate(&start_event) );
CHECK_CUDA_ERR ( hipEventCreate(&stop_event) );
void * buf = malloc(sizeof(float) * N);
void * dBuf;
CHECK_CUDA_ERR ( hipMalloc(&dBuf,sizeof(float) * N) );
CHECK_CUDA_ERR ( hipEventRecord(start_event,0) ) ;
CHECK_CUDA_ERR ( hipMemcpy(dBuf,buf,sizeof(float) * N, hipMemcpyHostToDevice));
CHECK_CUDA_ERR ( hipEventRecord(stop_event,0) );
CHECK_CUDA_ERR ( hipDeviceSynchronize() );
float ms = 100.f;
CHECK_CUDA_ERR ( hipEventElapsedTime(&ms,start_event,stop_event));
    printf("%f milliseconds elapsed\n", ms);
CHECK_CUDA_ERR ( hipEventDestroy(start_event) );
CHECK_CUDA_ERR ( hipEventDestroy(stop_event) );
free(buf);
CHECK_CUDA_ERR ( hipFree(dBuf) );
return 0;
}
| ae58215f40516c641d0e4e469baad4201afe8ecb.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#define N 1024 * 1024
#define CHECK_CUDA_ERR(x) { \
err = x; \
if (err != cudaSuccess) { \
printf("cuda error with %s in line %d\n",cudaGetErrorString(err),__LINE__); \
exit(1); \
} }
int main()
{
cudaError_t err;
cudaEvent_t start_event,stop_event;
CHECK_CUDA_ERR ( cudaEventCreate(&start_event) );
CHECK_CUDA_ERR ( cudaEventCreate(&stop_event) );
void * buf = malloc(sizeof(float) * N);
void * dBuf;
CHECK_CUDA_ERR ( cudaMalloc(&dBuf,sizeof(float) * N) );
CHECK_CUDA_ERR ( cudaEventRecord(start_event,0) ) ;
CHECK_CUDA_ERR ( cudaMemcpy(dBuf,buf,sizeof(float) * N, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR ( cudaEventRecord(stop_event,0) );
CHECK_CUDA_ERR ( cudaDeviceSynchronize() );
float ms = 100.f;
CHECK_CUDA_ERR ( cudaEventElapsedTime(&ms,start_event,stop_event));
    printf("%f milliseconds elapsed\n", ms);
CHECK_CUDA_ERR ( cudaEventDestroy(start_event) );
CHECK_CUDA_ERR ( cudaEventDestroy(stop_event) );
free(buf);
CHECK_CUDA_ERR ( cudaFree(dBuf) );
return 0;
}
|
fc08c06c97946201179e76f1b1ac88b65cad6098.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void RBMCopyFilterKernel( float *weightPtr, float *filterPtr, int weightCount, int i, int thisLayerSize )
{
    int weightIndex = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
        + blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (weightIndex < weightCount)
{
filterPtr[weightIndex] = weightPtr[i + weightIndex * thisLayerSize];
}
} | fc08c06c97946201179e76f1b1ac88b65cad6098.cu | #include "includes.h"
__global__ void RBMCopyFilterKernel( float *weightPtr, float *filterPtr, int weightCount, int i, int thisLayerSize )
{
    int weightIndex = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
        + blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (weightIndex < weightCount)
{
filterPtr[weightIndex] = weightPtr[i + weightIndex * thisLayerSize];
}
} |
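// [Editor's note] Index sketch, not part of the original source: weightIndex
// enumerates threads row-major over the 2-D grid of 1-D blocks. Treating
// weightPtr as a row-major [weightCount][thisLayerSize] matrix, the kernel
// gathers its i-th column into filterPtr, i.e. filterPtr[w] = weightPtr[w][i].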
c2d5af7f33bb41818d1cd2d94ffa472ba7ab3a7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_resampling_kernels.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_RESAMPLING_KERNELS_CU
#define _REG_RESAMPLING_KERNELS_CU
#include "stdio.h"
texture<float, 3, hipReadModeElementType> sourceTexture;
texture<float4, 1, hipReadModeElementType> sourceMatrixTexture;
texture<float4, 1, hipReadModeElementType> positionFieldTexture;
texture<int, 1, hipReadModeElementType> maskTexture;
/* *************************************************************** */
__device__ __constant__ int3 c_SourceDim;
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ float c_PaddingValue;
__device__ __constant__ int c_ActiveVoxelNumber;
/* *************************************************************** */
/* *************************************************************** */
__global__ void reg_resampleSourceImage_kernel(float *resultArray)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
//Get the real world position in the source space
float4 realPosition = tex1Dfetch(positionFieldTexture,tid);
//Get the voxel-based position in the source space
float3 voxelPosition;
float4 matrix = tex1Dfetch(sourceMatrixTexture,0);
voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,1);
voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,2);
voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
int3 sourceImageSize = c_SourceDim;
float3 relativePosition;
relativePosition.x=(voxelPosition.x+0.5f)/(float)sourceImageSize.x;
relativePosition.y=(voxelPosition.y+0.5f)/(float)sourceImageSize.y;
relativePosition.z=(voxelPosition.z+0.5f)/(float)sourceImageSize.z;
if( relativePosition.x>=0.0f && relativePosition.x<=1.0f &&
relativePosition.y>=0.0f && relativePosition.y<=1.0f &&
relativePosition.z>=0.0f && relativePosition.z<=1.0f ){
resultArray[tex1Dfetch(maskTexture,tid)]=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z);
}
else resultArray[tex1Dfetch(maskTexture,tid)]=c_PaddingValue;
}
}
/* *************************************************************** */
/* *************************************************************** */
__global__ void reg_getSourceImageGradient_kernel(float4 *gradientArray)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
//Get the real world position in the source space
float4 realPosition = tex1Dfetch(positionFieldTexture,tid);
//Get the voxel-based position in the source space
float3 voxelPosition;
float4 matrix = tex1Dfetch(sourceMatrixTexture,0);
voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,1);
voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,2);
voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
int3 sourceImageSize = c_SourceDim;
if( 0.0f<=voxelPosition.x && voxelPosition.x<=float(sourceImageSize.x-1) &&
0.0f<=voxelPosition.y && voxelPosition.y<=float(sourceImageSize.y-1) &&
0.0f<=voxelPosition.z && voxelPosition.z<=float(sourceImageSize.z-1)){
int3 voxel;
voxel.x = (int)(voxelPosition.x);
voxel.y = (int)(voxelPosition.y);
voxel.z = (int)(voxelPosition.z);
float xBasis[2];
float relative = fabsf(voxelPosition.x - (float)voxel.x);
xBasis[0]=1.0f-relative;
xBasis[1]=relative;
float yBasis[2];
relative = fabsf(voxelPosition.y - (float)voxel.y);
yBasis[0]=1.0f-relative;
yBasis[1]=relative;
float zBasis[2];
relative = fabsf(voxelPosition.z - (float)voxel.z);
zBasis[0]=1.0f-relative;
zBasis[1]=relative;
float deriv[2];
deriv[0]=-1.0f;
deriv[1]=1.0f;
float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float3 relativePosition;
for(short c=0; c<2; c++){
relativePosition.z=((float)voxel.z+(float)c+0.5f)/(float)c_SourceDim.z;
float3 tempValueY=make_float3(0.0f, 0.0f, 0.0f);
for(short b=0; b<2; b++){
float2 tempValueX=make_float2(0.0f, 0.0f);
relativePosition.y=((float)voxel.y+(float)b+0.5f)/(float)c_SourceDim.y;
for(short a=0; a<2; a++){
relativePosition.x=((float)voxel.x+(float)a+0.5f)/(float)c_SourceDim.x;
float intensity=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z);
tempValueX.x += intensity * deriv[a];
tempValueX.y += intensity * xBasis[a];
}
tempValueY.x += tempValueX.x * yBasis[b];
tempValueY.y += tempValueX.y * deriv[b];
tempValueY.z += tempValueX.y * yBasis[b];
}
gradientValue.x += tempValueY.x * zBasis[c];
gradientValue.y += tempValueY.y * zBasis[c];
gradientValue.z += tempValueY.z * deriv[c];
}
gradientArray[tid]=gradientValue;
}
else gradientArray[tid]=make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
/* *************************************************************** */
/* *************************************************************** */
#endif
| c2d5af7f33bb41818d1cd2d94ffa472ba7ab3a7b.cu | /*
* _reg_resampling_kernels.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_RESAMPLING_KERNELS_CU
#define _REG_RESAMPLING_KERNELS_CU
#include "stdio.h"
texture<float, 3, cudaReadModeElementType> sourceTexture;
texture<float4, 1, cudaReadModeElementType> sourceMatrixTexture;
texture<float4, 1, cudaReadModeElementType> positionFieldTexture;
texture<int, 1, cudaReadModeElementType> maskTexture;
/* *************************************************************** */
__device__ __constant__ int3 c_SourceDim;
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ float c_PaddingValue;
__device__ __constant__ int c_ActiveVoxelNumber;
/* *************************************************************** */
/* *************************************************************** */
__global__ void reg_resampleSourceImage_kernel(float *resultArray)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
//Get the real world position in the source space
float4 realPosition = tex1Dfetch(positionFieldTexture,tid);
//Get the voxel-based position in the source space
float3 voxelPosition;
float4 matrix = tex1Dfetch(sourceMatrixTexture,0);
voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,1);
voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,2);
voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
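        // Note: sourceMatrixTexture holds, row by row as float4, the first three rows of
        // the 4x4 real-world-to-voxel affine of the source image, so the three dot
        // products above map the position-field value into source voxel coordinates.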
int3 sourceImageSize = c_SourceDim;
float3 relativePosition;
relativePosition.x=(voxelPosition.x+0.5f)/(float)sourceImageSize.x;
relativePosition.y=(voxelPosition.y+0.5f)/(float)sourceImageSize.y;
relativePosition.z=(voxelPosition.z+0.5f)/(float)sourceImageSize.z;
if( relativePosition.x>=0.0f && relativePosition.x<=1.0f &&
relativePosition.y>=0.0f && relativePosition.y<=1.0f &&
relativePosition.z>=0.0f && relativePosition.z<=1.0f ){
resultArray[tex1Dfetch(maskTexture,tid)]=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z);
}
else resultArray[tex1Dfetch(maskTexture,tid)]=c_PaddingValue;
}
}
/* *************************************************************** */
/* *************************************************************** */
__global__ void reg_getSourceImageGradient_kernel(float4 *gradientArray)
{
const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
if(tid<c_ActiveVoxelNumber){
//Get the real world position in the source space
float4 realPosition = tex1Dfetch(positionFieldTexture,tid);
//Get the voxel-based position in the source space
float3 voxelPosition;
float4 matrix = tex1Dfetch(sourceMatrixTexture,0);
voxelPosition.x = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,1);
voxelPosition.y = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
matrix = tex1Dfetch(sourceMatrixTexture,2);
voxelPosition.z = matrix.x*realPosition.x + matrix.y*realPosition.y +
matrix.z*realPosition.z + matrix.w;
int3 sourceImageSize = c_SourceDim;
if( 0.0f<=voxelPosition.x && voxelPosition.x<=float(sourceImageSize.x-1) &&
0.0f<=voxelPosition.y && voxelPosition.y<=float(sourceImageSize.y-1) &&
0.0f<=voxelPosition.z && voxelPosition.z<=float(sourceImageSize.z-1)){
int3 voxel;
voxel.x = (int)(voxelPosition.x);
voxel.y = (int)(voxelPosition.y);
voxel.z = (int)(voxelPosition.z);
float xBasis[2];
float relative = fabsf(voxelPosition.x - (float)voxel.x);
xBasis[0]=1.0f-relative;
xBasis[1]=relative;
float yBasis[2];
relative = fabsf(voxelPosition.y - (float)voxel.y);
yBasis[0]=1.0f-relative;
yBasis[1]=relative;
float zBasis[2];
relative = fabsf(voxelPosition.z - (float)voxel.z);
zBasis[0]=1.0f-relative;
zBasis[1]=relative;
float deriv[2];
deriv[0]=-1.0f;
deriv[1]=1.0f;
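            // The gradient below reuses the trilinear weights: along the axis being
            // differentiated the (1-t, t) basis is swapped for the constant slopes
            // (-1, +1) in deriv[], giving the analytic derivative of the trilinear
            // interpolant with respect to each voxel coordinate.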
float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float3 relativePosition;
for(short c=0; c<2; c++){
relativePosition.z=((float)voxel.z+(float)c+0.5f)/(float)c_SourceDim.z;
float3 tempValueY=make_float3(0.0f, 0.0f, 0.0f);
for(short b=0; b<2; b++){
float2 tempValueX=make_float2(0.0f, 0.0f);
relativePosition.y=((float)voxel.y+(float)b+0.5f)/(float)c_SourceDim.y;
for(short a=0; a<2; a++){
relativePosition.x=((float)voxel.x+(float)a+0.5f)/(float)c_SourceDim.x;
float intensity=tex3D(sourceTexture, relativePosition.x, relativePosition.y, relativePosition.z);
tempValueX.x += intensity * deriv[a];
tempValueX.y += intensity * xBasis[a];
}
tempValueY.x += tempValueX.x * yBasis[b];
tempValueY.y += tempValueX.y * deriv[b];
tempValueY.z += tempValueX.y * yBasis[b];
}
gradientValue.x += tempValueY.x * zBasis[c];
gradientValue.y += tempValueY.y * zBasis[c];
gradientValue.z += tempValueY.z * deriv[c];
}
gradientArray[tid]=gradientValue;
}
else gradientArray[tid]=make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
/* *************************************************************** */
/* *************************************************************** */
#endif
|
3aeefb430f6bcb9120eb04b40fe7145dcbc322f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "luaT.h"
#include "THH.h"
using namespace std;
namespace sparse_convo { namespace cuda {
int SparseFilterConvo::UpdateOutput(lua_State *L)
{
cout << "Hello World from CUDA Land!" << endl;
return 0;
}
int SparseFilterConvo::UpdateGradInput(lua_State *L)
{
return 0;
}
int SparseFilterConvo::AccGradParameters(lua_State *L)
{
return 0;
}
} }
| 3aeefb430f6bcb9120eb04b40fe7145dcbc322f2.cu | #include <iostream>
#include "luaT.h"
#include "THC.h"
using namespace std;
namespace sparse_convo { namespace cuda {
int SparseFilterConvo::UpdateOutput(lua_State *L)
{
cout << "Hello World from CUDA Land!" << endl;
return 0;
}
int SparseFilterConvo::UpdateGradInput(lua_State *L)
{
return 0;
}
int SparseFilterConvo::AccGradParameters(lua_State *L)
{
return 0;
}
} }
|
13b0f20c40064067a5115a66756964554734701a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include "open3d/ml/impl/continuous_conv/ContinuousConvCUDAKernels.h"
#include "open3d/utility/Helper.h"
using open3d::utility::DivUp;
namespace open3d {
namespace ml {
namespace impl {
/// Kernel for FillColumn
template <class TReal,
class TIndex,
bool ALIGN_CORNERS,
CoordinateMapping MAPPING,
InterpolationMode INTERPOLATION>
__global__ void FillColumnKernel(
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
const TReal* const __restrict__ inp_importance,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
int filter_size_x,
int filter_size_y,
int filter_size_z,
bool INDIVIDUAL_EXTENT,
bool ISOTROPIC_EXTENT,
bool NORMALIZE,
bool POINT_IMPORTANCE,
bool NEIGHBOR_IMPORTANCE) {
TIndex out_idx = begin_idx + blockIdx.x;
if (out_idx >= end_idx) return;
const int NUM_INTERP_VALUES =
(INTERPOLATION == InterpolationMode::LINEAR ||
INTERPOLATION == InterpolationMode::LINEAR_BORDER
? 8
: 1);
TReal interp_weights[NUM_INTERP_VALUES];
TIndex interp_indices[NUM_INTERP_VALUES];
TReal offset[3] = {offsets[0], offsets[1], offsets[2]};
const TIndex col_idx = out_idx - begin_idx;
TReal* out_column = columns + filter_size_x * filter_size_y *
filter_size_z * in_channels * col_idx;
const int64_t neighbor_start = neighbors_row_splits[out_idx];
const int64_t neighbor_end = neighbors_row_splits[out_idx + 1];
TReal out_pos[3] = {out_positions[out_idx * 3 + 0],
out_positions[out_idx * 3 + 1],
out_positions[out_idx * 3 + 2]};
TReal inv_extents[3];
if (INDIVIDUAL_EXTENT) {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[out_idx];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[3 * out_idx + 0];
inv_extents[1] = TReal(1) / extents[3 * out_idx + 1];
inv_extents[2] = TReal(1) / extents[3 * out_idx + 2];
}
} else {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = TReal(1) / extents[1];
inv_extents[2] = TReal(1) / extents[2];
}
}
TReal normalizer = TReal(0);
if (NORMALIZE) {
if (NEIGHBOR_IMPORTANCE) {
for (int64_t n_idx = neighbor_start + threadIdx.x;
n_idx < neighbor_end; n_idx += blockDim.x) {
TReal n_importance = neighbors_importance[n_idx];
normalizer += n_importance;
}
unsigned int mask = __activemask();
for (int offset = blockDim.x / 2; offset > 0; offset /= 2)
normalizer += __shfl_down_sync(mask, normalizer, offset);
normalizer = __shfl_sync(mask, normalizer, 0);
} else {
int64_t num_neighbors = neighbor_end - neighbor_start;
normalizer = num_neighbors;
}
}
for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) {
const TIndex inp_idx = neighbors_index[n_idx];
const TReal n_importance =
NEIGHBOR_IMPORTANCE ? neighbors_importance[n_idx] : TReal(1);
TReal x, y, z;
x = inp_positions[inp_idx * 3 + 0] - out_pos[0];
y = inp_positions[inp_idx * 3 + 1] - out_pos[1];
z = inp_positions[inp_idx * 3 + 2] - out_pos[2];
ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>(
x, y, z, filter_size_x, filter_size_y, filter_size_z,
inv_extents[0], inv_extents[1], inv_extents[2], offset[0],
offset[1], offset[2]);
Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z,
filter_size_x, filter_size_y, filter_size_z);
TReal infeat = 0;
TReal importance = 1;
if (POINT_IMPORTANCE) importance = inp_importance[inp_idx];
if (NEIGHBOR_IMPORTANCE) importance *= n_importance;
if (NORMALIZE && normalizer != 0) importance /= normalizer;
for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) {
infeat = importance * inp_features[inp_idx * in_channels + ic];
for (int j = 0; j < NUM_INTERP_VALUES; ++j) {
TReal value = interp_weights[j] * infeat;
out_column[interp_indices[j] * in_channels + ic] += value;
}
}
} // for n
}
template <class TReal, class TIndex>
void FillColumn(const hipStream_t& stream,
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
const TReal* const __restrict__ inp_importance,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize) {
const int filter_size_z = filter_dims[0];
const int filter_size_y = filter_dims[1];
const int filter_size_x = filter_dims[2];
TIndex num_columns = end_idx - begin_idx;
int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z;
hipMemsetAsync(
columns, 0,
sizeof(TReal) * filter_spatial_size * in_channels * num_columns,
stream);
const int BLOCKSIZE = 32;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = num_columns;
#define FN_PARAMETERS \
columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \
inp_positions, inp_features, inp_importance, neighbors_index_size, \
neighbors_index, neighbors_importance, neighbors_row_splits, \
extents, offsets, filter_size_x, filter_size_y, filter_size_z, \
individual_extent, isotropic_extent, normalize, \
inp_importance != nullptr, neighbors_importance != nullptr
#define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \
if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \
ALIGN_CORNERS == align_corners) \
hipLaunchKernelGGL(( FillColumnKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, INTERPOLATION>) \
, dim3(grid), dim3(block), 0, stream, FN_PARAMETERS);
#define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, false)
#define CALL_TEMPLATE3(INTERPOLATION) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \
CALL_TEMPLATE2(INTERPOLATION, \
CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY)
#define CALL_TEMPLATE4 \
CALL_TEMPLATE3(InterpolationMode::LINEAR) \
CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \
CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR)
if (grid.x) {
CALL_TEMPLATE4
/*CHECK_CUDA_ERROR*/
}
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef CALL_TEMPLATE4
#undef FN_PARAMETERS
}
template void FillColumn<float, int32_t>(
const hipStream_t& stream,
float* columns,
int in_channels,
int32_t begin_idx,
int32_t end_idx,
int32_t num_out,
const float* const __restrict__ out_positions,
int32_t num_inp,
const float* const __restrict__ inp_positions,
const float* const __restrict__ inp_features,
const float* const __restrict__ inp_importance,
size_t neighbors_index_size,
const int32_t* const __restrict__ neighbors_index,
const float* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const float* const __restrict__ extents,
const float* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize);
template <class TReal,
class TIndex,
bool ALIGN_CORNERS,
CoordinateMapping MAPPING,
InterpolationMode INTERPOLATION>
__global__ void FillColumnTransposeKernel(
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ inp_neighbors_importance_sum,
const int64_t* const __restrict__ inp_neighbors_prefix_sum,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
int filter_size_x,
int filter_size_y,
int filter_size_z,
bool INDIVIDUAL_EXTENT,
bool ISOTROPIC_EXTENT,
bool NORMALIZE,
bool NEIGHBOR_IMPORTANCE) {
TIndex out_idx = begin_idx + blockIdx.x;
if (out_idx >= end_idx) return;
const int NUM_INTERP_VALUES =
(INTERPOLATION == InterpolationMode::LINEAR ||
INTERPOLATION == InterpolationMode::LINEAR_BORDER
? 8
: 1);
TReal interp_weights[NUM_INTERP_VALUES];
TIndex interp_indices[NUM_INTERP_VALUES];
TReal offset[3] = {offsets[0], offsets[1], offsets[2]};
const TIndex col_idx = out_idx - begin_idx;
TReal* out_column = columns + filter_size_x * filter_size_y *
filter_size_z * in_channels * col_idx;
const int64_t neighbor_start = neighbors_row_splits[out_idx];
const int64_t neighbor_end = neighbors_row_splits[out_idx + 1];
TReal out_pos[3] = {out_positions[out_idx * 3 + 0],
out_positions[out_idx * 3 + 1],
out_positions[out_idx * 3 + 2]};
TReal inv_extents[3];
if (INDIVIDUAL_EXTENT == false) {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = TReal(1) / extents[1];
inv_extents[2] = TReal(1) / extents[2];
}
}
for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) {
const TIndex inp_idx = neighbors_index[n_idx];
TReal x, y, z;
x = out_pos[0] - inp_positions[inp_idx * 3 + 0];
y = out_pos[1] - inp_positions[inp_idx * 3 + 1];
z = out_pos[2] - inp_positions[inp_idx * 3 + 2];
if (INDIVIDUAL_EXTENT) {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[inp_idx];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[3 * inp_idx + 0];
inv_extents[1] = TReal(1) / extents[3 * inp_idx + 1];
inv_extents[2] = TReal(1) / extents[3 * inp_idx + 2];
}
}
TReal num_inp_neighbors_normalizer = 1;
if (NORMALIZE) {
if (NEIGHBOR_IMPORTANCE) {
if (inp_neighbors_importance_sum[inp_idx] != 0)
num_inp_neighbors_normalizer /=
inp_neighbors_importance_sum[inp_idx];
} else {
const int64_t inp_neighbor_start =
inp_neighbors_prefix_sum[inp_idx];
const int64_t inp_neighbor_end =
inp_idx + 1 < num_inp
? inp_neighbors_prefix_sum[inp_idx + 1]
: neighbors_index_size;
const size_t num_inp_neighbors =
inp_neighbor_end - inp_neighbor_start;
if (num_inp_neighbors > 0)
num_inp_neighbors_normalizer /= num_inp_neighbors;
}
}
ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>(
x, y, z, filter_size_x, filter_size_y, filter_size_z,
inv_extents[0], inv_extents[1], inv_extents[2], offset[0],
offset[1], offset[2]);
Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z,
filter_size_x, filter_size_y, filter_size_z);
TReal infeat = 0;
for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) {
infeat = inp_features[inp_idx * in_channels + ic];
if (NEIGHBOR_IMPORTANCE) infeat *= neighbors_importance[n_idx];
if (NORMALIZE) infeat *= num_inp_neighbors_normalizer;
for (int j = 0; j < NUM_INTERP_VALUES; ++j) {
TReal value = interp_weights[j] * infeat;
out_column[interp_indices[j] * in_channels + ic] += value;
}
}
} // for n
}
template <class TReal, class TIndex>
void FillColumnTranspose(
const hipStream_t& stream,
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
const TReal* const __restrict__ inp_neighbors_importance_sum,
const int64_t* const __restrict__ inp_neighbors_prefix_sum,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize) {
const bool has_neighbors_importance = inp_neighbors_importance_sum;
const int filter_size_z = filter_dims[0];
const int filter_size_y = filter_dims[1];
const int filter_size_x = filter_dims[2];
TIndex num_columns = end_idx - begin_idx;
int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z;
hipMemsetAsync(
columns, 0,
sizeof(TReal) * filter_spatial_size * in_channels * num_columns,
stream);
const int BLOCKSIZE = 32;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = num_columns;
#define FN_PARAMETERS \
columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \
inp_positions, inp_features, neighbors_index_size, \
neighbors_index, inp_neighbors_importance_sum, \
inp_neighbors_prefix_sum, neighbors_importance, \
neighbors_row_splits, extents, offsets, filter_size_x, \
filter_size_y, filter_size_z, individual_extent, isotropic_extent, \
normalize, has_neighbors_importance
#define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \
if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \
ALIGN_CORNERS == align_corners) \
hipLaunchKernelGGL(( FillColumnTransposeKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, \
INTERPOLATION>) \
, dim3(grid), dim3(block), 0, stream, FN_PARAMETERS);
#define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, false)
#define CALL_TEMPLATE3(INTERPOLATION) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \
CALL_TEMPLATE2(INTERPOLATION, \
CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY)
#define CALL_TEMPLATE4 \
CALL_TEMPLATE3(InterpolationMode::LINEAR) \
CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \
CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR)
if (grid.x) {
CALL_TEMPLATE4
/*CHECK_CUDA_ERROR*/
}
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef CALL_TEMPLATE4
#undef FN_PARAMETERS
}
template void FillColumnTranspose<float, int32_t>(
const hipStream_t& stream,
float* columns,
int in_channels,
int32_t begin_idx,
int32_t end_idx,
int32_t num_out,
const float* const __restrict__ out_positions,
int32_t num_inp,
const float* const __restrict__ inp_positions,
const float* const __restrict__ inp_features,
const float* const __restrict__ inp_neighbors_importance_sum,
const int64_t* const __restrict__ inp_neighbors_prefix_sum,
size_t neighbors_index_size,
const int32_t* const __restrict__ neighbors_index,
const float* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const float* const __restrict__ extents,
const float* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize);
template <class T>
__global__ void MultiplyColumnsKernel(size_t rows,
size_t cols,
T* __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= rows * cols) return;
size_t col = idx / rows;
T factor = vector[col];
col_major_matrix[idx] *= factor;
}
template <class T>
void MultiplyColumns(const hipStream_t& stream,
size_t rows,
size_t cols,
T* __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
const int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = DivUp(rows * cols, BLOCKSIZE);
if (grid.x) {
hipLaunchKernelGGL(( MultiplyColumnsKernel<T>), dim3(grid), dim3(block), 0, stream,
rows, cols, col_major_matrix, vector);
}
}
template void MultiplyColumns<float>(const hipStream_t& stream,
size_t rows,
size_t cols,
float* __restrict__ col_major_matrix,
const float* const __restrict__ vector);
template <class T>
__global__ void MultiplyAndCopyColumnsKernel(
size_t rows,
size_t cols,
T* __restrict__ out_ptr,
const T* const __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= rows * cols) return;
size_t col = idx / rows;
T factor = vector[col];
out_ptr[idx] = col_major_matrix[idx] * factor;
}
template <class T>
void MultiplyAndCopyColumns(const hipStream_t& stream,
size_t rows,
size_t cols,
T* __restrict__ out_ptr,
const T* const __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
const int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = DivUp(rows * cols, BLOCKSIZE);
if (grid.x) {
hipLaunchKernelGGL(( MultiplyAndCopyColumnsKernel<T>), dim3(grid), dim3(block), 0, stream,
rows, cols, out_ptr, col_major_matrix, vector);
}
}
template void MultiplyAndCopyColumns<float>(
const hipStream_t& stream,
size_t rows,
size_t cols,
float* __restrict__ out_ptr,
const float* const __restrict__ col_major_matrix,
const float* const __restrict__ vector);
} // namespace impl
} // namespace ml
} // namespace open3d
| 13b0f20c40064067a5115a66756964554734701a.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include "open3d/ml/impl/continuous_conv/ContinuousConvCUDAKernels.h"
#include "open3d/utility/Helper.h"
using open3d::utility::DivUp;
namespace open3d {
namespace ml {
namespace impl {
/// Kernel for FillColumn
template <class TReal,
class TIndex,
bool ALIGN_CORNERS,
CoordinateMapping MAPPING,
InterpolationMode INTERPOLATION>
__global__ void FillColumnKernel(
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
const TReal* const __restrict__ inp_importance,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
int filter_size_x,
int filter_size_y,
int filter_size_z,
bool INDIVIDUAL_EXTENT,
bool ISOTROPIC_EXTENT,
bool NORMALIZE,
bool POINT_IMPORTANCE,
bool NEIGHBOR_IMPORTANCE) {
TIndex out_idx = begin_idx + blockIdx.x;
if (out_idx >= end_idx) return;
const int NUM_INTERP_VALUES =
(INTERPOLATION == InterpolationMode::LINEAR ||
INTERPOLATION == InterpolationMode::LINEAR_BORDER
? 8
: 1);
TReal interp_weights[NUM_INTERP_VALUES];
TIndex interp_indices[NUM_INTERP_VALUES];
TReal offset[3] = {offsets[0], offsets[1], offsets[2]};
const TIndex col_idx = out_idx - begin_idx;
TReal* out_column = columns + filter_size_x * filter_size_y *
filter_size_z * in_channels * col_idx;
const int64_t neighbor_start = neighbors_row_splits[out_idx];
const int64_t neighbor_end = neighbors_row_splits[out_idx + 1];
TReal out_pos[3] = {out_positions[out_idx * 3 + 0],
out_positions[out_idx * 3 + 1],
out_positions[out_idx * 3 + 2]};
TReal inv_extents[3];
if (INDIVIDUAL_EXTENT) {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[out_idx];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[3 * out_idx + 0];
inv_extents[1] = TReal(1) / extents[3 * out_idx + 1];
inv_extents[2] = TReal(1) / extents[3 * out_idx + 2];
}
} else {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = TReal(1) / extents[1];
inv_extents[2] = TReal(1) / extents[2];
}
}
TReal normalizer = TReal(0);
if (NORMALIZE) {
if (NEIGHBOR_IMPORTANCE) {
for (int64_t n_idx = neighbor_start + threadIdx.x;
n_idx < neighbor_end; n_idx += blockDim.x) {
TReal n_importance = neighbors_importance[n_idx];
normalizer += n_importance;
}
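            // The per-thread partial sums are reduced below with warp shuffles:
            // FillColumn launches this kernel with blockDim.x == 32 (one warp per
            // output point), so the shfl_down tree leaves the warp total in lane 0
            // and the final shfl_sync broadcasts it to every thread of the warp.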
unsigned int mask = __activemask();
for (int offset = blockDim.x / 2; offset > 0; offset /= 2)
normalizer += __shfl_down_sync(mask, normalizer, offset);
normalizer = __shfl_sync(mask, normalizer, 0);
} else {
int64_t num_neighbors = neighbor_end - neighbor_start;
normalizer = num_neighbors;
}
}
for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) {
const TIndex inp_idx = neighbors_index[n_idx];
const TReal n_importance =
NEIGHBOR_IMPORTANCE ? neighbors_importance[n_idx] : TReal(1);
TReal x, y, z;
x = inp_positions[inp_idx * 3 + 0] - out_pos[0];
y = inp_positions[inp_idx * 3 + 1] - out_pos[1];
z = inp_positions[inp_idx * 3 + 2] - out_pos[2];
ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>(
x, y, z, filter_size_x, filter_size_y, filter_size_z,
inv_extents[0], inv_extents[1], inv_extents[2], offset[0],
offset[1], offset[2]);
Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z,
filter_size_x, filter_size_y, filter_size_z);
TReal infeat = 0;
TReal importance = 1;
if (POINT_IMPORTANCE) importance = inp_importance[inp_idx];
if (NEIGHBOR_IMPORTANCE) importance *= n_importance;
if (NORMALIZE && normalizer != 0) importance /= normalizer;
for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) {
infeat = importance * inp_features[inp_idx * in_channels + ic];
for (int j = 0; j < NUM_INTERP_VALUES; ++j) {
TReal value = interp_weights[j] * infeat;
out_column[interp_indices[j] * in_channels + ic] += value;
}
}
} // for n
}
template <class TReal, class TIndex>
void FillColumn(const cudaStream_t& stream,
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
const TReal* const __restrict__ inp_importance,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize) {
const int filter_size_z = filter_dims[0];
const int filter_size_y = filter_dims[1];
const int filter_size_x = filter_dims[2];
TIndex num_columns = end_idx - begin_idx;
int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z;
cudaMemsetAsync(
columns, 0,
sizeof(TReal) * filter_spatial_size * in_channels * num_columns,
stream);
const int BLOCKSIZE = 32;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = num_columns;
#define FN_PARAMETERS \
columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \
inp_positions, inp_features, inp_importance, neighbors_index_size, \
neighbors_index, neighbors_importance, neighbors_row_splits, \
extents, offsets, filter_size_x, filter_size_y, filter_size_z, \
individual_extent, isotropic_extent, normalize, \
inp_importance != nullptr, neighbors_importance != nullptr
#define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \
if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \
ALIGN_CORNERS == align_corners) \
FillColumnKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, INTERPOLATION> \
<<<grid, block, 0, stream>>>(FN_PARAMETERS);
#define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, false)
#define CALL_TEMPLATE3(INTERPOLATION) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \
CALL_TEMPLATE2(INTERPOLATION, \
CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY)
#define CALL_TEMPLATE4 \
CALL_TEMPLATE3(InterpolationMode::LINEAR) \
CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \
CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR)
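    // The CALL_TEMPLATE* macros above expand into a chain of if-statements: the runtime
    // (interpolation, coordinate_mapping, align_corners) triple selects exactly one of
    // the 3 x 3 x 2 = 18 precompiled FillColumnKernel specializations, keeping the hot
    // loop free of per-element branching on these options.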
if (grid.x) {
CALL_TEMPLATE4
/*CHECK_CUDA_ERROR*/
}
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef CALL_TEMPLATE4
#undef FN_PARAMETERS
}
template void FillColumn<float, int32_t>(
const cudaStream_t& stream,
float* columns,
int in_channels,
int32_t begin_idx,
int32_t end_idx,
int32_t num_out,
const float* const __restrict__ out_positions,
int32_t num_inp,
const float* const __restrict__ inp_positions,
const float* const __restrict__ inp_features,
const float* const __restrict__ inp_importance,
size_t neighbors_index_size,
const int32_t* const __restrict__ neighbors_index,
const float* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const float* const __restrict__ extents,
const float* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize);
template <class TReal,
class TIndex,
bool ALIGN_CORNERS,
CoordinateMapping MAPPING,
InterpolationMode INTERPOLATION>
__global__ void FillColumnTransposeKernel(
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ inp_neighbors_importance_sum,
const int64_t* const __restrict__ inp_neighbors_prefix_sum,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
int filter_size_x,
int filter_size_y,
int filter_size_z,
bool INDIVIDUAL_EXTENT,
bool ISOTROPIC_EXTENT,
bool NORMALIZE,
bool NEIGHBOR_IMPORTANCE) {
TIndex out_idx = begin_idx + blockIdx.x;
if (out_idx >= end_idx) return;
const int NUM_INTERP_VALUES =
(INTERPOLATION == InterpolationMode::LINEAR ||
INTERPOLATION == InterpolationMode::LINEAR_BORDER
? 8
: 1);
TReal interp_weights[NUM_INTERP_VALUES];
TIndex interp_indices[NUM_INTERP_VALUES];
TReal offset[3] = {offsets[0], offsets[1], offsets[2]};
const TIndex col_idx = out_idx - begin_idx;
TReal* out_column = columns + filter_size_x * filter_size_y *
filter_size_z * in_channels * col_idx;
const int64_t neighbor_start = neighbors_row_splits[out_idx];
const int64_t neighbor_end = neighbors_row_splits[out_idx + 1];
TReal out_pos[3] = {out_positions[out_idx * 3 + 0],
out_positions[out_idx * 3 + 1],
out_positions[out_idx * 3 + 2]};
TReal inv_extents[3];
if (INDIVIDUAL_EXTENT == false) {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[0];
inv_extents[1] = TReal(1) / extents[1];
inv_extents[2] = TReal(1) / extents[2];
}
}
for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) {
const TIndex inp_idx = neighbors_index[n_idx];
TReal x, y, z;
x = out_pos[0] - inp_positions[inp_idx * 3 + 0];
y = out_pos[1] - inp_positions[inp_idx * 3 + 1];
z = out_pos[2] - inp_positions[inp_idx * 3 + 2];
if (INDIVIDUAL_EXTENT) {
if (ISOTROPIC_EXTENT) {
inv_extents[0] = TReal(1) / extents[inp_idx];
inv_extents[1] = inv_extents[0];
inv_extents[2] = inv_extents[0];
} else {
inv_extents[0] = TReal(1) / extents[3 * inp_idx + 0];
inv_extents[1] = TReal(1) / extents[3 * inp_idx + 1];
inv_extents[2] = TReal(1) / extents[3 * inp_idx + 2];
}
}
TReal num_inp_neighbors_normalizer = 1;
if (NORMALIZE) {
if (NEIGHBOR_IMPORTANCE) {
if (inp_neighbors_importance_sum[inp_idx] != 0)
num_inp_neighbors_normalizer /=
inp_neighbors_importance_sum[inp_idx];
} else {
const int64_t inp_neighbor_start =
inp_neighbors_prefix_sum[inp_idx];
const int64_t inp_neighbor_end =
inp_idx + 1 < num_inp
? inp_neighbors_prefix_sum[inp_idx + 1]
: neighbors_index_size;
const size_t num_inp_neighbors =
inp_neighbor_end - inp_neighbor_start;
if (num_inp_neighbors > 0)
num_inp_neighbors_normalizer /= num_inp_neighbors;
}
}
ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>(
x, y, z, filter_size_x, filter_size_y, filter_size_z,
inv_extents[0], inv_extents[1], inv_extents[2], offset[0],
offset[1], offset[2]);
Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z,
filter_size_x, filter_size_y, filter_size_z);
TReal infeat = 0;
for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) {
infeat = inp_features[inp_idx * in_channels + ic];
if (NEIGHBOR_IMPORTANCE) infeat *= neighbors_importance[n_idx];
if (NORMALIZE) infeat *= num_inp_neighbors_normalizer;
for (int j = 0; j < NUM_INTERP_VALUES; ++j) {
TReal value = interp_weights[j] * infeat;
out_column[interp_indices[j] * in_channels + ic] += value;
}
}
} // for n
}
template <class TReal, class TIndex>
void FillColumnTranspose(
const cudaStream_t& stream,
TReal* columns,
int in_channels,
TIndex begin_idx,
TIndex end_idx,
TIndex num_out,
const TReal* const __restrict__ out_positions,
TIndex num_inp,
const TReal* const __restrict__ inp_positions,
const TReal* const __restrict__ inp_features,
const TReal* const __restrict__ inp_neighbors_importance_sum,
const int64_t* const __restrict__ inp_neighbors_prefix_sum,
size_t neighbors_index_size,
const TIndex* const __restrict__ neighbors_index,
const TReal* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const TReal* const __restrict__ extents,
const TReal* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize) {
const bool has_neighbors_importance = inp_neighbors_importance_sum;
const int filter_size_z = filter_dims[0];
const int filter_size_y = filter_dims[1];
const int filter_size_x = filter_dims[2];
TIndex num_columns = end_idx - begin_idx;
int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z;
cudaMemsetAsync(
columns, 0,
sizeof(TReal) * filter_spatial_size * in_channels * num_columns,
stream);
const int BLOCKSIZE = 32;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = num_columns;
#define FN_PARAMETERS \
columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \
inp_positions, inp_features, neighbors_index_size, \
neighbors_index, inp_neighbors_importance_sum, \
inp_neighbors_prefix_sum, neighbors_importance, \
neighbors_row_splits, extents, offsets, filter_size_x, \
filter_size_y, filter_size_z, individual_extent, isotropic_extent, \
normalize, has_neighbors_importance
#define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \
if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \
ALIGN_CORNERS == align_corners) \
FillColumnTransposeKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, \
INTERPOLATION> \
<<<grid, block, 0, stream>>>(FN_PARAMETERS);
#define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \
CALL_TEMPLATE(INTERPOLATION, MAPPING, false)
#define CALL_TEMPLATE3(INTERPOLATION) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \
CALL_TEMPLATE2(INTERPOLATION, \
CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \
CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY)
#define CALL_TEMPLATE4 \
CALL_TEMPLATE3(InterpolationMode::LINEAR) \
CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \
CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR)
if (grid.x) {
CALL_TEMPLATE4
/*CHECK_CUDA_ERROR*/
}
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef CALL_TEMPLATE4
#undef FN_PARAMETERS
}
template void FillColumnTranspose<float, int32_t>(
const cudaStream_t& stream,
float* columns,
int in_channels,
int32_t begin_idx,
int32_t end_idx,
int32_t num_out,
const float* const __restrict__ out_positions,
int32_t num_inp,
const float* const __restrict__ inp_positions,
const float* const __restrict__ inp_features,
const float* const __restrict__ inp_neighbors_importance_sum,
const int64_t* const __restrict__ inp_neighbors_prefix_sum,
size_t neighbors_index_size,
const int32_t* const __restrict__ neighbors_index,
const float* const __restrict__ neighbors_importance,
const int64_t* const __restrict__ neighbors_row_splits,
const float* const __restrict__ extents,
const float* const __restrict__ offsets,
const std::vector<int>& filter_dims,
InterpolationMode interpolation,
CoordinateMapping coordinate_mapping,
bool align_corners,
bool individual_extent,
bool isotropic_extent,
bool normalize);
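// The two helpers below scale a column-major matrix element-wise by a per-column
// factor. Each thread owns one element; with column-major storage the owning column
// is simply idx / rows, so vector[] is read once per element and no reduction is needed.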
template <class T>
__global__ void MultiplyColumnsKernel(size_t rows,
size_t cols,
T* __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= rows * cols) return;
size_t col = idx / rows;
T factor = vector[col];
col_major_matrix[idx] *= factor;
}
template <class T>
void MultiplyColumns(const cudaStream_t& stream,
size_t rows,
size_t cols,
T* __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
const int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = DivUp(rows * cols, BLOCKSIZE);
if (grid.x) {
MultiplyColumnsKernel<T><<<grid, block, 0, stream>>>(
rows, cols, col_major_matrix, vector);
}
}
template void MultiplyColumns<float>(const cudaStream_t& stream,
size_t rows,
size_t cols,
float* __restrict__ col_major_matrix,
const float* const __restrict__ vector);
template <class T>
__global__ void MultiplyAndCopyColumnsKernel(
size_t rows,
size_t cols,
T* __restrict__ out_ptr,
const T* const __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= rows * cols) return;
size_t col = idx / rows;
T factor = vector[col];
out_ptr[idx] = col_major_matrix[idx] * factor;
}
template <class T>
void MultiplyAndCopyColumns(const cudaStream_t& stream,
size_t rows,
size_t cols,
T* __restrict__ out_ptr,
const T* const __restrict__ col_major_matrix,
const T* const __restrict__ vector) {
const int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = DivUp(rows * cols, BLOCKSIZE);
if (grid.x) {
MultiplyAndCopyColumnsKernel<T><<<grid, block, 0, stream>>>(
rows, cols, out_ptr, col_major_matrix, vector);
}
}
template void MultiplyAndCopyColumns<float>(
const cudaStream_t& stream,
size_t rows,
size_t cols,
float* __restrict__ out_ptr,
const float* const __restrict__ col_major_matrix,
const float* const __restrict__ vector);
} // namespace impl
} // namespace ml
} // namespace open3d
|
a099261871614e7e06fe69ec6078ec08e805dea8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
csymv.cu is nearly identical to chemv.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> c, Thu Oct 8 23:05:34 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
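/*  Illustrative host-side reference for the layout above (schematic pseudo-notation,
    not part of the original MAGMA source). Block row i writes column i of work: its
    own partial y_i plus the conjugate-transposed contributions it owes to the block
    rows above it; chemv_kernel_L_sum then sums each block row of work across the
    columns at or right of the diagonal:

        for i = 0 .. nblocks-1:                         // one thread block per block row
            work(block i, col i)  = sum_{j <= i} A(i,j)   * x(block j)
            for j = 0 .. i-1:
                work(block j, col i) = A(i,j)^H * x(block i)
        y(block r) = beta*y(block r) + alpha * sum_{c >= r} work(block r, col c)
*/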
__global__ void
chemv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_C_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
chemv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_chemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance would deteriorate as the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements chemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_chemv_work requires users to provide a workspace, while
magmablas_chemv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call chemv frequently, we suggest using
magmablas_chemv_work instead of magmablas_chemv, as the overhead of
allocating and freeing device memory in magmablas_chemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
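// Illustrative usage sketch: it shows how a caller could size the dwork workspace
// per the LWORK requirement documented above before calling magmablas_chemv_work.
// The unit strides and the MagmaLower choice are assumptions for illustration only;
// all routines used (magma_ceildiv, magma_cmalloc, magma_free) appear in this file.
//
//     magma_int_t blocks = magma_ceildiv( n, 64 );   // NB_X = 64
//     magma_int_t lwork  = ldda*blocks;              // LWORK >= LDDA * ceil( N / NB_X )
//     magmaFloatComplex_ptr dwork;
//     magma_cmalloc( &dwork, lwork );
//     magmablas_chemv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
//                           beta, dy, 1, dwork, lwork, queue );
//     magma_free( dwork );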
extern "C"
magma_int_t
magmablas_chemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( chemv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( chemv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( chemv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( chemv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_chemv_work
/***************************************************************************//**
Purpose
-------
magmablas_chemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance would deteriorate as the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_chemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_chemv
| a099261871614e7e06fe69ec6078ec08e805dea8.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
csymv.cu is nearly identical to chemv.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> c, Thu Oct 8 23:05:34 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
chemv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_C_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
chemv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_chemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance would deteriorate as the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements chemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_chemv_work requires users to provide a workspace, while
magmablas_chemv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call chemv frequently, we suggest using
magmablas_chemv_work instead of magmablas_chemv, as the overhead of
allocating and freeing device memory in magmablas_chemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
chemv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
chemv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
chemv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
chemv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_chemv_work
/***************************************************************************//**
Purpose
-------
magmablas_chemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance would deteriorate as the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_chemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_chemv
|
7ade31b217cd30f4658e3b1cd1fafe09c2e68d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "testKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_matrixA = NULL;
hipMalloc(&d_matrixA, XSIZE*YSIZE);
float *d_matrixB = NULL;
hipMalloc(&d_matrixB, XSIZE*YSIZE);
float *d_matrixC = NULL;
hipMalloc(&d_matrixC, XSIZE*YSIZE);
const unsigned int ah = 1;
const unsigned int aw = 1;
const unsigned int bh = 1;
const unsigned int bw = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(testKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_matrixA, d_matrixB, d_matrixC, ah, aw, bh, bw);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(testKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_matrixA, d_matrixB, d_matrixC, ah, aw, bh, bw);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(testKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_matrixA, d_matrixB, d_matrixC, ah, aw, bh, bw);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7ade31b217cd30f4658e3b1cd1fafe09c2e68d2c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "testKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_matrixA = NULL;
cudaMalloc(&d_matrixA, XSIZE*YSIZE);
float *d_matrixB = NULL;
cudaMalloc(&d_matrixB, XSIZE*YSIZE);
float *d_matrixC = NULL;
cudaMalloc(&d_matrixC, XSIZE*YSIZE);
const unsigned int ah = 1;
const unsigned int aw = 1;
const unsigned int bh = 1;
const unsigned int bw = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
testKernel<<<gridBlock,threadBlock>>>(d_matrixA,d_matrixB,d_matrixC,ah,aw,bh,bw);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
testKernel<<<gridBlock,threadBlock>>>(d_matrixA,d_matrixB,d_matrixC,ah,aw,bh,bw);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
testKernel<<<gridBlock,threadBlock>>>(d_matrixA,d_matrixB,d_matrixC,ah,aw,bh,bw);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2f532b465e408df7b202f035d706a91ecc0083a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
#define W_SZ 16
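// The kernel below assumes a CSR-style adjacency layout. As an illustrative example
// (the concrete graph is an assumption), a 3-vertex graph with edges 0->1, 0->2, 1->2
// could be passed as
//     int v_adj_list[]   = {1, 2, 2};  // concatenated neighbor lists
//     int v_adj_begin[]  = {0, 2, 3};  // offset of each vertex's list in v_adj_list
//     int v_adj_length[] = {2, 1, 0};  // out-degree of each vertex
// Each group of W_SZ consecutive threads (a "virtual warp") then cooperates on one vertex.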
__global__ void kernel_cuda_virtual_wc(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
bool *still_running,
int iteration)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
for (int v2 = 0; v2 < num_vertices; v2 += num_threads)
{
int vertex2 = v2 + tid;
// W_SZ many threads are processing vertex2
int warp_id = vertex2 / W_SZ;
int warp_offset = vertex2 % W_SZ;
for (int v = 0; v < W_SZ; v++)
{
int vertex = warp_id * W_SZ + v;
if (vertex < num_vertices && result[vertex] == iteration)
{
for (int n = 0; n < v_adj_length[vertex]; n += W_SZ)
{
int neighbor_index = n + warp_offset;
if (neighbor_index < v_adj_length[vertex])
{
int neighbor = v_adj_list[v_adj_begin[vertex] + neighbor_index];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = result[vertex] + 1;
*still_running = true;
}
}
}
}
}
}
}
int bfs_cuda_virtual_wc(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
bool *k_still_running;
int kernel_runs = 0;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
bool *still_running = new bool[1];
hipMalloc(&k_v_adj_list, sizeof(int) * num_edges);
hipMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
hipMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
hipMalloc(&k_result, sizeof(int) * num_vertices);
hipMalloc(&k_still_running, sizeof(bool) * 1);
hipMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_result, result, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
do
{
*still_running = false;
hipMemcpy(k_still_running, still_running, sizeof(bool) * 1, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_cuda_virtual_wc), dim3(BLOCKS), dim3(THREADS), 0, 0,
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
k_still_running,
kernel_runs);
kernel_runs++;
hipMemcpy(still_running, k_still_running, sizeof(bool) * 1, hipMemcpyDeviceToHost);
} while (*still_running);
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
hipMemcpy(result, k_result, sizeof(int) * num_vertices, hipMemcpyDeviceToHost);
hipFree(k_v_adj_list);
hipFree(k_v_adj_begin);
hipFree(k_v_adj_length);
hipFree(k_result);
hipFree(k_still_running);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
| 2f532b465e408df7b202f035d706a91ecc0083a7.cu | // Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
#define W_SZ 16
__global__ void kernel_cuda_virtual_wc(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
bool *still_running,
int iteration)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
for (int v2 = 0; v2 < num_vertices; v2 += num_threads)
{
int vertex2 = v2 + tid;
// W_SZ many threads are processing vertex2
int warp_id = vertex2 / W_SZ;
int warp_offset = vertex2 % W_SZ;
for (int v = 0; v < W_SZ; v++)
{
int vertex = warp_id * W_SZ + v;
if (vertex < num_vertices && result[vertex] == iteration)
{
for (int n = 0; n < v_adj_length[vertex]; n += W_SZ)
{
int neighbor_index = n + warp_offset;
if (neighbor_index < v_adj_length[vertex])
{
int neighbor = v_adj_list[v_adj_begin[vertex] + neighbor_index];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = result[vertex] + 1;
*still_running = true;
}
}
}
}
}
}
}
int bfs_cuda_virtual_wc(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
bool *k_still_running;
int kernel_runs = 0;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
bool *still_running = new bool[1];
cudaMalloc(&k_v_adj_list, sizeof(int) * num_edges);
cudaMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
cudaMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
cudaMalloc(&k_result, sizeof(int) * num_vertices);
cudaMalloc(&k_still_running, sizeof(bool) * 1);
cudaMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, cudaMemcpyHostToDevice);
cudaMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_result, result, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
do
{
*still_running = false;
cudaMemcpy(k_still_running, still_running, sizeof(bool) * 1, cudaMemcpyHostToDevice);
kernel_cuda_virtual_wc<<<BLOCKS, THREADS>>>(
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
k_still_running,
kernel_runs);
kernel_runs++;
cudaMemcpy(still_running, k_still_running, sizeof(bool) * 1, cudaMemcpyDeviceToHost);
} while (*still_running);
cudaThreadSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
cudaMemcpy(result, k_result, sizeof(int) * num_vertices, cudaMemcpyDeviceToHost);
cudaFree(k_v_adj_list);
cudaFree(k_v_adj_begin);
cudaFree(k_v_adj_length);
cudaFree(k_result);
cudaFree(k_still_running);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
|
aee13e48c3cccb9083802f2c506b29abdf0f6f66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
const Dtype* scale, const int scale_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
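// For example (shape assumed for illustration): with a per-channel scale on an
// (N, C, H, W) blob, inner_dim = H*W and scale_dim = C, so this maps each
// flattened element index to its channel's scale factor.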
out[index] = in[index] * scale[scale_index];
}
}
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
const Dtype* scale, const Dtype* bias,
const int scale_dim, const int inner_dim, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
const Dtype* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(ScaleBiasForward<Dtype>, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(ScaleForward<Dtype>, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bias_layer_ &&
this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
}
const bool scale_param = (bottom.size() == 1);
Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
if ((!scale_param && propagate_down[1]) ||
(scale_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
const bool in_place = (bottom[0] == top[0]);
const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
// Hack: store big eltwise product in bottom[0] diff, except in the special
// case where this layer itself does the eltwise product, in which case we
// can store it directly in the scale diff, and we're done.
// If we're computing in-place (and not doing eltwise computation), this
// hack doesn't work and we store the product in temp_.
const bool is_eltwise = (bottom[0]->count() == scale->count());
Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
(in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
if (!is_eltwise) {
Dtype* sum_result = NULL;
if (inner_dim_ == 1) {
sum_result = product;
} else if (sum_result_.count() == 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
}
} else {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
sum_result = (outer_dim_ == 1) ?
scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
Dtype(1), product, sum_mult, Dtype(0), sum_result);
}
if (outer_dim_ != 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
if (scale_dim_ == 1) {
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
}
} else {
Dtype* scale_diff = scale->mutable_gpu_diff();
caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
Dtype(1), sum_result, sum_mult, Dtype(scale_param),
scale_diff);
}
}
}
}
if (propagate_down[0]) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* scale_data = scale->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(ScaleForward<Dtype>, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe | aee13e48c3cccb9083802f2c506b29abdf0f6f66.cu | #include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
const Dtype* scale, const int scale_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
const Dtype* scale, const Dtype* bias,
const int scale_dim, const int inner_dim, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
const Dtype* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
ScaleBiasForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
top_data);
} else {
ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bias_layer_ &&
this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
}
const bool scale_param = (bottom.size() == 1);
Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
if ((!scale_param && propagate_down[1]) ||
(scale_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
const bool in_place = (bottom[0] == top[0]);
const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
// Hack: store big eltwise product in bottom[0] diff, except in the special
// case where this layer itself does the eltwise product, in which case we
// can store it directly in the scale diff, and we're done.
// If we're computing in-place (and not doing eltwise computation), this
// hack doesn't work and we store the product in temp_.
const bool is_eltwise = (bottom[0]->count() == scale->count());
Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
(in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
if (!is_eltwise) {
Dtype* sum_result = NULL;
if (inner_dim_ == 1) {
sum_result = product;
} else if (sum_result_.count() == 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
}
} else {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
sum_result = (outer_dim_ == 1) ?
scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
Dtype(1), product, sum_mult, Dtype(0), sum_result);
}
if (outer_dim_ != 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
if (scale_dim_ == 1) {
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
}
} else {
Dtype* scale_diff = scale->mutable_gpu_diff();
caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
Dtype(1), sum_result, sum_mult, Dtype(scale_param),
scale_diff);
}
}
}
}
if (propagate_down[0]) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* scale_data = scale->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe |