hip_filename (string, length 5 to 84) | hip_content (string, length 79 to 9.69M) | cuda_filename (string, length 4 to 83) | cuda_content (string, length 19 to 9.69M) |
---|---|---|---|
46e83615a7ddf8798c94025d0da8808e5dcd21d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2019 Daniil Kazantsev
Copyright 2019 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "TGV_GPU_core.h"
#include "shared.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
/* CUDA implementation of Primal-Dual denoising method for
* Total Generalized Variation (TGV)-L2 model [1] (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume (2D/3D)
* 2. lambda - regularisation parameter
* 3. parameter to control the first-order term (alpha1)
* 4. parameter to control the second-order term (alpha0)
* 5. Number of Chambolle-Pock (Primal-Dual) iterations
* 6. Lipschitz constant (default is 12)
* 7. epsilon: tolerance constant
* 8. GPU device number for a multi-GPU run (default is 0)
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
* References:
* [1] K. Bredies "Total Generalized Variation"
*/
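/* Illustrative host-side call (a sketch only; the parameter values below are
 * examples, not recommendations):
 *
 *   int dimX = 256, dimY = 256, dimZ = 1;     // dimZ == 1 selects the 2D path
 *   float *noisy = ...;                       // dimX*dimY input values (host memory)
 *   float *restored = (float*)malloc(dimX*dimY*sizeof(float));
 *   float info[2];
 *   TGV_GPU_main(noisy, restored, info,
 *                1.5f,     // lambda - regularisation parameter
 *                1.0f,     // alpha1 - first-order term weight
 *                2.0f,     // alpha0 - second-order term weight
 *                500,      // number of Chambolle-Pock iterations
 *                12.0f,    // Lipschitz constant
 *                0.0f,     // epsilon; 0 disables the tolerance-based early stop
 *                0,        // GPU device number
 *                dimX, dimY, dimZ);
 *   // info[0] - iterations performed, info[1] - reached tolerance
 */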
#define BLKXSIZE2D 16
#define BLKYSIZE2D 16
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
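/* e.g. idivup(500, 16) == 32: a 500-pixel dimension is covered by 32 blocks of 16 threads */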
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
__global__ void DualP_2D_kernel(float *U, float *V1, float *V2, float *P1, float *P2, long dimX, long dimY, float sigma)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
/* symmetric boundary conditions (Neumann) */
if ((i >= 0) && (i < dimX-1)) P1[index] += sigma*((U[(i+1) + dimX*j] - U[index]) - V1[index]);
else if (i == dimX-1) P1[index] -= sigma*(V1[index]);
else P1[index] = 0.0f;
if ((j >= 0) && (j < dimY-1)) P2[index] += sigma*((U[i + dimX*(j+1)] - U[index]) - V2[index]);
else if (j == dimY-1) P2[index] -= sigma*(V2[index]);
else P2[index] = 0.0f;
}
return;
}
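/* Pointwise projection of the dual variable P onto the ball of radius alpha1:
   P <- P / max(1, |P|/alpha1), so that sqrt(P1^2 + P2^2) <= alpha1 everywhere. */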
__global__ void ProjP_2D_kernel(float *P1, float *P2, long dimX, long dimY, float alpha1)
{
float grad_magn;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
grad_magn = sqrtf(powf(P1[index],2) + powf(P2[index],2));
grad_magn = grad_magn/alpha1;
if (grad_magn > 1.0f) {
P1[index] /= grad_magn;
P2[index] /= grad_magn;
}
}
return;
}
__global__ void DualQ_2D_kernel(float *V1, float *V2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float sigma)
{
float q1, q2, q11, q22;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
q1 = 0.0f; q2 = 0.0f; q11 = 0.0f; q22 = 0.0f;
if ((i >= 0) && (i < dimX-1)) {
/* boundary conditions (Neumann) */
q1 = V1[(i+1) + dimX*j] - V1[index];
q11 = V2[(i+1) + dimX*j] - V2[index];
}
if ((j >= 0) && (j < dimY-1)) {
q2 = V2[i + dimX*(j+1)] - V2[index];
q22 = V1[i + dimX*(j+1)] - V1[index];
}
Q1[index] += sigma*(q1);
Q2[index] += sigma*(q2);
Q3[index] += sigma*(0.5f*(q11 + q22));
}
return;
}
__global__ void ProjQ_2D_kernel(float *Q1, float *Q2, float *Q3, long dimX, long dimY, float alpha0)
{
float grad_magn;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
grad_magn = sqrtf(powf(Q1[index],2) + powf(Q2[index],2) + 2.0f*powf(Q3[index],2));
grad_magn = grad_magn/alpha0;
if (grad_magn > 1.0f) {
Q1[index] /= grad_magn;
Q2[index] /= grad_magn;
Q3[index] /= grad_magn;
}
}
return;
}
__global__ void DivProjP_2D_kernel(float *U, float *U0, float *P1, float *P2, long dimX, long dimY, float lambda, float tau)
{
float P_v1, P_v2, div;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
if ((i > 0) && (i < dimX-1)) P_v1 = P1[index] - P1[(i-1) + dimX*j];
else if (i == dimX-1) P_v1 = -P1[(i-1) + dimX*j];
else if (i == 0) P_v1 = P1[index];
else P_v1 = 0.0f;
if ((j > 0) && (j < dimY-1)) P_v2 = P2[index] - P2[i + dimX*(j-1)];
else if (j == dimY-1) P_v2 = -P2[i + dimX*(j-1)];
else if (j == 0) P_v2 = P2[index];
else P_v2 = 0.0f;
div = P_v1 + P_v2;
U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau);
}
return;
}
__global__ void UpdV_2D_kernel(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float tau)
{
float q1, q3_x, q2, q3_y, div1, div2;
long i1, j1;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
q1 = 0.0f; q3_x = 0.0f; q2 = 0.0f; q3_y = 0.0f; div1 = 0.0f; div2= 0.0f;
i1 = (i-1) + dimX*j;
j1 = (i) + dimX*(j-1);
/* boundary conditions (Neumann) */
if ((i > 0) && (i < dimX-1)) {
q1 = Q1[index] - Q1[i1];
q3_x = Q3[index] - Q3[i1]; }
else if (i == 0) {
q1 = Q1[index];
q3_x = Q3[index]; }
else if (i == dimX-1) {
q1 = -Q1[i1];
q3_x = -Q3[i1]; }
else {
q1 = 0.0f;
q3_x = 0.0f;
}
if ((j > 0) && (j < dimY-1)) {
q2 = Q2[index] - Q2[j1];
q3_y = Q3[index] - Q3[j1]; }
else if (j == dimY-1) {
q2 = -Q2[j1];
q3_y = -Q3[j1]; }
else if (j == 0) {
q2 = Q2[index];
q3_y = Q3[index]; }
else {
q2 = 0.0f;
q3_y = 0.0f;
}
div1 = q1 + q3_y;
div2 = q3_x + q2;
V1[index] += tau*(P1[index] + div1);
V2[index] += tau*(P2[index] + div2);
}
return;
}
__global__ void copyIm_TGV_kernel(float *U, float *U_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
U_old[index] = U[index];
}
}
__global__ void copyIm_TGV_kernel_ar2(float *V1, float *V2, float *V1_old, float *V2_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
V1_old[index] = V1[index];
V2_old[index] = V2[index];
}
}
__global__ void newU_kernel(float *U, float *U_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
U[index] = 2.0f*U[index] - U_old[index];
}
}
__global__ void newU_kernel_ar2(float *V1, float *V2, float *V1_old, float *V2_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
V1[index] = 2.0f*V1[index] - V1_old[index];
V2[index] = 2.0f*V2[index] - V2_old[index];
}
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
__global__ void DualP_3D_kernel(float *U, float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float sigma)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
if ((i >= 0) && (i < dimX-1)) P1[index] += sigma*((U[(dimX*dimY)*k + (i+1)*dimX+j] - U[index]) - V1[index]);
else if (i == dimX-1) P1[index] -= sigma*(V1[index]);
else P1[index] = 0.0f;
if ((j >= 0) && (j < dimY-1)) P2[index] += sigma*((U[(dimX*dimY)*k + i*dimX+(j+1)] - U[index]) - V2[index]);
else if (j == dimY-1) P2[index] -= sigma*(V2[index]);
else P2[index] = 0.0f;
if ((k >= 0) && (k < dimZ-1)) P3[index] += sigma*((U[(dimX*dimY)*(k+1) + i*dimX+(j)] - U[index]) - V3[index]);
else if (k == dimZ-1) P3[index] -= sigma*(V3[index]);
else P3[index] = 0.0f;
}
return;
}
__global__ void ProjP_3D_kernel(float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float alpha1)
{
float grad_magn;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
grad_magn = (sqrtf(powf(P1[index],2) + powf(P2[index],2) + powf(P3[index],2)))/alpha1;
if (grad_magn > 1.0f) {
P1[index] /= grad_magn;
P2[index] /= grad_magn;
P3[index] /= grad_magn;
}
}
return;
}
__global__ void DualQ_3D_kernel(float *V1, float *V2, float *V3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float sigma)
{
float q1, q2, q3, q11, q22, q33, q44, q55, q66;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
long i1 = (dimX*dimY)*k + (i+1)*dimX+j;
long j1 = (dimX*dimY)*k + (i)*dimX+(j+1);
long k1 = (dimX*dimY)*(k+1) + (i)*dimX+(j);
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
q1 = 0.0f; q11 = 0.0f; q33 = 0.0f; q2 = 0.0f; q22 = 0.0f; q55 = 0.0f; q3 = 0.0f; q44 = 0.0f; q66 = 0.0f;
/* boundary conditions (Neumann) */
if ((i >= 0) && (i < dimX-1)) {
q1 = V1[i1] - V1[index];
q11 = V2[i1] - V2[index];
q33 = V3[i1] - V3[index]; }
if ((j >= 0) && (j < dimY-1)) {
q2 = V2[j1] - V2[index];
q22 = V1[j1] - V1[index];
q55 = V3[j1] - V3[index]; }
if ((k >= 0) && (k < dimZ-1)) {
q3 = V3[k1] - V3[index];
q44 = V1[k1] - V1[index];
q66 = V2[k1] - V2[index]; }
Q1[index] += sigma*(q1); /*Q11*/
Q2[index] += sigma*(q2); /*Q22*/
Q3[index] += sigma*(q3); /*Q33*/
Q4[index] += sigma*(0.5f*(q11 + q22)); /* Q21 / Q12 */
Q5[index] += sigma*(0.5f*(q33 + q44)); /* Q31 / Q13 */
Q6[index] += sigma*(0.5f*(q55 + q66)); /* Q32 / Q23 */
}
return;
}
__global__ void ProjQ_3D_kernel(float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float alpha0)
{
float grad_magn;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
grad_magn = sqrtf(powf(Q1[index],2) + powf(Q2[index],2) + powf(Q3[index],2) + 2.0f*powf(Q4[index],2) + 2.0f*powf(Q5[index],2) + 2.0f*powf(Q6[index],2));
grad_magn = grad_magn/alpha0;
if (grad_magn > 1.0f) {
Q1[index] /= grad_magn;
Q2[index] /= grad_magn;
Q3[index] /= grad_magn;
Q4[index] /= grad_magn;
Q5[index] /= grad_magn;
Q6[index] /= grad_magn;
}
}
return;
}
__global__ void DivProjP_3D_kernel(float *U, float *U0, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float lambda, float tau)
{
float P_v1, P_v2, P_v3, div;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
long i1 = (dimX*dimY)*k + (i-1)*dimX+j;
long j1 = (dimX*dimY)*k + (i)*dimX+(j-1);
long k1 = (dimX*dimY)*(k-1) + (i)*dimX+(j);
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
if ((i > 0) && (i < dimX-1)) P_v1 = P1[index] - P1[i1];
else if (i == dimX-1) P_v1 = -P1[i1];
else if (i == 0) P_v1 = P1[index];
else P_v1 = 0.0f;
if ((j > 0) && (j < dimY-1)) P_v2 = P2[index] - P2[j1];
else if (j == dimY-1) P_v2 = -P2[j1];
else if (j == 0) P_v2 = P2[index];
else P_v2 = 0.0f;
if ((k > 0) && (k < dimZ-1)) P_v3 = P3[index] - P3[k1];
else if (k == dimZ-1) P_v3 = -P3[k1];
else if (k == 0) P_v3 = P3[index];
else P_v3 = 0.0f;
div = P_v1 + P_v2 + P_v3;
U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau);
}
return;
}
__global__ void UpdV_3D_kernel(float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float tau)
{
float q1, q4x, q5x, q2, q4y, q6y, q6z, q5z, q3, div1, div2, div3;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
long i1 = (dimX*dimY)*k + (i-1)*dimX+j;
long j1 = (dimX*dimY)*k + (i)*dimX+(j-1);
long k1 = (dimX*dimY)*(k-1) + (i)*dimX+(j);
/* Q1 - Q11, Q2 - Q22, Q3 - Q33, Q4 - Q21/Q12, Q5 - Q31/Q13, Q6 - Q32/Q23*/
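/* With this packing, div1/div2/div3 below are the components of the divergence of
   the symmetric tensor field Q, e.g. div1 = d_x(Q11) + d_y(Q12) + d_z(Q13), which
   maps to q1 + q4y + q5z; backward differences with the same Neumann-type boundary
   handling as in the scalar case. */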
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
/* boundary conditions (Neumann) */
if ((i > 0) && (i < dimX-1)) {
q1 = Q1[index] - Q1[i1];
q4x = Q4[index] - Q4[i1];
q5x = Q5[index] - Q5[i1]; }
else if (i == 0) {
q1 = Q1[index];
q4x = Q4[index];
q5x = Q5[index]; }
else if (i == dimX-1) {
q1 = -Q1[i1];
q4x = -Q4[i1];
q5x = -Q5[i1]; }
else {
q1 = 0.0f;
q4x = 0.0f;
q5x = 0.0f; }
if ((j > 0) && (j < dimY-1)) {
q2 = Q2[index] - Q2[j1];
q4y = Q4[index] - Q4[j1];
q6y = Q6[index] - Q6[j1]; }
else if (j == dimY-1) {
q2 = -Q2[j1];
q4y = -Q4[j1];
q6y = -Q6[j1]; }
else if (j == 0) {
q2 = Q2[index];
q4y = Q4[index];
q6y = Q6[index]; }
else {
q2 = 0.0f;
q4y = 0.0f;
q6y = 0.0f;
}
if ((k > 0) && (k < dimZ-1)) {
q6z = Q6[index] - Q6[k1];
q5z = Q5[index] - Q5[k1];
q3 = Q3[index] - Q3[k1]; }
else if (k == dimZ-1) {
q6z = -Q6[k1];
q5z = -Q5[k1];
q3 = -Q3[k1]; }
else if (k == 0) {
q6z = Q6[index];
q5z = Q5[index];
q3 = Q3[index]; }
else {
q6z = 0.0f;
q5z = 0.0f;
q3 = 0.0f; }
div1 = q1 + q4y + q5z;
div2 = q4x + q2 + q6z;
div3 = q5x + q6y + q3;
V1[index] += tau*(P1[index] + div1);
V2[index] += tau*(P2[index] + div2);
V3[index] += tau*(P3[index] + div3);
}
return;
}
__global__ void copyIm_TGV_kernel3D(float *U, float *U_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
U_old[index] = U[index];
}
}
__global__ void copyIm_TGV_kernel3D_ar3(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
V1_old[index] = V1[index];
V2_old[index] = V2[index];
V3_old[index] = V3[index];
}
}
__global__ void newU_kernel3D(float *U, float *U_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
U[index] = 2.0f*U[index] - U_old[index];
}
}
__global__ void newU_kernel3D_ar3(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
V1[index] = 2.0f*V1[index] - V1_old[index];
V2[index] = 2.0f*V2[index] - V2_old[index];
V3[index] = 2.0f*V3[index] - V3_old[index];
}
}
__global__ void TGVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, long dimX, long dimY, long num_total)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
__global__ void TGVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, long dimX, long dimY, long dimZ, long num_total)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
/************************ MAIN HOST FUNCTION ***********************/
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
extern "C" int TGV_GPU_main(float *U0, float *U, float *infovector, float lambda, float alpha1, float alpha0, int iterationsNumb, float L2, float epsil, int gpu_device, int dimX, int dimY, int dimZ)
{
int deviceCount = -1; // number of devices
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No CUDA devices found\n");
return -1;
}
checkCudaErrors(hipSetDevice(gpu_device));
long dimTotal = (long)(dimX*dimY*dimZ);
float *U_old, *d_U0, *d_U, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma, re;
int n, count;
count = 0; re = 0.0f;
tau = powf(L2,-0.5f);
sigma = tau;
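/* Primal/dual step sizes: tau = sigma = 1/sqrt(L2). The Chambolle-Pock scheme
   requires tau*sigma*||K||^2 <= 1, so the Lipschitz constant L2 (default 12) acts
   as an upper bound on the squared norm of the TGV operator. */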
CHECK(hipMalloc((void**)&d_U0,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&d_U,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&U_old,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&P1,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&P2,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&Q1,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&Q2,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&Q3,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&V1,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&V2,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&V1_old,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&V2_old,dimTotal*sizeof(float)));
CHECK(hipMemcpy(d_U0,U0,dimTotal*sizeof(float),hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_U,U0,dimTotal*sizeof(float),hipMemcpyHostToDevice));
hipMemset(P1, 0, dimTotal*sizeof(float));
hipMemset(P2, 0, dimTotal*sizeof(float));
hipMemset(Q1, 0, dimTotal*sizeof(float));
hipMemset(Q2, 0, dimTotal*sizeof(float));
hipMemset(Q3, 0, dimTotal*sizeof(float));
hipMemset(V1, 0, dimTotal*sizeof(float));
hipMemset(V2, 0, dimTotal*sizeof(float));
if (dimZ == 1) {
/*2D case */
dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D);
dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D));
for(n=0; n < iterationsNumb; n++) {
/* Calculate Dual Variable P */
hipLaunchKernelGGL(( DualP_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, V1, V2, P1, P2, (long)(dimX), (long)(dimY), sigma);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*Projection onto convex set for P*/
hipLaunchKernelGGL(( ProjP_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, (long)(dimX), (long)(dimY), alpha1);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* Calculate Dual Variable Q */
hipLaunchKernelGGL(( DualQ_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), sigma);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*Projection onto convex set for Q*/
hipLaunchKernelGGL(( ProjQ_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Q1, Q2, Q3, (long)(dimX), (long)(dimY), alpha0);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*saving U into U_old*/
hipLaunchKernelGGL(( copyIm_TGV_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, (long)(dimX), (long)(dimY));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*adjoint operation -> divergence and projection of P*/
hipLaunchKernelGGL(( DivProjP_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, d_U0, P1, P2, (long)(dimX), (long)(dimY), lambda, tau);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*get updated solution U*/
hipLaunchKernelGGL(( newU_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, (long)(dimX), (long)(dimY));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*saving V into V_old*/
hipLaunchKernelGGL(( copyIm_TGV_kernel_ar2), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, V1_old, V2_old, (long)(dimX), (long)(dimY));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* upd V*/
hipLaunchKernelGGL(( UpdV_2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, P1, P2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), tau);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*get new V*/
hipLaunchKernelGGL(( newU_kernel_ar2), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, V1_old, V2_old, (long)(dimX), (long)(dimY));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
hipLaunchKernelGGL(( TGVResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, V1_old, (long)(dimX), (long)(dimY), dimTotal);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(V1_old, V1_old + dimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_U, d_U + dimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
}
else {
/*3D case */
dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE), idivup(dimZ,BLKZSIZE));
float *P3, *Q4, *Q5, *Q6, *V3, *V3_old;
CHECK(hipMalloc((void**)&P3,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&Q4,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&Q5,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&Q6,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&V3,dimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&V3_old,dimTotal*sizeof(float)));
hipMemset(Q4, 0.0f, dimTotal*sizeof(float));
hipMemset(Q5, 0.0f, dimTotal*sizeof(float));
hipMemset(Q6, 0.0f, dimTotal*sizeof(float));
hipMemset(P3, 0.0f, dimTotal*sizeof(float));
hipMemset(V3, 0.0f, dimTotal*sizeof(float));
for(n=0; n < iterationsNumb; n++) {
/* Calculate Dual Variable P */
hipLaunchKernelGGL(( DualP_3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, V1, V2, V3, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*Projection onto convex set for P*/
hipLaunchKernelGGL(( ProjP_3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), alpha1);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* Calculate Dual Variable Q */
hipLaunchKernelGGL(( DualQ_3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, V3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*Projection onto convex set for Q*/
hipLaunchKernelGGL(( ProjQ_3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), alpha0);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*saving U into U_old*/
hipLaunchKernelGGL(( copyIm_TGV_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*adjoint operation -> divergence and projection of P*/
hipLaunchKernelGGL(( DivProjP_3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, d_U0, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, tau);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*get updated solution U*/
hipLaunchKernelGGL(( newU_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*saving V into V_old*/
hipLaunchKernelGGL(( copyIm_TGV_kernel3D_ar3), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* upd V*/
hipLaunchKernelGGL(( UpdV_3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, V3, P1, P2, P3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), tau);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/*get new V*/
hipLaunchKernelGGL(( newU_kernel3D_ar3), dim3(dimGrid),dim3(dimBlock), 0, 0, V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
hipLaunchKernelGGL(( TGVResidCalc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_U, U_old, V1_old, (long)(dimX), (long)(dimY), (long)(dimZ), dimTotal);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(V1_old, V1_old + dimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_U, d_U + dimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
CHECK(hipFree(Q4));
CHECK(hipFree(Q5));
CHECK(hipFree(Q6));
CHECK(hipFree(P3));
CHECK(hipFree(V3));
CHECK(hipFree(V3_old));
}
CHECK(hipMemcpy(U,d_U,dimTotal*sizeof(float),hipMemcpyDeviceToHost));
CHECK(hipFree(d_U0));
CHECK(hipFree(d_U));
CHECK(hipFree(U_old));
CHECK(hipFree(P1));
CHECK(hipFree(P2));
CHECK(hipFree(Q1));
CHECK(hipFree(Q2));
CHECK(hipFree(Q3));
CHECK(hipFree(V1));
CHECK(hipFree(V2));
CHECK(hipFree(V1_old));
CHECK(hipFree(V2_old));
//hipDeviceReset();
/*adding info into info_vector */
infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
hipDeviceSynchronize();
return 0;
}
| 46e83615a7ddf8798c94025d0da8808e5dcd21d6.cu | /*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2019 Daniil Kazantsev
Copyright 2019 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "TGV_GPU_core.h"
#include "shared.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
/* CUDA implementation of Primal-Dual denoising method for
* Total Generalized Variation (TGV)-L2 model [1] (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume (2D/3D)
* 2. lambda - regularisation parameter
* 3. parameter to control the first-order term (alpha1)
* 4. parameter to control the second-order term (alpha0)
* 5. Number of Chambolle-Pock (Primal-Dual) iterations
* 6. Lipschitz constant (default is 12)
* 7. epsilon: tolerance constant
* 8. GPU device number for a multi-GPU run (default is 0)
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
* References:
* [1] K. Bredies "Total Generalized Variation"
*/
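/* Illustrative host-side call (a sketch only; parameter values are examples):
 * for a volume, pass dimZ > 1 to select the 3D branch, e.g.
 *
 *   float info[2];
 *   TGV_GPU_main(noisy_volume, restored_volume, info,
 *                1.5f, 1.0f, 2.0f,    // lambda, alpha1, alpha0
 *                500, 12.0f, 0.0f,    // iterations, Lipschitz constant, epsilon
 *                0,                   // GPU device number
 *                dimX, dimY, dimZ);
 */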
#define BLKXSIZE2D 16
#define BLKYSIZE2D 16
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
__global__ void DualP_2D_kernel(float *U, float *V1, float *V2, float *P1, float *P2, long dimX, long dimY, float sigma)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
/* symmetric boundary conditions (Neuman) */
if ((i >= 0) && (i < dimX-1)) P1[index] += sigma*((U[(i+1) + dimX*j] - U[index]) - V1[index]);
else if (i == dimX-1) P1[index] -= sigma*(V1[index]);
else P1[index] = 0.0f;
if ((j >= 0) && (j < dimY-1)) P2[index] += sigma*((U[i + dimX*(j+1)] - U[index]) - V2[index]);
else if (j == dimY-1) P2[index] -= sigma*(V2[index]);
else P2[index] = 0.0f;
}
return;
}
__global__ void ProjP_2D_kernel(float *P1, float *P2, long dimX, long dimY, float alpha1)
{
float grad_magn;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
grad_magn = sqrtf(powf(P1[index],2) + powf(P2[index],2));
grad_magn = grad_magn/alpha1;
if (grad_magn > 1.0f) {
P1[index] /= grad_magn;
P2[index] /= grad_magn;
}
}
return;
}
__global__ void DualQ_2D_kernel(float *V1, float *V2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float sigma)
{
float q1, q2, q11, q22;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
q1 = 0.0f; q2 = 0.0f; q11 = 0.0f; q22 = 0.0f;
if ((i >= 0) && (i < dimX-1)) {
/* boundary conditions (Neumann) */
q1 = V1[(i+1) + dimX*j] - V1[index];
q11 = V2[(i+1) + dimX*j] - V2[index];
}
if ((j >= 0) && (j < dimY-1)) {
q2 = V2[i + dimX*(j+1)] - V2[index];
q22 = V1[i + dimX*(j+1)] - V1[index];
}
Q1[index] += sigma*(q1);
Q2[index] += sigma*(q2);
Q3[index] += sigma*(0.5f*(q11 + q22));
}
return;
}
__global__ void ProjQ_2D_kernel(float *Q1, float *Q2, float *Q3, long dimX, long dimY, float alpha0)
{
float grad_magn;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
grad_magn = sqrtf(powf(Q1[index],2) + powf(Q2[index],2) + 2.0f*powf(Q3[index],2));
grad_magn = grad_magn/alpha0;
if (grad_magn > 1.0f) {
Q1[index] /= grad_magn;
Q2[index] /= grad_magn;
Q3[index] /= grad_magn;
}
}
return;
}
__global__ void DivProjP_2D_kernel(float *U, float *U0, float *P1, float *P2, long dimX, long dimY, float lambda, float tau)
{
float P_v1, P_v2, div;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
if ((i > 0) && (i < dimX-1)) P_v1 = P1[index] - P1[(i-1) + dimX*j];
else if (i == dimX-1) P_v1 = -P1[(i-1) + dimX*j];
else if (i == 0) P_v1 = P1[index];
else P_v1 = 0.0f;
if ((j > 0) && (j < dimY-1)) P_v2 = P2[index] - P2[i + dimX*(j-1)];
else if (j == dimY-1) P_v2 = -P2[i + dimX*(j-1)];
else if (j == 0) P_v2 = P2[index];
else P_v2 = 0.0f;
div = P_v1 + P_v2;
U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau);
}
return;
}
__global__ void UpdV_2D_kernel(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float tau)
{
float q1, q3_x, q2, q3_y, div1, div2;
long i1, j1;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
q1 = 0.0f; q3_x = 0.0f; q2 = 0.0f; q3_y = 0.0f; div1 = 0.0f; div2= 0.0f;
i1 = (i-1) + dimX*j;
j1 = (i) + dimX*(j-1);
/* boundary conditions (Neumann) */
if ((i > 0) && (i < dimX-1)) {
q1 = Q1[index] - Q1[i1];
q3_x = Q3[index] - Q3[i1]; }
else if (i == 0) {
q1 = Q1[index];
q3_x = Q3[index]; }
else if (i == dimX-1) {
q1 = -Q1[i1];
q3_x = -Q3[i1]; }
else {
q1 = 0.0f;
q3_x = 0.0f;
}
if ((j > 0) && (j < dimY-1)) {
q2 = Q2[index] - Q2[j1];
q3_y = Q3[index] - Q3[j1]; }
else if (j == dimY-1) {
q2 = -Q2[j1];
q3_y = -Q3[j1]; }
else if (j == 0) {
q2 = Q2[index];
q3_y = Q3[index]; }
else {
q2 = 0.0f;
q3_y = 0.0f;
}
div1 = q1 + q3_y;
div2 = q3_x + q2;
V1[index] += tau*(P1[index] + div1);
V2[index] += tau*(P2[index] + div2);
}
return;
}
__global__ void copyIm_TGV_kernel(float *U, float *U_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
U_old[index] = U[index];
}
}
__global__ void copyIm_TGV_kernel_ar2(float *V1, float *V2, float *V1_old, float *V2_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
V1_old[index] = V1[index];
V2_old[index] = V2[index];
}
}
__global__ void newU_kernel(float *U, float *U_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
U[index] = 2.0f*U[index] - U_old[index];
}
}
__global__ void newU_kernel_ar2(float *V1, float *V2, float *V1_old, float *V2_old, long dimX, long dimY)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if ((i < dimX) && (j < dimY)) {
V1[index] = 2.0f*V1[index] - V1_old[index];
V2[index] = 2.0f*V2[index] - V2_old[index];
}
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
__global__ void DualP_3D_kernel(float *U, float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float sigma)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
if ((i >= 0) && (i < dimX-1)) P1[index] += sigma*((U[(dimX*dimY)*k + (i+1)*dimX+j] - U[index]) - V1[index]);
else if (i == dimX-1) P1[index] -= sigma*(V1[index]);
else P1[index] = 0.0f;
if ((j >= 0) && (j < dimY-1)) P2[index] += sigma*((U[(dimX*dimY)*k + i*dimX+(j+1)] - U[index]) - V2[index]);
else if (j == dimY-1) P2[index] -= sigma*(V2[index]);
else P2[index] = 0.0f;
if ((k >= 0) && (k < dimZ-1)) P3[index] += sigma*((U[(dimX*dimY)*(k+1) + i*dimX+(j)] - U[index]) - V3[index]);
else if (k == dimZ-1) P3[index] -= sigma*(V3[index]);
else P3[index] = 0.0f;
}
return;
}
__global__ void ProjP_3D_kernel(float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float alpha1)
{
float grad_magn;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
grad_magn = (sqrtf(powf(P1[index],2) + powf(P2[index],2) + powf(P3[index],2)))/alpha1;
if (grad_magn > 1.0f) {
P1[index] /= grad_magn;
P2[index] /= grad_magn;
P3[index] /= grad_magn;
}
}
return;
}
__global__ void DualQ_3D_kernel(float *V1, float *V2, float *V3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float sigma)
{
float q1, q2, q3, q11, q22, q33, q44, q55, q66;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
long i1 = (dimX*dimY)*k + (i+1)*dimX+j;
long j1 = (dimX*dimY)*k + (i)*dimX+(j+1);
long k1 = (dimX*dimY)*(k+1) + (i)*dimX+(j);
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
q1 = 0.0f; q11 = 0.0f; q33 = 0.0f; q2 = 0.0f; q22 = 0.0f; q55 = 0.0f; q3 = 0.0f; q44 = 0.0f; q66 = 0.0f;
/* boundary conditions (Neumann) */
if ((i >= 0) && (i < dimX-1)) {
q1 = V1[i1] - V1[index];
q11 = V2[i1] - V2[index];
q33 = V3[i1] - V3[index]; }
if ((j >= 0) && (j < dimY-1)) {
q2 = V2[j1] - V2[index];
q22 = V1[j1] - V1[index];
q55 = V3[j1] - V3[index]; }
if ((k >= 0) && (k < dimZ-1)) {
q3 = V3[k1] - V3[index];
q44 = V1[k1] - V1[index];
q66 = V2[k1] - V2[index]; }
Q1[index] += sigma*(q1); /*Q11*/
Q2[index] += sigma*(q2); /*Q22*/
Q3[index] += sigma*(q3); /*Q33*/
Q4[index] += sigma*(0.5f*(q11 + q22)); /* Q21 / Q12 */
Q5[index] += sigma*(0.5f*(q33 + q44)); /* Q31 / Q13 */
Q6[index] += sigma*(0.5f*(q55 + q66)); /* Q32 / Q23 */
}
return;
}
__global__ void ProjQ_3D_kernel(float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float alpha0)
{
float grad_magn;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
grad_magn = sqrtf(powf(Q1[index],2) + powf(Q2[index],2) + powf(Q3[index],2) + 2.0f*powf(Q4[index],2) + 2.0f*powf(Q5[index],2) + 2.0f*powf(Q6[index],2));
grad_magn = grad_magn/alpha0;
if (grad_magn > 1.0f) {
Q1[index] /= grad_magn;
Q2[index] /= grad_magn;
Q3[index] /= grad_magn;
Q4[index] /= grad_magn;
Q5[index] /= grad_magn;
Q6[index] /= grad_magn;
}
}
return;
}
__global__ void DivProjP_3D_kernel(float *U, float *U0, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float lambda, float tau)
{
float P_v1, P_v2, P_v3, div;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
long i1 = (dimX*dimY)*k + (i-1)*dimX+j;
long j1 = (dimX*dimY)*k + (i)*dimX+(j-1);
long k1 = (dimX*dimY)*(k-1) + (i)*dimX+(j);
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
if ((i > 0) && (i < dimX-1)) P_v1 = P1[index] - P1[i1];
else if (i == dimX-1) P_v1 = -P1[i1];
else if (i == 0) P_v1 = P1[index];
else P_v1 = 0.0f;
if ((j > 0) && (j < dimY-1)) P_v2 = P2[index] - P2[j1];
else if (j == dimY-1) P_v2 = -P2[j1];
else if (j == 0) P_v2 = P2[index];
else P_v2 = 0.0f;
if ((k > 0) && (k < dimZ-1)) P_v3 = P3[index] - P3[k1];
else if (k == dimZ-1) P_v3 = -P3[k1];
else if (k == 0) P_v3 = P3[index];
else P_v3 = 0.0f;
div = P_v1 + P_v2 + P_v3;
U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau);
}
return;
}
__global__ void UpdV_3D_kernel(float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float tau)
{
float q1, q4x, q5x, q2, q4y, q6y, q6z, q5z, q3, div1, div2, div3;
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + i*dimX+j;
long i1 = (dimX*dimY)*k + (i-1)*dimX+j;
long j1 = (dimX*dimY)*k + (i)*dimX+(j-1);
long k1 = (dimX*dimY)*(k-1) + (i)*dimX+(j);
/* Q1 - Q11, Q2 - Q22, Q3 - Q33, Q4 - Q21/Q12, Q5 - Q31/Q13, Q6 - Q32/Q23*/
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
/* boundary conditions (Neumann) */
if ((i > 0) && (i < dimX-1)) {
q1 = Q1[index] - Q1[i1];
q4x = Q4[index] - Q4[i1];
q5x = Q5[index] - Q5[i1]; }
else if (i == 0) {
q1 = Q1[index];
q4x = Q4[index];
q5x = Q5[index]; }
else if (i == dimX-1) {
q1 = -Q1[i1];
q4x = -Q4[i1];
q5x = -Q5[i1]; }
else {
q1 = 0.0f;
q4x = 0.0f;
q5x = 0.0f; }
if ((j > 0) && (j < dimY-1)) {
q2 = Q2[index] - Q2[j1];
q4y = Q4[index] - Q4[j1];
q6y = Q6[index] - Q6[j1]; }
else if (j == dimY-1) {
q2 = -Q2[j1];
q4y = -Q4[j1];
q6y = -Q6[j1]; }
else if (j == 0) {
q2 = Q2[index];
q4y = Q4[index];
q6y = Q6[index]; }
else {
q2 = 0.0f;
q4y = 0.0f;
q6y = 0.0f;
}
if ((k > 0) && (k < dimZ-1)) {
q6z = Q6[index] - Q6[k1];
q5z = Q5[index] - Q5[k1];
q3 = Q3[index] - Q3[k1]; }
else if (k == dimZ-1) {
q6z = -Q6[k1];
q5z = -Q5[k1];
q3 = -Q3[k1]; }
else if (k == 0) {
q6z = Q6[index];
q5z = Q5[index];
q3 = Q3[index]; }
else {
q6z = 0.0f;
q5z = 0.0f;
q3 = 0.0f; }
div1 = q1 + q4y + q5z;
div2 = q4x + q2 + q6z;
div3 = q5x + q6y + q3;
V1[index] += tau*(P1[index] + div1);
V2[index] += tau*(P2[index] + div2);
V3[index] += tau*(P3[index] + div3);
}
return;
}
__global__ void copyIm_TGV_kernel3D(float *U, float *U_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
U_old[index] = U[index];
}
}
__global__ void copyIm_TGV_kernel3D_ar3(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
V1_old[index] = V1[index];
V2_old[index] = V2[index];
V3_old[index] = V3[index];
}
}
__global__ void newU_kernel3D(float *U, float *U_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
U[index] = 2.0f*U[index] - U_old[index];
}
}
__global__ void newU_kernel3D_ar3(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if ((i < dimX) && (j < dimY) && (k < dimZ)) {
V1[index] = 2.0f*V1[index] - V1_old[index];
V2[index] = 2.0f*V2[index] - V2_old[index];
V3[index] = 2.0f*V3[index] - V3_old[index];
}
}
__global__ void TGVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, long dimX, long dimY, long num_total)
{
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
long index = i + (dimX)*j;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
__global__ void TGVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, long dimX, long dimY, long dimZ, long num_total)
{
long index;
const long i = blockDim.x * blockIdx.x + threadIdx.x;
const long j = blockDim.y * blockIdx.y + threadIdx.y;
const long k = blockDim.z * blockIdx.z + threadIdx.z;
index = (dimX*dimY)*k + j*dimX+i;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
/************************ MAIN HOST FUNCTION ***********************/
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
extern "C" int TGV_GPU_main(float *U0, float *U, float *infovector, float lambda, float alpha1, float alpha0, int iterationsNumb, float L2, float epsil, int gpu_device, int dimX, int dimY, int dimZ)
{
int deviceCount = -1; // number of devices
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No CUDA devices found\n");
return -1;
}
checkCudaErrors(cudaSetDevice(gpu_device));
long dimTotal = (long)(dimX*dimY*dimZ);
float *U_old, *d_U0, *d_U, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma, re;
int n, count;
count = 0; re = 0.0f;
tau = powf(L2,-0.5f);
sigma = tau;
CHECK(cudaMalloc((void**)&d_U0,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&d_U,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&U_old,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&P1,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&P2,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&Q1,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&Q2,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&Q3,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&V1,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&V2,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&V1_old,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&V2_old,dimTotal*sizeof(float)));
CHECK(cudaMemcpy(d_U0,U0,dimTotal*sizeof(float),cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_U,U0,dimTotal*sizeof(float),cudaMemcpyHostToDevice));
cudaMemset(P1, 0, dimTotal*sizeof(float));
cudaMemset(P2, 0, dimTotal*sizeof(float));
cudaMemset(Q1, 0, dimTotal*sizeof(float));
cudaMemset(Q2, 0, dimTotal*sizeof(float));
cudaMemset(Q3, 0, dimTotal*sizeof(float));
cudaMemset(V1, 0, dimTotal*sizeof(float));
cudaMemset(V2, 0, dimTotal*sizeof(float));
if (dimZ == 1) {
/*2D case */
dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D);
dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D));
for(n=0; n < iterationsNumb; n++) {
/* Calculate Dual Variable P */
DualP_2D_kernel<<<dimGrid,dimBlock>>>(d_U, V1, V2, P1, P2, (long)(dimX), (long)(dimY), sigma);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*Projection onto convex set for P*/
ProjP_2D_kernel<<<dimGrid,dimBlock>>>(P1, P2, (long)(dimX), (long)(dimY), alpha1);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* Calculate Dual Variable Q */
DualQ_2D_kernel<<<dimGrid,dimBlock>>>(V1, V2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), sigma);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*Projection onto convex set for Q*/
ProjQ_2D_kernel<<<dimGrid,dimBlock>>>(Q1, Q2, Q3, (long)(dimX), (long)(dimY), alpha0);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*saving U into U_old*/
copyIm_TGV_kernel<<<dimGrid,dimBlock>>>(d_U, U_old, (long)(dimX), (long)(dimY));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*adjoint operation -> divergence and projection of P*/
DivProjP_2D_kernel<<<dimGrid,dimBlock>>>(d_U, d_U0, P1, P2, (long)(dimX), (long)(dimY), lambda, tau);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*get updated solution U*/
newU_kernel<<<dimGrid,dimBlock>>>(d_U, U_old, (long)(dimX), (long)(dimY));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*saving V into V_old*/
copyIm_TGV_kernel_ar2<<<dimGrid,dimBlock>>>(V1, V2, V1_old, V2_old, (long)(dimX), (long)(dimY));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* upd V*/
UpdV_2D_kernel<<<dimGrid,dimBlock>>>(V1, V2, P1, P2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), tau);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*get new V*/
newU_kernel_ar2<<<dimGrid,dimBlock>>>(V1, V2, V1_old, V2_old, (long)(dimX), (long)(dimY));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
TGVResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_U, U_old, V1_old, (long)(dimX), (long)(dimY), dimTotal);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(V1_old, V1_old + dimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_U, d_U + dimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
}
else {
/*3D case */
dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE), idivup(dimZ,BLKZSIZE));
float *P3, *Q4, *Q5, *Q6, *V3, *V3_old;
CHECK(cudaMalloc((void**)&P3,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&Q4,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&Q5,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&Q6,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&V3,dimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&V3_old,dimTotal*sizeof(float)));
cudaMemset(Q4, 0.0f, dimTotal*sizeof(float));
cudaMemset(Q5, 0.0f, dimTotal*sizeof(float));
cudaMemset(Q6, 0.0f, dimTotal*sizeof(float));
cudaMemset(P3, 0.0f, dimTotal*sizeof(float));
cudaMemset(V3, 0.0f, dimTotal*sizeof(float));
for(n=0; n < iterationsNumb; n++) {
/* Calculate Dual Variable P */
DualP_3D_kernel<<<dimGrid,dimBlock>>>(d_U, V1, V2, V3, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*Projection onto convex set for P*/
ProjP_3D_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), alpha1);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* Calculate Dual Variable Q */
DualQ_3D_kernel<<<dimGrid,dimBlock>>>(V1, V2, V3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*Projection onto convex set for Q*/
ProjQ_3D_kernel<<<dimGrid,dimBlock>>>(Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), alpha0);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*saving U into U_old*/
copyIm_TGV_kernel3D<<<dimGrid,dimBlock>>>(d_U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*adjoint operation -> divergence and projection of P*/
DivProjP_3D_kernel<<<dimGrid,dimBlock>>>(d_U, d_U0, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, tau);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*get updated solution U*/
newU_kernel3D<<<dimGrid,dimBlock>>>(d_U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*saving V into V_old*/
copyIm_TGV_kernel3D_ar3<<<dimGrid,dimBlock>>>(V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* upd V*/
UpdV_3D_kernel<<<dimGrid,dimBlock>>>(V1, V2, V3, P1, P2, P3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), tau);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/*get new V*/
newU_kernel3D_ar3<<<dimGrid,dimBlock>>>(V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
TGVResidCalc3D_kernel<<<dimGrid,dimBlock>>>(d_U, U_old, V1_old, (long)(dimX), (long)(dimY), (long)(dimZ), dimTotal);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(V1_old, V1_old + dimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_U, d_U + dimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
CHECK(cudaFree(Q4));
CHECK(cudaFree(Q5));
CHECK(cudaFree(Q6));
CHECK(cudaFree(P3));
CHECK(cudaFree(V3));
CHECK(cudaFree(V3_old));
}
CHECK(cudaMemcpy(U,d_U,dimTotal*sizeof(float),cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_U0));
CHECK(cudaFree(d_U));
CHECK(cudaFree(U_old));
CHECK(cudaFree(P1));
CHECK(cudaFree(P2));
CHECK(cudaFree(Q1));
CHECK(cudaFree(Q2));
CHECK(cudaFree(Q3));
CHECK(cudaFree(V1));
CHECK(cudaFree(V2));
CHECK(cudaFree(V1_old));
CHECK(cudaFree(V2_old));
//cudaDeviceReset();
/*adding info into info_vector */
infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
cudaDeviceSynchronize();
return 0;
}
|
b057b8dd831fce47b31ad144a3e3114f961e8831.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Example from CUDA By Example
*/
#include <stdio.h>
#define N 10
__global__ void add(int *a, int *b, int *c)
{
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
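// Note: with more than one thread per block the usual global index would be
//   int tid = blockIdx.x * blockDim.x + threadIdx.x;
// this example launches N blocks of a single thread, so blockIdx.x alone suffices.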
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **)&dev_a, N * sizeof(int));
hipMalloc((void **)&dev_b, N * sizeof(int));
hipMalloc((void **)&dev_c, N * sizeof(int));
for (int i = 0; i < N; ++i)
{
a[i] = -i;
b[i] = i * i;
}
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| b057b8dd831fce47b31ad144a3e3114f961e8831.cu | /*
Example from CUDA By Example
*/
#include <stdio.h>
#define N 10
__global__ void add(int *a, int *b, int *c)
{
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)&dev_a, N * sizeof(int));
cudaMalloc((void **)&dev_b, N * sizeof(int));
cudaMalloc((void **)&dev_c, N * sizeof(int));
for (int i = 0; i < N; ++i)
{
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add<<<N, 1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
986fac7dd000d89a5c300529650c9c6719bb2a59.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// this file is inspired by:
// https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h
/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef PADDLE_WITH_CUDA
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#include <hiprand_kernel.h>
#endif
#include <stdint.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <string>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fused_softmax_mask_upper_triangle_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using framework::Tensor;
#ifdef PADDLE_WITH_HIP
#define WARP_SIZE 64
#else
#define WARP_SIZE 32
#endif
#define MASK 0xffffffff
namespace plat = paddle::platform;
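// Vectorized loads/stores: one float2 carries four float16 values and one float4
// carries four floats, i.e. kOneLoadingCounts elements per access.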
__device__ __inline__ void load_data_upper_tri(plat::float16* dst,
const plat::float16* src) {
*(reinterpret_cast<float2*>(dst)) = *(reinterpret_cast<const float2*>(src));
}
__device__ __inline__ void load_data_upper_tri(float* dst, const float* src) {
*(reinterpret_cast<float4*>(dst)) = *(reinterpret_cast<const float4*>(src));
}
__device__ __inline__ void load_zero_vector_upper_tri(plat::float16* dst) {
*(reinterpret_cast<float2*>(dst)) = make_float2(0.0f, 0.0f);
}
__device__ __inline__ void load_zero_vector_upper_tri(float* dst) {
*(reinterpret_cast<float4*>(dst)) = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
int get_pow2_index_value(int value) {
int pow2_index = 0;
while ((1 << pow2_index) < value) {
++pow2_index;
}
return pow2_index;
}
template <typename T>
struct AddOP_upper_tri {
__device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};
template <typename T>
struct MaxOP_upper_tri {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
template <typename T>
__device__ __forceinline__ T warp_shfl_xor_upper_tri(T value, int laneMask,
int width,
unsigned int mask = MASK) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
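// Butterfly warp reduction: XOR shuffles with halving offsets combine partial
// results across all lanes of the warp.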
template <typename T, int batch, int width, template <typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce_upper_tri(T* sum) {
ReduceOp<T> r;
#pragma unroll
for (int offset = width / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int i = 0; i < batch; ++i) {
T b = warp_shfl_xor_upper_tri(sum[i], offset, width);
sum[i] = r(sum[i], b);
}
}
}
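// Forward kernel: each warp handles kLocalBatchSize rows; row blockIdx.x keeps only
// its first (blockIdx.x + 1) columns (upper-triangular mask) and applies a
// numerically stable softmax to them.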
template <typename T, int pow2_index>
__global__ void SoftmaxMaskFuseUpperTriangleGPUKernel(const T* src, T* dst,
int batch_count,
int key_seq_len) {
constexpr int next_pow2 = 1 << pow2_index;
constexpr int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
  constexpr int kLocalIterations = std::max(next_pow2 / warp_size, 4);
constexpr int kLocalBatchSize = (next_pow2 <= 128) ? 2 : 1;
constexpr int kOneLoadingCounts = 4;
int key_seq_len_pow_2 = key_seq_len * key_seq_len;
int first_idx =
(blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * kLocalBatchSize +
blockIdx.x;
int local_block_idx = blockIdx.x + 1;
int warp_iter_upper_bound =
(local_block_idx + kOneLoadingCounts * warp_size - 1) / warp_size;
int local_batches = batch_count - first_idx;
if (local_batches > kLocalBatchSize) local_batches = kLocalBatchSize;
int local_idx = threadIdx.x;
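  // Advance src and dst to the first element owned by this thread.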
src += first_idx * key_seq_len + kOneLoadingCounts * local_idx;
dst += first_idx * key_seq_len + kOneLoadingCounts * local_idx;
float data[kLocalBatchSize][kLocalIterations];
T temp_in[kOneLoadingCounts];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
int batch_total_number = (i >= local_batches) ? 0 : local_block_idx;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < batch_total_number) {
load_data_upper_tri(temp_in,
src + i * key_seq_len_pow_2 + ii * warp_size);
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if ((element_index + counter) < batch_total_number) {
data[i][ii + counter] = static_cast<float>(temp_in[counter]);
} else {
data[i][ii + counter] = -std::numeric_limits<float>::infinity();
}
}
} else {
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
data[i][ii + counter] = -std::numeric_limits<float>::infinity();
}
}
}
}
float max_value[kLocalBatchSize];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
max_value[i] = data[i][0];
#pragma unroll
for (int ii = 1; ii < kLocalIterations; ++ii) {
max_value[i] = (max_value[i] > data[i][ii]) ? max_value[i] : data[i][ii];
}
}
warp_reduce_upper_tri<float, kLocalBatchSize, warp_size, MaxOP_upper_tri>(
max_value);
float sum[kLocalBatchSize]{0.0f};
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ++ii) {
if (ii < warp_iter_upper_bound) {
        data[i][ii] = std::exp(data[i][ii] - max_value[i]);
sum[i] += data[i][ii];
}
}
}
warp_reduce_upper_tri<float, kLocalBatchSize, warp_size, AddOP_upper_tri>(
sum);
T out[kOneLoadingCounts];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
if (i >= local_batches) break;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < local_block_idx) {
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if (element_index + counter < local_block_idx) {
out[counter] = data[i][ii + counter] / sum[i];
} else {
out[counter] = 0;
}
}
load_data_upper_tri(dst + i * key_seq_len_pow_2 + ii * warp_size, out);
} else if (element_index < key_seq_len) {
load_zero_vector_upper_tri(dst + i * key_seq_len_pow_2 +
ii * warp_size);
} else {
break;
}
}
}
}
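// Backward kernel: dX = softmax * (dY - sum_j(dY_j * softmax_j)), evaluated
// row-wise with the same warp layout as the forward kernel.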
template <typename T, int pow2_index>
__global__ void SoftmaxMaskFuseUpperTriangleGradGPUKernel(const T* grad_input,
T* grad_output,
const T* softmax_rst,
int batch_count,
int key_seq_len) {
constexpr int next_pow2 = 1 << pow2_index;
constexpr int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
  constexpr int kLocalIterations = std::max(next_pow2 / warp_size, 4);
constexpr int kLocalBatchSize = (next_pow2 <= 128) ? 2 : 1;
constexpr int kOneLoadingCounts = 4;
int key_seq_len_pow_2 = key_seq_len * key_seq_len;
int first_idx =
(blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * kLocalBatchSize +
blockIdx.x;
int local_block_idx = blockIdx.x + 1;
// micro_batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to be computed within this WARP.
int local_batches = batch_count - first_idx;
if (local_batches > kLocalBatchSize) local_batches = kLocalBatchSize;
// there might be multiple batches per warp. compute the index within the
// batch
int local_idx = threadIdx.x;
// the first element to process by the current thread
int offset = first_idx * key_seq_len + kOneLoadingCounts * local_idx;
grad_input += offset;
grad_output += offset;
softmax_rst += offset;
// load data from global memory
float grad_input_reg[kLocalBatchSize][kLocalIterations]{0.0f};
float softmax_rst_reg[kLocalBatchSize][kLocalIterations]{0.0f};
T temp_grad_input[kOneLoadingCounts];
T temp_softmax_rst[kOneLoadingCounts];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
int batch_total_number = (i >= local_batches) ? 0 : local_block_idx;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < batch_total_number) {
load_data_upper_tri(
temp_grad_input,
grad_input + i * key_seq_len_pow_2 + ii * warp_size);
load_data_upper_tri(
temp_softmax_rst,
softmax_rst + i * key_seq_len_pow_2 + ii * warp_size);
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if (element_index + counter < batch_total_number) {
softmax_rst_reg[i][ii + counter] =
static_cast<float>(temp_softmax_rst[counter]);
}
}
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if (element_index + counter < batch_total_number) {
grad_input_reg[i][ii + counter] =
static_cast<float>(temp_grad_input[counter]) *
softmax_rst_reg[i][ii + counter];
}
}
}
}
}
float sum[kLocalBatchSize];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
sum[i] = grad_input_reg[i][0];
#pragma unroll
for (int ii = 1; ii < kLocalIterations; ++ii) {
sum[i] += grad_input_reg[i][ii];
}
}
warp_reduce_upper_tri<float, kLocalBatchSize, warp_size, AddOP_upper_tri>(
sum);
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
if (i >= local_batches) break;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < key_seq_len) {
// compute gradients
T samples_out[kOneLoadingCounts];
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
samples_out[counter] = grad_input_reg[i][ii + counter] -
softmax_rst_reg[i][ii + counter] * sum[i];
}
load_data_upper_tri(
grad_output + i * key_seq_len_pow_2 + ii * warp_size, samples_out);
}
}
}
}
template <typename Place, typename T>
class SoftmaxMaskFuseUpperTriangleKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* y = context.Output<Tensor>("Out");
auto* x_data = x->data<T>();
auto* y_data = y->mutable_data<T>(context.GetPlace());
auto x_dim = x->dims();
auto batches = x_dim[0];
auto attn_heads = x_dim[1];
auto attn_mul_batch = batches * attn_heads;
auto query_seq_len = x_dim[2];
auto key_seq_len = x_dim[3];
PADDLE_ENFORCE_EQ(key_seq_len, query_seq_len,
platform::errors::InvalidArgument(
"Key seq len must be equal with query seq len "
"received key len: %d, query len: %d",
key_seq_len, query_seq_len));
PADDLE_ENFORCE_EQ(key_seq_len >= 32 && key_seq_len < 8192, true,
platform::errors::InvalidArgument(
"Input x's last dim must be between [32, 8192) "
"received the last dimension of x is %d",
key_seq_len));
auto& place = *context.template device_context<Place>().eigen_device();
auto stream = context.cuda_device_context().stream();
int pow2_index = get_pow2_index_value(key_seq_len);
const int next_pow2 = 1 << pow2_index;
int batch_count = attn_mul_batch * query_seq_len;
int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
int batches_per_warp = (next_pow2 <= 128) ? 2 : 1;
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
PADDLE_ENFORCE_EQ(
query_seq_len % batches_per_block, 0,
platform::errors::InvalidArgument(
"The query seq len (third dim of input X) must can divide the "
"number of batches per block. The query seq len is %d, while "
"the number of batches per block is %d.",
query_seq_len, batches_per_block));
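    // Grid: one block column per query position and enough rows to cover all
    // (batch * head) slices; each block packs warps_per_block warps of warp_size lanes.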
dim3 blocks(query_seq_len,
(attn_mul_batch + batches_per_block) / batches_per_block, 1);
dim3 threads(warp_size, warps_per_block, 1);
switch (pow2_index) {
case 5: // 32
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 5>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 6: // 64
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 6>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 7: // 128
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 7>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 8: // 256
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 8>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 9: // 512
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 9>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 10: // 1024
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 10>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 11: // 2048
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 11>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 12: // 4096
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 12>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
case 13: // 8192
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 13>), dim3(blocks), dim3(threads), 0, stream, x_data, y_data, batch_count,
key_seq_len);
break;
default:
break;
}
}
};
template <typename Place, typename T>
class SoftmaxMaskFuseUpperTriangleGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
auto* softmax_rst = context.Input<Tensor>("Softmax");
auto* grad_x_data = grad_x->mutable_data<T>(context.GetPlace());
auto* grad_y_data = grad_y->data<T>();
auto* softmax_rst_data = softmax_rst->data<T>();
auto y_dim = grad_y->dims();
auto batches = y_dim[0];
auto attn_heads = y_dim[1];
auto attn_mul_batch = batches * attn_heads;
auto query_seq_len = y_dim[2];
auto key_seq_len = y_dim[3];
auto& place = *context.template device_context<Place>().eigen_device();
auto stream = context.cuda_device_context().stream();
int pow2_index = get_pow2_index_value(key_seq_len);
const int next_pow2 = 1 << pow2_index;
int batch_count = attn_mul_batch * query_seq_len;
int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
int batches_per_warp = (next_pow2 <= 128) ? 2 : 1;
// use 128 threads per block to maximum gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
dim3 blocks(query_seq_len,
(attn_mul_batch + batches_per_block) / batches_per_block, 1);
dim3 threads(warp_size, warps_per_block, 1);
switch (pow2_index) {
case 5: // 32
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 5>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 6: // 64
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 6>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 7: // 128
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 7>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 8: // 256
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 8>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 9: // 512
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 9>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 10: // 1024
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 10>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
case 11: // 2048
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 11>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
case 12: // 4096
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 12>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
case 13: // 8192
hipLaunchKernelGGL(( SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 13>), dim3(blocks), dim3(threads), 0, stream, grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
default:
break;
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
fused_softmax_mask_upper_triangle,
ops::SoftmaxMaskFuseUpperTriangleKernel<plat::CUDADeviceContext,
plat::float16>,
ops::SoftmaxMaskFuseUpperTriangleKernel<plat::CUDADeviceContext, float>);
REGISTER_OP_CUDA_KERNEL(
fused_softmax_mask_upper_triangle_grad,
ops::SoftmaxMaskFuseUpperTriangleGradKernel<plat::CUDADeviceContext,
plat::float16>,
ops::SoftmaxMaskFuseUpperTriangleGradKernel<plat::CUDADeviceContext,
float>);
| 986fac7dd000d89a5c300529650c9c6719bb2a59.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// this file is inspired by:
// https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h
/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef PADDLE_WITH_CUDA
#include <cuda.h>
#include <curand_kernel.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#include <hiprand_kernel.h>
#endif
#include <stdint.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <string>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fused_softmax_mask_upper_triangle_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using framework::Tensor;
#ifdef PADDLE_WITH_HIP
#define WARP_SIZE 64
#else
#define WARP_SIZE 32
#endif
#define MASK 0xffffffff
namespace plat = paddle::platform;
__device__ __inline__ void load_data_upper_tri(plat::float16* dst,
const plat::float16* src) {
*(reinterpret_cast<float2*>(dst)) = *(reinterpret_cast<const float2*>(src));
}
__device__ __inline__ void load_data_upper_tri(float* dst, const float* src) {
*(reinterpret_cast<float4*>(dst)) = *(reinterpret_cast<const float4*>(src));
}
__device__ __inline__ void load_zero_vector_upper_tri(plat::float16* dst) {
*(reinterpret_cast<float2*>(dst)) = make_float2(0.0f, 0.0f);
}
__device__ __inline__ void load_zero_vector_upper_tri(float* dst) {
*(reinterpret_cast<float4*>(dst)) = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
int get_pow2_index_value(int value) {
int pow2_index = 0;
while ((1 << pow2_index) < value) {
++pow2_index;
}
return pow2_index;
}
template <typename T>
struct AddOP_upper_tri {
__device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};
template <typename T>
struct MaxOP_upper_tri {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
template <typename T>
__device__ __forceinline__ T warp_shfl_xor_upper_tri(T value, int laneMask,
int width,
unsigned int mask = MASK) {
#if CUDA_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
template <typename T, int batch, int width, template <typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce_upper_tri(T* sum) {
ReduceOp<T> r;
#pragma unroll
for (int offset = width / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int i = 0; i < batch; ++i) {
T b = warp_shfl_xor_upper_tri(sum[i], offset, width);
sum[i] = r(sum[i], b);
}
}
}
template <typename T, int pow2_index>
__global__ void SoftmaxMaskFuseUpperTriangleGPUKernel(const T* src, T* dst,
int batch_count,
int key_seq_len) {
constexpr int next_pow2 = 1 << pow2_index;
constexpr int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
constexpr int kLocalIterations = std::max(next_pow2 / warp_size, 4);
constexpr int kLocalBatchSize = (next_pow2 <= 128) ? 2 : 1;
constexpr int kOneLoadingCounts = 4;
int key_seq_len_pow_2 = key_seq_len * key_seq_len;
int first_idx =
(blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * kLocalBatchSize +
blockIdx.x;
int local_block_idx = blockIdx.x + 1;
int warp_iter_upper_bound =
(local_block_idx + kOneLoadingCounts * warp_size - 1) / warp_size;
int local_batches = batch_count - first_idx;
if (local_batches > kLocalBatchSize) local_batches = kLocalBatchSize;
int local_idx = threadIdx.x;
src += first_idx * key_seq_len + kOneLoadingCounts * local_idx;
dst += first_idx * key_seq_len + kOneLoadingCounts * local_idx;
float data[kLocalBatchSize][kLocalIterations];
T temp_in[kOneLoadingCounts];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
int batch_total_number = (i >= local_batches) ? 0 : local_block_idx;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < batch_total_number) {
load_data_upper_tri(temp_in,
src + i * key_seq_len_pow_2 + ii * warp_size);
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if ((element_index + counter) < batch_total_number) {
data[i][ii + counter] = static_cast<float>(temp_in[counter]);
} else {
data[i][ii + counter] = -std::numeric_limits<float>::infinity();
}
}
} else {
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
data[i][ii + counter] = -std::numeric_limits<float>::infinity();
}
}
}
}
float max_value[kLocalBatchSize];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
max_value[i] = data[i][0];
#pragma unroll
for (int ii = 1; ii < kLocalIterations; ++ii) {
max_value[i] = (max_value[i] > data[i][ii]) ? max_value[i] : data[i][ii];
}
}
warp_reduce_upper_tri<float, kLocalBatchSize, warp_size, MaxOP_upper_tri>(
max_value);
float sum[kLocalBatchSize]{0.0f};
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ++ii) {
if (ii < warp_iter_upper_bound) {
data[i][ii] = std::exp((data[i][ii] - max_value[i]));
sum[i] += data[i][ii];
}
}
}
warp_reduce_upper_tri<float, kLocalBatchSize, warp_size, AddOP_upper_tri>(
sum);
T out[kOneLoadingCounts];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
if (i >= local_batches) break;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < local_block_idx) {
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if (element_index + counter < local_block_idx) {
out[counter] = data[i][ii + counter] / sum[i];
} else {
out[counter] = 0;
}
}
load_data_upper_tri(dst + i * key_seq_len_pow_2 + ii * warp_size, out);
} else if (element_index < key_seq_len) {
load_zero_vector_upper_tri(dst + i * key_seq_len_pow_2 +
ii * warp_size);
} else {
break;
}
}
}
}
template <typename T, int pow2_index>
__global__ void SoftmaxMaskFuseUpperTriangleGradGPUKernel(const T* grad_input,
T* grad_output,
const T* softmax_rst,
int batch_count,
int key_seq_len) {
constexpr int next_pow2 = 1 << pow2_index;
constexpr int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
constexpr int kLocalIterations = std::max(next_pow2 / warp_size, 4);
constexpr int kLocalBatchSize = (next_pow2 <= 128) ? 2 : 1;
constexpr int kOneLoadingCounts = 4;
int key_seq_len_pow_2 = key_seq_len * key_seq_len;
int first_idx =
(blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * kLocalBatchSize +
blockIdx.x;
int local_block_idx = blockIdx.x + 1;
// micro_batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to be computed within this WARP.
int local_batches = batch_count - first_idx;
if (local_batches > kLocalBatchSize) local_batches = kLocalBatchSize;
// there might be multiple batches per warp. compute the index within the
// batch
int local_idx = threadIdx.x;
// the first element to process by the current thread
int offset = first_idx * key_seq_len + kOneLoadingCounts * local_idx;
grad_input += offset;
grad_output += offset;
softmax_rst += offset;
// load data from global memory
float grad_input_reg[kLocalBatchSize][kLocalIterations]{0.0f};
float softmax_rst_reg[kLocalBatchSize][kLocalIterations]{0.0f};
T temp_grad_input[kOneLoadingCounts];
T temp_softmax_rst[kOneLoadingCounts];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
int batch_total_number = (i >= local_batches) ? 0 : local_block_idx;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < batch_total_number) {
load_data_upper_tri(
temp_grad_input,
grad_input + i * key_seq_len_pow_2 + ii * warp_size);
load_data_upper_tri(
temp_softmax_rst,
softmax_rst + i * key_seq_len_pow_2 + ii * warp_size);
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if (element_index + counter < batch_total_number) {
softmax_rst_reg[i][ii + counter] =
static_cast<float>(temp_softmax_rst[counter]);
}
}
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
if (element_index + counter < batch_total_number) {
grad_input_reg[i][ii + counter] =
static_cast<float>(temp_grad_input[counter]) *
softmax_rst_reg[i][ii + counter];
}
}
}
}
}
float sum[kLocalBatchSize];
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
sum[i] = grad_input_reg[i][0];
#pragma unroll
for (int ii = 1; ii < kLocalIterations; ++ii) {
sum[i] += grad_input_reg[i][ii];
}
}
warp_reduce_upper_tri<float, kLocalBatchSize, warp_size, AddOP_upper_tri>(
sum);
#pragma unroll
for (int i = 0; i < kLocalBatchSize; ++i) {
if (i >= local_batches) break;
#pragma unroll
for (int ii = 0; ii < kLocalIterations; ii += kOneLoadingCounts) {
int element_index = kOneLoadingCounts * local_idx + ii * warp_size;
if (element_index < key_seq_len) {
// compute gradients
T samples_out[kOneLoadingCounts];
#pragma unroll
for (int counter = 0; counter < kOneLoadingCounts; ++counter) {
samples_out[counter] = grad_input_reg[i][ii + counter] -
softmax_rst_reg[i][ii + counter] * sum[i];
}
load_data_upper_tri(
grad_output + i * key_seq_len_pow_2 + ii * warp_size, samples_out);
}
}
}
}
template <typename Place, typename T>
class SoftmaxMaskFuseUpperTriangleKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* y = context.Output<Tensor>("Out");
auto* x_data = x->data<T>();
auto* y_data = y->mutable_data<T>(context.GetPlace());
auto x_dim = x->dims();
auto batches = x_dim[0];
auto attn_heads = x_dim[1];
auto attn_mul_batch = batches * attn_heads;
auto query_seq_len = x_dim[2];
auto key_seq_len = x_dim[3];
PADDLE_ENFORCE_EQ(key_seq_len, query_seq_len,
platform::errors::InvalidArgument(
"Key seq len must be equal with query seq len "
"received key len: %d, query len: %d",
key_seq_len, query_seq_len));
PADDLE_ENFORCE_EQ(key_seq_len >= 32 && key_seq_len < 8192, true,
platform::errors::InvalidArgument(
"Input x's last dim must be between [32, 8192) "
"received the last dimension of x is %d",
key_seq_len));
auto& place = *context.template device_context<Place>().eigen_device();
auto stream = context.cuda_device_context().stream();
int pow2_index = get_pow2_index_value(key_seq_len);
const int next_pow2 = 1 << pow2_index;
int batch_count = attn_mul_batch * query_seq_len;
int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
int batches_per_warp = (next_pow2 <= 128) ? 2 : 1;
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
PADDLE_ENFORCE_EQ(
query_seq_len % batches_per_block, 0,
platform::errors::InvalidArgument(
"The query seq len (third dim of input X) must can divide the "
"number of batches per block. The query seq len is %d, while "
"the number of batches per block is %d.",
query_seq_len, batches_per_block));
dim3 blocks(query_seq_len,
(attn_mul_batch + batches_per_block) / batches_per_block, 1);
dim3 threads(warp_size, warps_per_block, 1);
switch (pow2_index) {
case 5: // 32
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 5><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 6: // 64
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 6><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 7: // 128
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 7><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 8: // 256
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 8><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 9: // 512
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 9><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 10: // 1024
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 10><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 11: // 2048
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 11><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 12: // 4096
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 12><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
case 13: // 8192
SoftmaxMaskFuseUpperTriangleGPUKernel<
T, 13><<<blocks, threads, 0, stream>>>(x_data, y_data, batch_count,
key_seq_len);
break;
default:
break;
}
}
};
template <typename Place, typename T>
class SoftmaxMaskFuseUpperTriangleGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
auto* softmax_rst = context.Input<Tensor>("Softmax");
auto* grad_x_data = grad_x->mutable_data<T>(context.GetPlace());
auto* grad_y_data = grad_y->data<T>();
auto* softmax_rst_data = softmax_rst->data<T>();
auto y_dim = grad_y->dims();
auto batches = y_dim[0];
auto attn_heads = y_dim[1];
auto attn_mul_batch = batches * attn_heads;
auto query_seq_len = y_dim[2];
auto key_seq_len = y_dim[3];
auto& place = *context.template device_context<Place>().eigen_device();
auto stream = context.cuda_device_context().stream();
int pow2_index = get_pow2_index_value(key_seq_len);
const int next_pow2 = 1 << pow2_index;
int batch_count = attn_mul_batch * query_seq_len;
int warp_size = (next_pow2 < WARP_SIZE) ? next_pow2 : WARP_SIZE;
int batches_per_warp = (next_pow2 <= 128) ? 2 : 1;
// use 128 threads per block to maximum gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
dim3 blocks(query_seq_len,
(attn_mul_batch + batches_per_block) / batches_per_block, 1);
dim3 threads(warp_size, warps_per_block, 1);
switch (pow2_index) {
case 5: // 32
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 5><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 6: // 64
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 6><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 7: // 128
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 7><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 8: // 256
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 8><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 9: // 512
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 9><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data, batch_count,
key_seq_len);
break;
case 10: // 1024
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 10><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
case 11: // 2048
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 11><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
case 12: // 4096
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 12><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
case 13: // 8192
SoftmaxMaskFuseUpperTriangleGradGPUKernel<
T, 13><<<blocks, threads, 0, stream>>>(grad_y_data, grad_x_data,
softmax_rst_data,
batch_count, key_seq_len);
break;
default:
break;
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
fused_softmax_mask_upper_triangle,
ops::SoftmaxMaskFuseUpperTriangleKernel<plat::CUDADeviceContext,
plat::float16>,
ops::SoftmaxMaskFuseUpperTriangleKernel<plat::CUDADeviceContext, float>);
REGISTER_OP_CUDA_KERNEL(
fused_softmax_mask_upper_triangle_grad,
ops::SoftmaxMaskFuseUpperTriangleGradKernel<plat::CUDADeviceContext,
plat::float16>,
ops::SoftmaxMaskFuseUpperTriangleGradKernel<plat::CUDADeviceContext,
float>);
|
0c4e6bfa48d656117bee7c25fa6e4f40cd98bb87.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include "CBufSpec.hh"
#include "TBuf.hh"
#include "TUtil.hh"
#include "NPY.hpp"
#include "PLOG.hh"
struct printf_functor_i
{
__host__ __device__
void operator()(int x)
{
printf("%d\n", x);
}
};
struct printf_functor_f4
{
__host__ __device__
void operator()(float4 v)
{
printf("%10.4f %10.4f %10.4f %10.4f \n", v.x, v.y, v.z, v.w);
}
};
void test_foreach()
{
LOG(info) << "(" ;
thrust::device_vector<int> ivec(3);
ivec[0] = 0;
ivec[1] = 1;
ivec[2] = 2;
thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());
LOG(info) << ")" ;
}
void test_cbufspec()
{
LOG(info) << "(" ;
thrust::device_vector<int> ivec(3);
ivec[0] = 0;
ivec[1] = 1;
ivec[2] = 2;
CBufSpec ibs = make_bufspec<int>(ivec);
ibs.Summary("ibs");
LOG(info) << ")" ;
}
void test_tbuf(unsigned n, unsigned stride)
{
LOG(info) << "(" ;
thrust::device_vector<int> ivec(n);
for(unsigned i=0 ; i < n ; i++) ivec[i] = i ;
CBufSpec ibs = make_bufspec<int>(ivec);
ibs.Summary("ibs");
TBuf tibs("tibs", ibs );
tibs.dump<int>("tibs dump", stride, 0, n ); // stride, begin, end
LOG(info) << ")" ;
}
void test_ull(unsigned int n, unsigned stride)
{
LOG(info) << "(" ;
thrust::device_vector<unsigned long long> uvec(n);
for(unsigned i=0 ; i < n ; i++)
{
unsigned j = i % 3 ;
if( j == 0) uvec[i] = 0xffeedd;
else if( j == 1) uvec[i] = 0xffaabb;
else if( j == 2) uvec[i] = 0xffbbcc;
else uvec[i] = 0xffffff;
}
//thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());
CBufSpec ubs = make_bufspec<unsigned long long>(uvec);
ubs.Summary("ubs");
TBuf tubs("tubs", ubs );
tubs.dump<unsigned long long>("tubs dump", stride, 0, n );
LOG(info) << ")" ;
}
void test_f4()
{
LOG(info) << "(" ;
thrust::device_vector<float4> fvec(3);
fvec[0] = make_float4( 1.f, 2.f, 3.f, 4.f );
fvec[1] = make_float4( 1.f, 2.f, 3.f, 4.f );
fvec[2] = make_float4( 1.f, 2.f, 3.f, 4.f );
thrust::for_each(fvec.begin(), fvec.end(), printf_functor_f4());
CBufSpec fbs = make_bufspec<float4>(fvec);
fbs.Summary("fbs");
LOG(info) << ")" ;
}
void test_dump()
{
const char* pfx = NULL ;
LOG(info) << "(" ;
NPY<unsigned long long>* ph = NPY<unsigned long long>::load(pfx, "ph%s", "torch", "-5", "rainbow" );
// check
if (!ph) {
printf("can't load data\n");
return ;
}
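    // Copy the host NPY buffer to the device and wrap the raw pointer in a TBuf for dumping.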
thrust::device_vector<unsigned long long> d_ph(ph->begin(), ph->end());
CBufSpec cph = make_bufspec<unsigned long long>(d_ph);
TBuf tph("tph", cph);
tph.dump<unsigned long long>("tph dump", 2, 0, 10 );
LOG(info) << ")" ;
}
int main(int argc, char** argv)
{
PLOG_(argc, argv);
LOG(info) << argv[0] ;
test_foreach();
test_cbufspec();
unsigned stride = 1 ;
test_tbuf(3,stride);
test_tbuf(4,stride);
test_ull(3,stride);
test_ull(6,stride);
test_f4();
test_dump();
hipDeviceSynchronize();
}
// Without the sync the process will typically terminate before
// any output stream gets pumped out to the terminal when
// iterating over device_ptr.
// Curiously that doesn't seem to happen with device_vector ?
// Maybe their dtors are delayed by the dumping
/*
simonblyth@optix thrustrap]$ TBufTest
2016-07-08 17:56:01.307 INFO [32347] [main@140] TBufTest
2016-07-08 17:56:01.307 INFO [32347] [test_foreach@36] (
2016-07-08 17:56:01.592 INFO [32347] [test_foreach@42] )
0
1
2
2016-07-08 17:56:01.593 INFO [32347] [test_cbufspec@48] (
ibs : dev_ptr 0xb07200000 size 3 num_bytes 12
2016-07-08 17:56:01.593 INFO [32347] [test_cbufspec@56] )
2016-07-08 17:56:01.593 INFO [32347] [test_tbuf@61] (
ibs : dev_ptr 0xb07200000 size 3 num_bytes 12
tibs dump tibs
terminate called after throwing an instance of 'thrust::system::system_error'
what(): function_attributes(): after hipFuncGetAttributes: invalid device function
Aborted (core dumped)
*/
| 0c4e6bfa48d656117bee7c25fa6e4f40cd98bb87.cu | /*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include "CBufSpec.hh"
#include "TBuf.hh"
#include "TUtil.hh"
#include "NPY.hpp"
#include "PLOG.hh"
struct printf_functor_i
{
__host__ __device__
void operator()(int x)
{
printf("%d\n", x);
}
};
struct printf_functor_f4
{
__host__ __device__
void operator()(float4 v)
{
printf("%10.4f %10.4f %10.4f %10.4f \n", v.x, v.y, v.z, v.w);
}
};
void test_foreach()
{
LOG(info) << "(" ;
thrust::device_vector<int> ivec(3);
ivec[0] = 0;
ivec[1] = 1;
ivec[2] = 2;
thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());
LOG(info) << ")" ;
}
void test_cbufspec()
{
LOG(info) << "(" ;
thrust::device_vector<int> ivec(3);
ivec[0] = 0;
ivec[1] = 1;
ivec[2] = 2;
CBufSpec ibs = make_bufspec<int>(ivec);
ibs.Summary("ibs");
LOG(info) << ")" ;
}
void test_tbuf(unsigned n, unsigned stride)
{
LOG(info) << "(" ;
thrust::device_vector<int> ivec(n);
for(unsigned i=0 ; i < n ; i++) ivec[i] = i ;
CBufSpec ibs = make_bufspec<int>(ivec);
ibs.Summary("ibs");
TBuf tibs("tibs", ibs );
tibs.dump<int>("tibs dump", stride, 0, n ); // stride, begin, end
LOG(info) << ")" ;
}
void test_ull(unsigned int n, unsigned stride)
{
LOG(info) << "(" ;
thrust::device_vector<unsigned long long> uvec(n);
for(unsigned i=0 ; i < n ; i++)
{
unsigned j = i % 3 ;
if( j == 0) uvec[i] = 0xffeedd;
else if( j == 1) uvec[i] = 0xffaabb;
else if( j == 2) uvec[i] = 0xffbbcc;
else uvec[i] = 0xffffff;
}
//thrust::for_each(ivec.begin(), ivec.end(), printf_functor_i());
CBufSpec ubs = make_bufspec<unsigned long long>(uvec);
ubs.Summary("ubs");
TBuf tubs("tubs", ubs );
tubs.dump<unsigned long long>("tubs dump", stride, 0, n );
LOG(info) << ")" ;
}
void test_f4()
{
LOG(info) << "(" ;
thrust::device_vector<float4> fvec(3);
fvec[0] = make_float4( 1.f, 2.f, 3.f, 4.f );
fvec[1] = make_float4( 1.f, 2.f, 3.f, 4.f );
fvec[2] = make_float4( 1.f, 2.f, 3.f, 4.f );
thrust::for_each(fvec.begin(), fvec.end(), printf_functor_f4());
CBufSpec fbs = make_bufspec<float4>(fvec);
fbs.Summary("fbs");
LOG(info) << ")" ;
}
void test_dump()
{
const char* pfx = NULL ;
LOG(info) << "(" ;
NPY<unsigned long long>* ph = NPY<unsigned long long>::load(pfx, "ph%s", "torch", "-5", "rainbow" );
// check
if (!ph) {
printf("can't load data\n");
return ;
}
thrust::device_vector<unsigned long long> d_ph(ph->begin(), ph->end());
CBufSpec cph = make_bufspec<unsigned long long>(d_ph);
TBuf tph("tph", cph);
tph.dump<unsigned long long>("tph dump", 2, 0, 10 );
LOG(info) << ")" ;
}
int main(int argc, char** argv)
{
PLOG_(argc, argv);
LOG(info) << argv[0] ;
test_foreach();
test_cbufspec();
unsigned stride = 1 ;
test_tbuf(3,stride);
test_tbuf(4,stride);
test_ull(3,stride);
test_ull(6,stride);
test_f4();
test_dump();
cudaDeviceSynchronize();
}
// Without the sync the process will typically terminate before
// any output stream gets pumped out to the terminal when
// iterating over device_ptr.
// Curiously that doesn't seem to happen with device_vector ?
// Maybe their dtors are delayed by the dumping
/*
simonblyth@optix thrustrap]$ TBufTest
2016-07-08 17:56:01.307 INFO [32347] [main@140] TBufTest
2016-07-08 17:56:01.307 INFO [32347] [test_foreach@36] (
2016-07-08 17:56:01.592 INFO [32347] [test_foreach@42] )
0
1
2
2016-07-08 17:56:01.593 INFO [32347] [test_cbufspec@48] (
ibs : dev_ptr 0xb07200000 size 3 num_bytes 12
2016-07-08 17:56:01.593 INFO [32347] [test_cbufspec@56] )
2016-07-08 17:56:01.593 INFO [32347] [test_tbuf@61] (
ibs : dev_ptr 0xb07200000 size 3 num_bytes 12
tibs dump tibs
terminate called after throwing an instance of 'thrust::system::system_error'
what(): function_attributes(): after cudaFuncGetAttributes: invalid device function
Aborted (core dumped)
*/
|
5cce95affc1cb6cc2a139b0e843fd60d77ed39e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <iostream>
#include "gpu-new-forward.h"
#define MIN( a, b ) ( (a < b) ? a : b )
#define TILE_WIDTH 8
__constant__ float deviceKernel1[4][1][7][7]; // 196 * sizeof(float)
__constant__ float deviceKernel2[16][4][7][7]; // 3136 * sizeof(float)
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K) {
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
    Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
    C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
// Insert your GPU convolution kernel code here
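    // Shared-memory tiling: each block caches a TILE_WIDTH x TILE_WIDTH input patch
    // (offset by K/2); halo reads outside the cached tile fall back to global memory,
    // and the filter weights live in __constant__ memory (deviceKernel1/deviceKernel2).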
__shared__ float tile[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int b = blockIdx.x;
int m = blockIdx.y;
int z = blockIdx.z;
int h0 = z / W_grid * TILE_WIDTH;
int w0 = z % W_grid * TILE_WIDTH;
int h = h0 + ty;
int w = w0 + tx;
int w_lim = MIN(w0 + TILE_WIDTH, W_out);
int h_lim = MIN(h0 + TILE_WIDTH, H_out);
if (M == 4 && C == 1 && K == 7) {
if (w < W_out && h < H_out) {
float acc = 0.0f;
for (int c = 0; c < C; c++) {
tile[ty][tx] = x4d(b, c, h+K/2, w+K/2);
__syncthreads();
for (int p = 0; p < K; p++) {
int w_i = w + p;
if (w_i >= W) { break; }
for (int q = 0; q < K; q++) {
int h_i = h + q;
if (h_i >= H) { break; }
int tile_x = ty + q - K/2;
int tile_y = tx + p - K/2;
if (tile_y >= 0 && w_i - K/2 < w_lim && tile_x >= 0 && h_i - K/2 < h_lim) {
acc += tile[tile_x][tile_y] * deviceKernel1[m][c][q][p];
} else {
acc += x4d(b, c, h_i, w_i) * deviceKernel1[m][c][q][p];
}
}
}
__syncthreads();
}
y4d(b, m, h, w) = acc;
}
}
if (M == 16 && C == 4 && K == 7) {
if (w < W_out && h < H_out) {
float acc = 0.0f;
for (int c = 0; c < C; c++) {
tile[ty][tx] = x4d(b, c, h+K/2, w+K/2);
__syncthreads();
for (int p = 0; p < K; p++) {
int w_i = w + p;
if (w_i >= W) { break; }
for (int q = 0; q < K; q++) {
int h_i = h + q;
if (h_i >= H) { break; }
int tile_x = ty + q - K/2;
int tile_y = tx + p - K/2;
if (tile_y >= 0 && w_i - K/2 < w_lim && tile_x >= 0 && h_i - K/2 < h_lim) {
acc += tile[tile_x][tile_y] * deviceKernel2[m][c][q][p];
} else {
acc += x4d(b, c, h_i, w_i) * deviceKernel2[m][c][q][p];
}
}
}
__syncthreads();
}
y4d(b, m, h, w) = acc;
}
}
#undef y4d
#undef x4d
}
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_y, const float *host_x, const float *host_k, float **device_y_ptr, float **device_x_ptr, float **device_k_ptr, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Allocate memory and copy over the relevant data structures to the GPU
// We pass double pointers for you to initialize the relevant device pointers,
// which are passed to the other two functions.
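    // Filter weights are copied to __constant__ memory (deviceKernel1/deviceKernel2),
    // so only x and y need global-memory allocations here.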
const int H_out = H - K + 1;
const int W_out = W - K + 1;
hipMalloc((void **) device_x_ptr, B * C * W * H * sizeof(float));
hipMalloc((void **) device_y_ptr, B * M * W_out * H_out * sizeof(float));
hipMemcpy(*device_x_ptr, host_x, B * C * W * H * sizeof(float), hipMemcpyHostToDevice);
if (M == 4 && C == 1 && K == 7) {
hipMemcpyToSymbol(deviceKernel1, host_k, 196 * sizeof(float));
}
if (M == 16 && C == 4 && K == 7) {
hipMemcpyToSymbol(deviceKernel2, host_k, 3136 * sizeof(float));
}
// Useful snippet for error checking
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl;
exit(-1);
}
}
__host__ void GPUInterface::conv_forward_gpu(float *device_y, const float *device_x, const float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Set the kernel dimensions and call the kernel
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
const int H_grid = ceil(1.0 * H_out / TILE_WIDTH);
const int Z = W_grid * H_grid;
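    // Grid: one block per (image, output map, tile); each block computes a
    // TILE_WIDTH x TILE_WIDTH patch of the output.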
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 dimGrid(B, M, Z);
hipLaunchKernelGGL(( conv_forward_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, device_y, device_x, device_k, B, M, C, H, W, K);
}
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_y, float *device_y, float *device_x, float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Copy the output back to host
hipMemcpy(host_y, device_y, B * M * W_out * H_out * sizeof(float), hipMemcpyDeviceToHost);
// Free device memory
hipFree(device_x);
hipFree(device_y);
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
}
| 5cce95affc1cb6cc2a139b0e843fd60d77ed39e9.cu | #include <cmath>
#include <iostream>
#include "gpu-new-forward.h"
#define MIN( a, b ) ( (a < b) ? a : b )
#define TILE_WIDTH 8
__constant__ float deviceKernel1[4][1][7][7]; // 196 * sizeof(float)
__constant__ float deviceKernel2[16][4][7][7]; // 3136 * sizeof(float)
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K) {
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
    Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
    C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
// Insert your GPU convolution kernel code here
__shared__ float tile[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int b = blockIdx.x;
int m = blockIdx.y;
int z = blockIdx.z;
int h0 = z / W_grid * TILE_WIDTH;
int w0 = z % W_grid * TILE_WIDTH;
int h = h0 + ty;
int w = w0 + tx;
int w_lim = MIN(w0 + TILE_WIDTH, W_out);
int h_lim = MIN(h0 + TILE_WIDTH, H_out);
if (M == 4 && C == 1 && K == 7) {
if (w < W_out && h < H_out) {
float acc = 0.0f;
for (int c = 0; c < C; c++) {
tile[ty][tx] = x4d(b, c, h+K/2, w+K/2);
__syncthreads();
for (int p = 0; p < K; p++) {
int w_i = w + p;
if (w_i >= W) { break; }
for (int q = 0; q < K; q++) {
int h_i = h + q;
if (h_i >= H) { break; }
int tile_x = ty + q - K/2;
int tile_y = tx + p - K/2;
if (tile_y >= 0 && w_i - K/2 < w_lim && tile_x >= 0 && h_i - K/2 < h_lim) {
acc += tile[tile_x][tile_y] * deviceKernel1[m][c][q][p];
} else {
acc += x4d(b, c, h_i, w_i) * deviceKernel1[m][c][q][p];
}
}
}
__syncthreads();
}
y4d(b, m, h, w) = acc;
}
}
if (M == 16 && C == 4 && K == 7) {
if (w < W_out && h < H_out) {
float acc = 0.0f;
for (int c = 0; c < C; c++) {
tile[ty][tx] = x4d(b, c, h+K/2, w+K/2);
__syncthreads();
for (int p = 0; p < K; p++) {
int w_i = w + p;
if (w_i >= W) { break; }
for (int q = 0; q < K; q++) {
int h_i = h + q;
if (h_i >= H) { break; }
int tile_x = ty + q - K/2;
int tile_y = tx + p - K/2;
if (tile_y >= 0 && w_i - K/2 < w_lim && tile_x >= 0 && h_i - K/2 < h_lim) {
acc += tile[tile_x][tile_y] * deviceKernel2[m][c][q][p];
} else {
acc += x4d(b, c, h_i, w_i) * deviceKernel2[m][c][q][p];
}
}
}
__syncthreads();
}
y4d(b, m, h, w) = acc;
}
}
#undef y4d
#undef x4d
}
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_y, const float *host_x, const float *host_k, float **device_y_ptr, float **device_x_ptr, float **device_k_ptr, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Allocate memory and copy over the relevant data structures to the GPU
// We pass double pointers for you to initialize the relevant device pointers,
// which are passed to the other two functions.
const int H_out = H - K + 1;
const int W_out = W - K + 1;
cudaMalloc((void **) device_x_ptr, B * C * W * H * sizeof(float));
cudaMalloc((void **) device_y_ptr, B * M * W_out * H_out * sizeof(float));
cudaMemcpy(*device_x_ptr, host_x, B * C * W * H * sizeof(float), cudaMemcpyHostToDevice);
if (M == 4 && C == 1 && K == 7) {
cudaMemcpyToSymbol(deviceKernel1, host_k, 196 * sizeof(float));
}
if (M == 16 && C == 4 && K == 7) {
cudaMemcpyToSymbol(deviceKernel2, host_k, 3136 * sizeof(float));
}
// Useful snippet for error checking
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl;
exit(-1);
}
}
__host__ void GPUInterface::conv_forward_gpu(float *device_y, const float *device_x, const float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Set the kernel dimensions and call the kernel
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
const int H_grid = ceil(1.0 * H_out / TILE_WIDTH);
const int Z = W_grid * H_grid;
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 dimGrid(B, M, Z);
conv_forward_kernel<<<dimGrid, dimBlock>>>(device_y, device_x, device_k, B, M, C, H, W, K);
}
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_y, float *device_y, float *device_x, float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Copy the output back to host
cudaMemcpy(host_y, device_y, B * M * W_out * H_out * sizeof(float), cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(device_x);
cudaFree(device_y);
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
}
|
e0510f7cd482ab7ff87b12a3e90c405d013667ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
// #include "synchrotron_radiation.h"
#include "utils.h"
#include "cuda_utils.h"
#include <vector>
#include <random>
#include <iostream>
#include <string>
#include <algorithm>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
using namespace std;
int random_generator(double *rand_array, const int n, const double mean,
const double std)
{
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MT19937);
hiprandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_BEST);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
hiprandGenerateNormalDouble(gen, rand_array, n, mean, std);
hiprandDestroyGenerator(gen);
return 0;
}
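// Applies one synchrotron-radiation damping / quantum-excitation kick per particle
// using a grid-stride loop.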
__global__ void synchrotron_radiation_full(double * beam_dE,
const double *rand_array,
const double U0,
const int n_particles,
const double sigma_dE,
const double tau_z,
const double energy,
const int n_kicks)
{
const double const_quantum_exc = 2.0 * sigma_dE / sqrt(tau_z) * energy;
const double const_synch_rad = 2.0 / tau_z;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < n_particles;
i += blockDim.x * gridDim.x)
{
beam_dE[i] += const_quantum_exc * rand_array[i]
- const_synch_rad * beam_dE[i] - U0;
}
}
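// Note: each call applies one damped, noisy energy kick per particle:
// dE <- dE - (2/tau_z)*dE - U0 + (2*sigma_dE*energy/sqrt(tau_z)) * xi,
// where xi ~ N(0,1) is supplied in rand_array. For the tau_z used in main()
// below (~232.0), the damping factor 2/tau_z is roughly 8.6e-3 per call.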
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_particles = 1000000;
const int n_kicks = 1;
int blocks = 512;
int threads = 512;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_particles = atoi(argv[2]);
if (argc > 3) blocks = atoi(argv[3]);
if (argc > 4) threads = atoi(argv[4]);
// initialize variables
vector<double> dE, dt;
double U0, sigma_dE, tau_z, energy;
string input = HOME "/input_files/distribution_10M_particles.txt";
read_distribution(input, n_particles, dt, dE);
U0 = 754257950.345;
sigma_dE = 0.00142927197106;
tau_z = 232.014940939;
energy = 175000000000.0;
thrust::device_vector<double> d_dE = dE;
double *d_dE_ptr = thrust::raw_pointer_cast(d_dE.data());
double *d_rand_array;
hipMalloc((void **)&d_rand_array, n_particles * sizeof(double));
// main loop
auto start = chrono::high_resolution_clock::now();
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MT19937);
hiprandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_BEST);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
for (int i = 0; i < n_turns; ++i) {
// random_generator(d_rand_array, n_particles, 0.0, 1.0);
hiprandGenerateNormalDouble(gen, d_rand_array, n_particles, 0.0, 1.0);
hipDeviceSynchronize();
hipLaunchKernelGGL(( synchrotron_radiation_full) , dim3(blocks), dim3(threads), 0, 0,
d_dE_ptr, d_rand_array, U0, n_particles,
sigma_dE, tau_z, energy, n_kicks);
hipDeviceSynchronize();
}
hiprandDestroyGenerator(gen);
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_dE.begin(), d_dE.end(), dE.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("sync_rad_gpu_v8\ttime(ms)\t%d\t0\t1\n", duration);
printf("dE: %lf\n", accumulate(dE.begin(), dE.end(), 0.0) / n_particles);
// papiprof->stop_counters();
// papiprof->report_timing();
return 0;
}
e0510f7cd482ab7ff87b12a3e90c405d013667ae.cu
#include <stdlib.h>
#include <stdio.h>
// #include "synchrotron_radiation.h"
#include "utils.h"
#include "cuda_utils.h"
#include <vector>
#include <random>
#include <iostream>
#include <string>
#include <algorithm>
#include <numeric> // std::accumulate is used in main
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <curand.h>
#include <cuda.h>
using namespace std;
int random_generator(double *rand_array, const int n, const double mean,
const double std)
{
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937);
curandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_BEST);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
curandGenerateNormalDouble(gen, rand_array, n, mean, std);
curandDestroyGenerator(gen);
return 0;
}
__global__ void synchrotron_radiation_full(double * beam_dE,
const double *rand_array,
const double U0,
const int n_particles,
const double sigma_dE,
const double tau_z,
const double energy,
const int n_kicks)
{
const double const_quantum_exc = 2.0 * sigma_dE / sqrt(tau_z) * energy;
const double const_synch_rad = 2.0 / tau_z;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < n_particles;
i += blockDim.x * gridDim.x)
{
beam_dE[i] += const_quantum_exc * rand_array[i]
- const_synch_rad * beam_dE[i] - U0;
}
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_particles = 1000000;
const int n_kicks = 1;
int blocks = 512;
int threads = 512;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_particles = atoi(argv[2]);
if (argc > 3) blocks = atoi(argv[3]);
if (argc > 4) threads = atoi(argv[4]);
// initialize variables
vector<double> dE, dt;
double U0, sigma_dE, tau_z, energy;
string input = HOME "/input_files/distribution_10M_particles.txt";
read_distribution(input, n_particles, dt, dE);
U0 = 754257950.345;
sigma_dE = 0.00142927197106;
tau_z = 232.014940939;
energy = 175000000000.0;
thrust::device_vector<double> d_dE = dE;
double *d_dE_ptr = thrust::raw_pointer_cast(d_dE.data());
double *d_rand_array;
cudaMalloc((void **)&d_rand_array, n_particles * sizeof(double));
// main loop
auto start = chrono::high_resolution_clock::now();
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937);
curandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_BEST);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
for (int i = 0; i < n_turns; ++i) {
// random_generator(d_rand_array, n_particles, 0.0, 1.0);
curandGenerateNormalDouble(gen, d_rand_array, n_particles, 0.0, 1.0);
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
synchrotron_radiation_full <<< blocks, threads>>>(
d_dE_ptr, d_rand_array, U0, n_particles,
sigma_dE, tau_z, energy, n_kicks);
cudaDeviceSynchronize();
}
curandDestroyGenerator(gen);
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_dE.begin(), d_dE.end(), dE.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("sync_rad_gpu_v8\ttime(ms)\t%d\t0\t1\n", duration);
printf("dE: %lf\n", accumulate(dE.begin(), dE.end(), 0.0) / n_particles);
// papiprof->stop_counters();
// papiprof->report_timing();
return 0;
}
4c171fed2481c1c1cde4ab01278265709aa3c09b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "courtemanche_ramirez_nattel_1998.h"
#include <stddef.h>
#include <stdint.h>
__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes, size_t pitch, bool use_adpt_dt, real min_dt) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
*((real * )((char *) sv + pitch * 0) + threadID) = -8.118000e+01f; //V millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 2.908000e-03f; //m dimensionless
*((real * )((char *) sv + pitch * 2) + threadID) = 9.649000e-01f; //h dimensionless
*((real * )((char *) sv + pitch * 3) + threadID) = 9.775000e-01f; //j dimensionless
*((real * )((char *) sv + pitch * 4) + threadID) = 3.043000e-02f; //oa dimensionless
*((real * )((char *) sv + pitch * 5) + threadID) = 9.992000e-01f; //oi dimensionless
*((real * )((char *) sv + pitch * 6) + threadID) = 4.966000e-03f; //ua dimensionless
*((real * )((char *) sv + pitch * 7) + threadID) = 9.986000e-01f; //ui dimensionless
*((real * )((char *) sv + pitch * 8) + threadID) = 3.296000e-05f; //xr dimensionless
*((real * )((char *) sv + pitch * 9) + threadID) = 1.869000e-02f; //xs dimensionless
*((real * )((char *) sv + pitch * 10) + threadID) = 1.367000e-04f; //d dimensionless
*((real * )((char *) sv + pitch * 11) + threadID) = 9.996000e-01f; //f dimensionless
*((real * )((char *) sv + pitch * 12) + threadID) = 7.755000e-01f; //f_Ca dimensionless
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0f; //u dimensionless
*((real * )((char *) sv + pitch * 14) + threadID) = 1.000000e+00f; //v dimensionless
*((real * )((char *) sv + pitch * 15) + threadID) = 9.992000e-01f; //w dimensionless
*((real * )((char *) sv + pitch * 16) + threadID) = 1.117000e+01f; //Na_i millimolar
*((real * )((char *) sv + pitch * 17) + threadID) = 1.390000e+02f; //K_i millimolar
*((real * )((char *) sv + pitch * 18) + threadID) = 1.013000e-04f; //Ca_i millimolar
*((real * )((char *) sv + pitch * 19) + threadID) = 1.488000e+00f; //Ca_up millimolar
*((real * )((char *) sv + pitch * 20) + threadID) = 1.488000e+00f; //Ca_rel millimolar
if(use_adpt_dt) {
*((real *)((char *)sv + pitch * NEQ) + threadID) = min_dt; // dt
*((real *)((char *)sv + pitch * (NEQ + 1)) + threadID) = 0.0; // time_new
*((real *)((char *)sv + pitch * (NEQ + 2)) + threadID) = 0.0; // previous dt
}
}
}
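// Note: the expressions above of the form
// *((real *)((char *)sv + pitch * n) + threadID)
// index row n (state variable n), column threadID (cell) of a 2D state array
// whose rows are separated by `pitch` bytes -- the usual access pattern for
// memory allocated with hipMallocPitch/cudaMallocPitch (the allocation itself
// is not in this file, so that is an assumption).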
inline __device__ void RHS_gpu(real *sv, real *rDY, real stim_current, int thread_id, real dt, size_t pitch, bool use_adpt_dt) {
//State variables
real V_old_; //millivolt
real m_old_; //dimensionless
real h_old_; //dimensionless
real j_old_; //dimensionless
real oa_old_; //dimensionless
real oi_old_; //dimensionless
real ua_old_; //dimensionless
real ui_old_; //dimensionless
real xr_old_; //dimensionless
real xs_old_; //dimensionless
real d_old_; //dimensionless
real f_old_; //dimensionless
real f_Ca_old_; //dimensionless
real u_old_; //dimensionless
real v_old_; //dimensionless
real w_old_; //dimensionless
real Na_i_old_; //millimolar
real K_i_old_; //millimolar
real Ca_i_old_; //millimolar
real Ca_up_old_; //millimolar
real Ca_rel_old_; //millimolar
if(use_adpt_dt) {
V_old_ = sv[0];
m_old_ = sv[1];
h_old_ = sv[2];
j_old_ = sv[3];
oa_old_ = sv[4];
oi_old_ = sv[5];
ua_old_ = sv[6];
ui_old_ = sv[7];
xr_old_ = sv[8];
xs_old_ = sv[9];
d_old_ = sv[10];
f_old_ = sv[11];
f_Ca_old_ = sv[12];
u_old_ = sv[13];
v_old_ = sv[14];
w_old_ = sv[15];
Na_i_old_ = sv[16];
K_i_old_ = sv[17];
Ca_i_old_ = sv[18];
Ca_up_old_ = sv[19];
Ca_rel_old_ = sv[20];
} else {
V_old_ = *((real*)((char*)sv + pitch * 0) + thread_id);
m_old_ = *((real*)((char*)sv + pitch * 1) + thread_id);
h_old_ = *((real*)((char*)sv + pitch * 2) + thread_id);
j_old_ = *((real*)((char*)sv + pitch * 3) + thread_id);
oa_old_ = *((real*)((char*)sv + pitch * 4) + thread_id);
oi_old_ = *((real*)((char*)sv + pitch * 5) + thread_id);
ua_old_ = *((real*)((char*)sv + pitch * 6) + thread_id);
ui_old_ = *((real*)((char*)sv + pitch * 7) + thread_id);
xr_old_ = *((real*)((char*)sv + pitch * 8) + thread_id);
xs_old_ = *((real*)((char*)sv + pitch * 9) + thread_id);
d_old_ = *((real*)((char*)sv + pitch * 10) + thread_id);
f_old_ = *((real*)((char*)sv + pitch * 11) + thread_id);
f_Ca_old_ = *((real*)((char*)sv + pitch * 12) + thread_id);
u_old_ = *((real*)((char*)sv + pitch * 13) + thread_id);
v_old_ = *((real*)((char*)sv + pitch * 14) + thread_id);
w_old_ = *((real*)((char*)sv + pitch * 15) + thread_id);
Na_i_old_ = *((real*)((char*)sv + pitch * 16) + thread_id);
K_i_old_ = *((real*)((char*)sv + pitch * 17) + thread_id);
Ca_i_old_ = *((real*)((char*)sv + pitch * 18) + thread_id);
Ca_up_old_ = *((real*)((char*)sv + pitch * 19) + thread_id);
Ca_rel_old_ = *((real*)((char*)sv + pitch * 20) + thread_id);
}
#include "courtemanche_ramirez_nattel_1998_common.inc.c"
}
//Include the default solver used by all models.
#include "../default_solvers.cu"
4c171fed2481c1c1cde4ab01278265709aa3c09b.cu
#include "courtemanche_ramirez_nattel_1998.h"
#include <stddef.h>
#include <stdint.h>
__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes, size_t pitch, bool use_adpt_dt, real min_dt) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
*((real * )((char *) sv + pitch * 0) + threadID) = -8.118000e+01f; //V millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 2.908000e-03f; //m dimensionless
*((real * )((char *) sv + pitch * 2) + threadID) = 9.649000e-01f; //h dimensionless
*((real * )((char *) sv + pitch * 3) + threadID) = 9.775000e-01f; //j dimensionless
*((real * )((char *) sv + pitch * 4) + threadID) = 3.043000e-02f; //oa dimensionless
*((real * )((char *) sv + pitch * 5) + threadID) = 9.992000e-01f; //oi dimensionless
*((real * )((char *) sv + pitch * 6) + threadID) = 4.966000e-03f; //ua dimensionless
*((real * )((char *) sv + pitch * 7) + threadID) = 9.986000e-01f; //ui dimensionless
*((real * )((char *) sv + pitch * 8) + threadID) = 3.296000e-05f; //xr dimensionless
*((real * )((char *) sv + pitch * 9) + threadID) = 1.869000e-02f; //xs dimensionless
*((real * )((char *) sv + pitch * 10) + threadID) = 1.367000e-04f; //d dimensionless
*((real * )((char *) sv + pitch * 11) + threadID) = 9.996000e-01f; //f dimensionless
*((real * )((char *) sv + pitch * 12) + threadID) = 7.755000e-01f; //f_Ca dimensionless
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0f; //u dimensionless
*((real * )((char *) sv + pitch * 14) + threadID) = 1.000000e+00f; //v dimensionless
*((real * )((char *) sv + pitch * 15) + threadID) = 9.992000e-01f; //w dimensionless
*((real * )((char *) sv + pitch * 16) + threadID) = 1.117000e+01f; //Na_i millimolar
*((real * )((char *) sv + pitch * 17) + threadID) = 1.390000e+02f; //K_i millimolar
*((real * )((char *) sv + pitch * 18) + threadID) = 1.013000e-04f; //Ca_i millimolar
*((real * )((char *) sv + pitch * 19) + threadID) = 1.488000e+00f; //Ca_up millimolar
*((real * )((char *) sv + pitch * 20) + threadID) = 1.488000e+00f; //Ca_rel millimolar
if(use_adpt_dt) {
*((real *)((char *)sv + pitch * NEQ) + threadID) = min_dt; // dt
*((real *)((char *)sv + pitch * (NEQ + 1)) + threadID) = 0.0; // time_new
*((real *)((char *)sv + pitch * (NEQ + 2)) + threadID) = 0.0; // previous dt
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY, real stim_current, int thread_id, real dt, size_t pitch, bool use_adpt_dt) {
//State variables
real V_old_; //millivolt
real m_old_; //dimensionless
real h_old_; //dimensionless
real j_old_; //dimensionless
real oa_old_; //dimensionless
real oi_old_; //dimensionless
real ua_old_; //dimensionless
real ui_old_; //dimensionless
real xr_old_; //dimensionless
real xs_old_; //dimensionless
real d_old_; //dimensionless
real f_old_; //dimensionless
real f_Ca_old_; //dimensionless
real u_old_; //dimensionless
real v_old_; //dimensionless
real w_old_; //dimensionless
real Na_i_old_; //millimolar
real K_i_old_; //millimolar
real Ca_i_old_; //millimolar
real Ca_up_old_; //millimolar
real Ca_rel_old_; //millimolar
if(use_adpt_dt) {
V_old_ = sv[0];
m_old_ = sv[1];
h_old_ = sv[2];
j_old_ = sv[3];
oa_old_ = sv[4];
oi_old_ = sv[5];
ua_old_ = sv[6];
ui_old_ = sv[7];
xr_old_ = sv[8];
xs_old_ = sv[9];
d_old_ = sv[10];
f_old_ = sv[11];
f_Ca_old_ = sv[12];
u_old_ = sv[13];
v_old_ = sv[14];
w_old_ = sv[15];
Na_i_old_ = sv[16];
K_i_old_ = sv[17];
Ca_i_old_ = sv[18];
Ca_up_old_ = sv[19];
Ca_rel_old_ = sv[20];
} else {
V_old_ = *((real*)((char*)sv + pitch * 0) + thread_id);
m_old_ = *((real*)((char*)sv + pitch * 1) + thread_id);
h_old_ = *((real*)((char*)sv + pitch * 2) + thread_id);
j_old_ = *((real*)((char*)sv + pitch * 3) + thread_id);
oa_old_ = *((real*)((char*)sv + pitch * 4) + thread_id);
oi_old_ = *((real*)((char*)sv + pitch * 5) + thread_id);
ua_old_ = *((real*)((char*)sv + pitch * 6) + thread_id);
ui_old_ = *((real*)((char*)sv + pitch * 7) + thread_id);
xr_old_ = *((real*)((char*)sv + pitch * 8) + thread_id);
xs_old_ = *((real*)((char*)sv + pitch * 9) + thread_id);
d_old_ = *((real*)((char*)sv + pitch * 10) + thread_id);
f_old_ = *((real*)((char*)sv + pitch * 11) + thread_id);
f_Ca_old_ = *((real*)((char*)sv + pitch * 12) + thread_id);
u_old_ = *((real*)((char*)sv + pitch * 13) + thread_id);
v_old_ = *((real*)((char*)sv + pitch * 14) + thread_id);
w_old_ = *((real*)((char*)sv + pitch * 15) + thread_id);
Na_i_old_ = *((real*)((char*)sv + pitch * 16) + thread_id);
K_i_old_ = *((real*)((char*)sv + pitch * 17) + thread_id);
Ca_i_old_ = *((real*)((char*)sv + pitch * 18) + thread_id);
Ca_up_old_ = *((real*)((char*)sv + pitch * 19) + thread_id);
Ca_rel_old_ = *((real*)((char*)sv + pitch * 20) + thread_id);
}
#include "courtemanche_ramirez_nattel_1998_common.inc.c"
}
//Include the default solver used by all models.
#include "../default_solvers.cu"
e0670ebe29de89de7d139179228d2a9368ae0dfd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/* -------- KERNEL -------- */
__global__ void reduce_kernel(int * d_out, int * d_in, int size)
{
// position and threadId
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
// do reduction in global memory
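// e.g. with blockDim.x = 8 the strides s are 4, 2, 1; after the last step
// the element owned by tid 0 holds the sum of the block's 8 inputs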
for (unsigned int s = blockDim.x / 2; s>0; s>>=1)
{
if (tid < s)
{
if (pos+s < size) // Handling out of bounds
{
d_in[pos] = d_in[pos] + d_in[pos+s];
}
}
__syncthreads();
}
// only thread 0 writes the result, since thread 0 holds the block's partial sum
if ((tid==0) && (pos < size))
{
d_out[blockIdx.x] = d_in[pos];
}
}
/* -------- KERNEL WRAPPER -------- */
void reduce(int * d_out, int * d_in, int size, int num_threads)
{
// setting up blocks and intermediate result holder
int num_blocks;
if(((size) % num_threads))
{
num_blocks = ((size) / num_threads) + 1;
}
else
{
num_blocks = (size) / num_threads;
}
int * d_intermediate;
hipMalloc(&d_intermediate, sizeof(int)*num_blocks);
hipMemset(d_intermediate, 0, sizeof(int)*num_blocks);
int prev_num_blocks;
int i = 1;
// recursively solving, will run approximately log base num_threads times.
do
{
printf("Round:%.d\n", i);
printf("NumBlocks:%.d\n", num_blocks);
printf("NumThreads:%.d\n", num_threads);
printf("size of array:%.d\n", size);
i++;
hipLaunchKernelGGL(( reduce_kernel), dim3(num_blocks), dim3(num_threads), 0, 0, d_intermediate, d_in, size);
// after this pass d_intermediate holds one partial sum per block
size = num_blocks;
// updating input to intermediate
hipMemcpy(d_in, d_intermediate, sizeof(int)*num_blocks, hipMemcpyDeviceToDevice);
// Updating num_blocks to reflect how many blocks we now want to compute on
prev_num_blocks = num_blocks;
if(size % num_threads)
{
num_blocks = size / num_threads + 1;
}
else
{
num_blocks = size / num_threads;
}
// updating intermediate
hipFree(d_intermediate);
hipMalloc(&d_intermediate, sizeof(int)*num_blocks);
}
while(size > num_threads); // if it is too small, compute rest.
// computing rest
hipLaunchKernelGGL(( reduce_kernel), dim3(1), dim3(size), 0, 0, d_out, d_in, prev_num_blocks);
}
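// Worked trace for the defaults used in main() below (size = 1 << 19,
// num_threads = 512): pass 1 reduces 524288 values with 1024 blocks down to
// 1024 partial sums, pass 2 reduces those with 2 blocks down to 2 values, and
// the final single-block launch adds them into d_out; with an all-ones input
// the expected result is 524288.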
/* -------- MAIN -------- */
int main(int argc, char **argv)
{
printf("@@STARTING@@ \n");
// Setting num_threads
int num_threads = 512;
// Making non-bogus data and setting it on the GPU
const int size = 1<<19;
const int size_out = 1;
int * d_in;
int * d_out;
hipMalloc(&d_in, sizeof(int)*size);
hipMalloc(&d_out, sizeof(int)*size_out);
int * h_in = (int *)malloc(size*sizeof(int));
for (int i = 0; i < size; i++) h_in[i] = 1;
hipMemcpy(d_in, h_in, sizeof(int)*size, hipMemcpyHostToDevice);
// Running kernel wrapper
reduce(d_out, d_in, size, num_threads);
int result;
hipMemcpy(&result, d_out, sizeof(int), hipMemcpyDeviceToHost);
printf("\nFINAL SUM IS: %d\n", result);
}
e0670ebe29de89de7d139179228d2a9368ae0dfd.cu
#include <stdio.h>
/* -------- KERNEL -------- */
__global__ void reduce_kernel(int * d_out, int * d_in, int size)
{
// position and threadId
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
// do reduction in global memory
for (unsigned int s = blockDim.x / 2; s>0; s>>=1)
{
if (tid < s)
{
if (pos+s < size) // Handling out of bounds
{
d_in[pos] = d_in[pos] + d_in[pos+s];
}
}
__syncthreads();
}
// only thread 0 writes the result, since thread 0 holds the block's partial sum
if ((tid==0) && (pos < size))
{
d_out[blockIdx.x] = d_in[pos];
}
}
/* -------- KERNEL WRAPPER -------- */
void reduce(int * d_out, int * d_in, int size, int num_threads)
{
// setting up blocks and intermediate result holder
int num_blocks;
if(((size) % num_threads))
{
num_blocks = ((size) / num_threads) + 1;
}
else
{
num_blocks = (size) / num_threads;
}
int * d_intermediate;
cudaMalloc(&d_intermediate, sizeof(int)*num_blocks);
cudaMemset(d_intermediate, 0, sizeof(int)*num_blocks);
int prev_num_blocks;
int i = 1;
// recursively solving, will run approximately log base num_threads times.
do
{
printf("Round:%.d\n", i);
printf("NumBlocks:%.d\n", num_blocks);
printf("NumThreads:%.d\n", num_threads);
printf("size of array:%.d\n", size);
i++;
reduce_kernel<<<num_blocks, num_threads>>>(d_intermediate, d_in, size);
// after this pass d_intermediate holds one partial sum per block
size = num_blocks;
// updating input to intermediate
cudaMemcpy(d_in, d_intermediate, sizeof(int)*num_blocks, cudaMemcpyDeviceToDevice);
// Updating num_blocks to reflect how many blocks we now want to compute on
prev_num_blocks = num_blocks;
if(size % num_threads)
{
num_blocks = size / num_threads + 1;
}
else
{
num_blocks = size / num_threads;
}
// updating intermediate
cudaFree(d_intermediate);
cudaMalloc(&d_intermediate, sizeof(int)*num_blocks);
}
while(size > num_threads); // if it is too small, compute rest.
// computing rest
reduce_kernel<<<1, size>>>(d_out, d_in, prev_num_blocks);
}
/* -------- MAIN -------- */
int main(int argc, char **argv)
{
printf("@@STARTING@@ \n");
// Setting num_threads
int num_threads = 512;
// Making non-bogus data and setting it on the GPU
const int size = 1<<19;
const int size_out = 1;
int * d_in;
int * d_out;
cudaMalloc(&d_in, sizeof(int)*size);
cudaMalloc(&d_out, sizeof(int)*size_out);
int * h_in = (int *)malloc(size*sizeof(int));
for (int i = 0; i < size; i++) h_in[i] = 1;
cudaMemcpy(d_in, h_in, sizeof(int)*size, cudaMemcpyHostToDevice);
// Running kernel wrapper
reduce(d_out, d_in, size, num_threads);
int result;
cudaMemcpy(&result, d_out, sizeof(int), cudaMemcpyDeviceToHost);
printf("\nFINAL SUM IS: %d\n", result);
}
0c6736940787c561fe4e0e288e25fb2829ce3ac9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j3d27pt-32x32-3-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
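// Note: the kernels below appear to be auto-generated, temporally blocked
// 27-point stencil code (the AN5D-style macros and the "32x32-3-128" header
// name suggest a 32x32 spatial tile). kernel0_3 fuses __side0Len = 3 time
// steps, so the usable tile shrinks to __side2Len = __side3Len = 32 - 2*3 = 26;
// kernel0_2 and kernel0_1 are the 2-step and 1-step variants with 28 and 30,
// respectively.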
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
0c6736940787c561fe4e0e288e25fb2829ce3ac9.cu
#include "j3d27pt-32x32-3-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
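/* kernel0_2 and kernel0_1 below are lower-degree variants of kernel0_3 above:
   __side0Len (the number of time steps fused per sweep) drops from 3 to 2 to 1,
   with correspondingly fewer register-rotation stages (__reg_3_*, __reg_2_*, __reg_1_*)
   and a wider interior tile (__side2Len/__side3Len grow as the overlap halo shrinks).
   All three stream along c1 with a double-buffered shared-memory plane (__a_sb),
   which matches the usual shape of AN5D-style generated temporal-blocking code
   (note the AN5D_TYPE macro and the 32x32-3-128 kernel header included above). */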
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
|
dff1d0062f2c5bca1ab97f72950b77b4a24e5eff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.hpp"
#include <stdio.h>
#include <iostream>
/// Device functions and global functions (the latter also, confusingly, called "kernels")
/// are prefixed with "sqexp_" for the SquaredExponential kernel and "mat52_" for the Matern52 kernel.
// Covariance device function
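// The arithmetic below implements the squared-exponential (RBF) covariance with
// hyperparameters stored on a log scale:
//   k(x, y) = exp(theta[Ninput]) * exp(-0.5 * sum_i exp(theta[i]) * (x_i - y_i)^2),
// so exp(theta[i]) plays the role of a per-dimension inverse squared length-scale
// and theta[Ninput] is the log of the output scale.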
__device__ REAL sqexp_cov_val_d(int Ninput, REAL *x_d, REAL *y_d, REAL *theta_d)
{
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
s += d_i * d_i * exp(theta_d[i]);
}
return exp(-0.5 * s + theta_d[Ninput]);
}
////////////////////
__global__ void sqexp_cov_val_kernel(REAL *result_d, int Ninput, REAL *x_d,
REAL *y_d, REAL *theta_d)
{
*result_d = sqexp_cov_val_d(Ninput, x_d, y_d, theta_d);
}
////////////////////
__global__ void sqexp_cov_diag_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = sqexp_cov_val_d(Ninput, xnew_d + Ninput * i, xs + Ninput * i,
theta_d);
}
}
////////////////////
__global__ void sqexp_cov_all_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = sqexp_cov_val_d(Ninput, xnew_d, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__global__ void sqexp_cov_batch_kernel(REAL *result_d, int Nnew, int N, int Ninput,
REAL *xsnew_d, REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < Nnew)
{
result_d[j + Nnew * i] =
sqexp_cov_val_d(Ninput, xsnew_d + Ninput * j, xs_d + Ninput * i, theta_d);
}
}
////////////////////
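// Gradient of the squared-exponential covariance with respect to its first argument:
// the function below writes, for each dimension i,
//   d k(x, y) / d x_i = -exp(theta[i]) * (x_i - y_i) * k(x, y)
// into result_d[0..Ninput-1].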
__device__ void sqexp_cov_deriv_x(REAL *result_d, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * exp(theta_d[i]);
result_d[i] = a;
s += d_i * a;
}
REAL c = -exp(-0.5*s + theta_d[Ninput]);
for (unsigned int i=0; i < Ninput; i++)
{
result_d[i] *= c;
}
}
__global__ void sqexp_cov_deriv_x_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < Nx && j < Ny)
{
sqexp_cov_deriv_x(result_d + Ninput * (Nx * j + i),
Ninput, xs_d + Ninput * i, ys_d + Ninput * j,
theta_d);
}
}
////////////////////
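// Gradient with respect to the log-scale hyperparameters; the code below evaluates
//   d k / d theta[i]      = -0.5 * exp(theta[i]) * (x_i - y_i)^2 * k(x, y)   for i < Ninput
//   d k / d theta[Ninput] =  k(x, y)
// with consecutive components spaced result_stride apart so batched outputs can be interleaved.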
__device__ void sqexp_cov_deriv_theta(REAL *result_d, int result_stride, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
REAL s = 0.0;
REAL exp_thetaN = exp(theta_d[Ninput]);
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * d_i * exp(theta_d[i]);
s += a;
result_d[i * result_stride] = -0.5 * a;
}
s = exp_thetaN * exp(-0.5 * s);
for (unsigned int i=0; i < Ninput; i++)
{
result_d[i * result_stride] *= s;
}
result_d[Ninput * result_stride] = s;
}
__global__ void sqexp_cov_deriv_theta_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
const int result_stride = Nx * Ny;
if (i < Nx && j < Ny)
{
sqexp_cov_deriv_theta(result_d + Ny * i + j,
result_stride,
Ninput, xs_d + Ninput * j, ys_d + Ninput * i,
theta_d);
}
}
// Implementation of the SquaredExponentialKernel
void SquaredExponentialKernel::cov_val_gpu(REAL *result_d, int Ninput, REAL *x_d, REAL *y_d,
REAL *theta_d)
{
hipLaunchKernelGGL(( sqexp_cov_val_kernel), dim3(1),dim3(1), 0, 0, result_d, Ninput, x_d, y_d, theta_d);
}
void SquaredExponentialKernel::cov_all_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
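    // Note: the launch below is hard-coded to 10 blocks of 256 threads, so only the
    // first 10 * 256 = 2560 entries can be computed; a larger N would need a grid
    // sized from N, e.g. (N + threads_per_block - 1) / threads_per_block blocks.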
hipLaunchKernelGGL(( sqexp_cov_all_kernel), dim3(10), dim3(threads_per_block), 0, 0,
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void SquaredExponentialKernel::cov_diag_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
hipLaunchKernelGGL(( sqexp_cov_diag_kernel), dim3(10), dim3(threads_per_block), 0, 0,
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void SquaredExponentialKernel::cov_batch_gpu(REAL *result_d, int Nnew, int N, int Ninput, REAL *xsnew_d,
REAL *xs_d, REAL *theta_d)
{
dim3 threads_per_block(8, 32);
dim3 blocks(250, 625);
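    // Note: with an 8 x 32 block and a 250 x 625 grid this launch covers
    // i < 8 * 250 = 2000 (indexing N) and j < 32 * 625 = 20000 (indexing Nnew);
    // the in-kernel bounds check skips excess threads, but entries beyond those
    // limits would simply not be computed.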
hipLaunchKernelGGL(( sqexp_cov_batch_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
result_d, Nnew, N, Ninput, xsnew_d, xs_d, theta_d
);
}
void SquaredExponentialKernel::cov_deriv_x_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
hipLaunchKernelGGL(( sqexp_cov_deriv_x_batch_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
}
void SquaredExponentialKernel::cov_deriv_theta_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
hipLaunchKernelGGL(( sqexp_cov_deriv_theta_batch_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
}
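// Illustrative host-side usage sketch of the wrappers above (REAL, the sizes and the
// omitted hipMemcpy calls / error checks are assumptions, not part of this API's contract):
//
//   SquaredExponentialKernel kern;
//   REAL *xs_d, *xsnew_d, *theta_d, *result_d;
//   hipMalloc(&xs_d, N * Ninput * sizeof(REAL));        // N training points, row-major
//   hipMalloc(&xsnew_d, Nnew * Ninput * sizeof(REAL));  // Nnew prediction points
//   hipMalloc(&theta_d, (Ninput + 1) * sizeof(REAL));   // last entry: log output scale
//   hipMalloc(&result_d, N * Nnew * sizeof(REAL));      // cross-covariance matrix
//   ...copy xs, xsnew and theta to the device...
//   kern.cov_batch_gpu(result_d, Nnew, N, Ninput, xsnew_d, xs_d, theta_d);
//   ...copy result_d back to the host and hipFree the buffers...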
////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Matern52 /////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Covariance device function
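// Matern 5/2 covariance: with s = sum_i exp(theta[i]) * (x_i - y_i)^2 and r = sqrt(s),
// the value returned below is
//   k(x, y) = exp(theta[Ninput]) * (1 + sqrt(5)*r + (5/3)*s) * exp(-sqrt(5)*r).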
__device__ REAL mat52_cov_val_d(int Ninput, REAL *x_d, REAL *y_d, REAL *theta_d)
{
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
s += d_i * d_i * exp(theta_d[i]);
}
REAL r = sqrt(s);
return (1 + r*sqrt(5.) + (5./3.)*s) * exp(theta_d[Ninput] - sqrt(5.)*r);
}
////////////////////
__global__ void mat52_cov_val_kernel(REAL *result_d, int Ninput, REAL *x_d,
REAL *y_d, REAL *theta_d)
{
*result_d = mat52_cov_val_d(Ninput, x_d, y_d, theta_d);
}
////////////////////
__global__ void mat52_cov_diag_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = mat52_cov_val_d(Ninput, xnew_d + Ninput * i, xs + Ninput * i,
theta_d);
}
}
////////////////////
__global__ void mat52_cov_all_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = mat52_cov_val_d(Ninput, xnew_d, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__global__ void mat52_cov_batch_kernel(REAL *result_d, int Nnew, int N, int Ninput,
REAL *xsnew_d, REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < Nnew)
{
result_d[j + Nnew * i] =
mat52_cov_val_d(Ninput, xsnew_d + Ninput * j, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__device__ void mat52_cov_deriv_x(REAL *result_d, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
// dK/dx = dr/dx * dK/dr
//drdx
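    // Expanded: dr/dx_i = exp(theta[i]) * (x_i - y_i) / r (the loop accumulates
    // exp(theta[i]) * (x_i - y_i) into result_d), and
    // dK/dr = -(5/3) * r * (1 + sqrt(5)*r) * exp(theta[Ninput] - sqrt(5)*r),
    // which is what the r > 0 branch multiplies in; at r == 0 the gradient is zero.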
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * exp(theta_d[i]);
result_d[i] = a;
s += d_i * a;
}
REAL r = sqrt(s);
if (r > 0.) {
REAL dkdr = -5./3.*r*(1+sqrt(5.)*r)*exp(-1.*sqrt(5.)*r);
dkdr *= exp(theta_d[Ninput]);
for (unsigned int i=0; i < Ninput; i++)
{
result_d[i] = dkdr * result_d[i] / r;
}
}
}
__global__ void mat52_cov_deriv_x_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < Nx && j < Ny)
{
mat52_cov_deriv_x(result_d + Ninput * (Nx * j + i),
Ninput, xs_d + Ninput * i, ys_d + Ninput * j,
theta_d);
}
}
////////////////////
__device__ void mat52_cov_deriv_theta(REAL *result_d, int result_stride, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
//dK/dtheta = exp(thetaN) * dK/dr * dr/dtheta
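    // Concretely, with a_i = exp(theta[i]) * (x_i - y_i)^2 and r = sqrt(sum_i a_i):
    //   dK/dtheta[i]      = dK/dr * a_i / (2*r)   for i < Ninput (zero when r == 0),
    //   dK/dtheta[Ninput] = K(x, y) itself, written on the last line of this function.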
REAL exp_thetaN = exp(theta_d[Ninput]);
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * d_i * exp(theta_d[i]);
result_d[i * result_stride] = 0.5 * a;
s += a;
}
REAL r = sqrt(s);
REAL dkdr = 0.;
if (r > 0.) {
dkdr = -5./3.*r*(1+sqrt(5.)*r)*exp(-1.*sqrt(5.)*r);
dkdr *= exp_thetaN;
}
for (unsigned int i=0; i < Ninput; i++)
{
if (r > 0.) result_d[i * result_stride] = dkdr * result_d[i * result_stride] / r;
}
result_d[Ninput * result_stride] = (1 + r*sqrt(5.) + (5./3.)*s) * exp(theta_d[Ninput] - sqrt(5.)*r);
}
__global__ void mat52_cov_deriv_theta_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
const int result_stride = Nx * Ny;
if (i < Nx && j < Ny)
{
mat52_cov_deriv_theta(result_d + Ny * i + j,
result_stride,
Ninput, xs_d + Ninput * j, ys_d + Ninput * i,
theta_d);
}
}
/// Implementation of the Matern52 kernel
void Matern52Kernel::cov_val_gpu(REAL *result_d, int Ninput, REAL *x_d, REAL *y_d,
REAL *theta_d)
{
hipLaunchKernelGGL(( mat52_cov_val_kernel), dim3(1),dim3(1), 0, 0, result_d, Ninput, x_d, y_d, theta_d);
}
void Matern52Kernel::cov_all_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
hipLaunchKernelGGL(( mat52_cov_all_kernel), dim3(10), dim3(threads_per_block), 0, 0,
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void Matern52Kernel::cov_diag_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
hipLaunchKernelGGL(( mat52_cov_diag_kernel), dim3(10), dim3(threads_per_block), 0, 0,
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void Matern52Kernel::cov_batch_gpu(REAL *result_d, int Nnew, int N, int Ninput, REAL *xsnew_d,
REAL *xs_d, REAL *theta_d)
{
dim3 threads_per_block(8, 32);
dim3 blocks(250, 625);
hipLaunchKernelGGL(( mat52_cov_batch_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
result_d, Nnew, N, Ninput, xsnew_d, xs_d, theta_d
);
}
void Matern52Kernel::cov_deriv_x_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
hipLaunchKernelGGL(( mat52_cov_deriv_x_batch_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
}
void Matern52Kernel::cov_deriv_theta_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
hipLaunchKernelGGL(( mat52_cov_deriv_theta_batch_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
} | dff1d0062f2c5bca1ab97f72950b77b4a24e5eff.cu | #include "kernel.hpp"
#include <stdio.h>
#include <iostream>
/// Device functions and global functions (the latter also, confusingly, called "kernels")
/// are prefixed with "sqexp_" for the SquaredExponential kernel and "mat52_" for the Matern52 kernel.
// Covariance device function
__device__ REAL sqexp_cov_val_d(int Ninput, REAL *x_d, REAL *y_d, REAL *theta_d)
{
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
s += d_i * d_i * exp(theta_d[i]);
}
return exp(-0.5 * s + theta_d[Ninput]);
}
////////////////////
__global__ void sqexp_cov_val_kernel(REAL *result_d, int Ninput, REAL *x_d,
REAL *y_d, REAL *theta_d)
{
*result_d = sqexp_cov_val_d(Ninput, x_d, y_d, theta_d);
}
////////////////////
__global__ void sqexp_cov_diag_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = sqexp_cov_val_d(Ninput, xnew_d + Ninput * i, xs + Ninput * i,
theta_d);
}
}
////////////////////
__global__ void sqexp_cov_all_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = sqexp_cov_val_d(Ninput, xnew_d, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__global__ void sqexp_cov_batch_kernel(REAL *result_d, int Nnew, int N, int Ninput,
REAL *xsnew_d, REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < Nnew)
{
result_d[j + Nnew * i] =
sqexp_cov_val_d(Ninput, xsnew_d + Ninput * j, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__device__ void sqexp_cov_deriv_x(REAL *result_d, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * exp(theta_d[i]);
result_d[i] = a;
s += d_i * a;
}
REAL c = -exp(-0.5*s + theta_d[Ninput]);
for (unsigned int i=0; i < Ninput; i++)
{
result_d[i] *= c;
}
}
__global__ void sqexp_cov_deriv_x_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < Nx && j < Ny)
{
sqexp_cov_deriv_x(result_d + Ninput * (Nx * j + i),
Ninput, xs_d + Ninput * i, ys_d + Ninput * j,
theta_d);
}
}
////////////////////
__device__ void sqexp_cov_deriv_theta(REAL *result_d, int result_stride, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
REAL s = 0.0;
REAL exp_thetaN = exp(theta_d[Ninput]);
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * d_i * exp(theta_d[i]);
s += a;
result_d[i * result_stride] = -0.5 * a;
}
s = exp_thetaN * exp(-0.5 * s);
for (unsigned int i=0; i < Ninput; i++)
{
result_d[i * result_stride] *= s;
}
result_d[Ninput * result_stride] = s;
}
__global__ void sqexp_cov_deriv_theta_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
const int result_stride = Nx * Ny;
if (i < Nx && j < Ny)
{
sqexp_cov_deriv_theta(result_d + Ny * i + j,
result_stride,
Ninput, xs_d + Ninput * j, ys_d + Ninput * i,
theta_d);
}
}
// Implementation of the SquaredExponentialKernel
void SquaredExponentialKernel::cov_val_gpu(REAL *result_d, int Ninput, REAL *x_d, REAL *y_d,
REAL *theta_d)
{
sqexp_cov_val_kernel<<<1,1>>>(result_d, Ninput, x_d, y_d, theta_d);
}
void SquaredExponentialKernel::cov_all_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
sqexp_cov_all_kernel<<<10, threads_per_block>>>(
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void SquaredExponentialKernel::cov_diag_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
sqexp_cov_diag_kernel<<<10, threads_per_block>>>(
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void SquaredExponentialKernel::cov_batch_gpu(REAL *result_d, int Nnew, int N, int Ninput, REAL *xsnew_d,
REAL *xs_d, REAL *theta_d)
{
dim3 threads_per_block(8, 32);
dim3 blocks(250, 625);
sqexp_cov_batch_kernel<<<blocks, threads_per_block>>>(
result_d, Nnew, N, Ninput, xsnew_d, xs_d, theta_d
);
}
void SquaredExponentialKernel::cov_deriv_x_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
sqexp_cov_deriv_x_batch_kernel<<<blocks, threads_per_block>>>(
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
}
void SquaredExponentialKernel::cov_deriv_theta_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
sqexp_cov_deriv_theta_batch_kernel<<<blocks, threads_per_block>>>(
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
}
////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Matern52 /////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Covariance device function
__device__ REAL mat52_cov_val_d(int Ninput, REAL *x_d, REAL *y_d, REAL *theta_d)
{
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
s += d_i * d_i * exp(theta_d[i]);
}
REAL r = sqrt(s);
return (1 + r*sqrt(5.) + (5./3.)*s) * exp(theta_d[Ninput] - sqrt(5.)*r);
}
////////////////////
__global__ void mat52_cov_val_kernel(REAL *result_d, int Ninput, REAL *x_d,
REAL *y_d, REAL *theta_d)
{
*result_d = mat52_cov_val_d(Ninput, x_d, y_d, theta_d);
}
////////////////////
__global__ void mat52_cov_diag_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = mat52_cov_val_d(Ninput, xnew_d + Ninput * i, xs + Ninput * i,
theta_d);
}
}
////////////////////
__global__ void mat52_cov_all_kernel(REAL *result_d, int N, int Ninput, REAL *xnew_d,
REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
result_d[i] = mat52_cov_val_d(Ninput, xnew_d, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__global__ void mat52_cov_batch_kernel(REAL *result_d, int Nnew, int N, int Ninput,
REAL *xsnew_d, REAL *xs_d, REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < Nnew)
{
result_d[j + Nnew * i] =
mat52_cov_val_d(Ninput, xsnew_d + Ninput * j, xs_d + Ninput * i, theta_d);
}
}
////////////////////
__device__ void mat52_cov_deriv_x(REAL *result_d, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
// dK/dx = dr/dx * dK/dr
//drdx
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * exp(theta_d[i]);
result_d[i] = a;
s += d_i * a;
}
REAL r = sqrt(s);
if (r > 0.) {
REAL dkdr = -5./3.*r*(1+sqrt(5.)*r)*exp(-1.*sqrt(5.)*r);
dkdr *= exp(theta_d[Ninput]);
for (unsigned int i=0; i < Ninput; i++)
{
result_d[i] = dkdr * result_d[i] / r;
}
}
}
__global__ void mat52_cov_deriv_x_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < Nx && j < Ny)
{
mat52_cov_deriv_x(result_d + Ninput * (Nx * j + i),
Ninput, xs_d + Ninput * i, ys_d + Ninput * j,
theta_d);
}
}
////////////////////
__device__ void mat52_cov_deriv_theta(REAL *result_d, int result_stride, int Ninput,
const REAL *x_d, const REAL *y_d,
const REAL *theta_d)
{
//dK/dtheta = exp(thetaN) * dK/dr * dr/dtheta
REAL exp_thetaN = exp(theta_d[Ninput]);
REAL s = 0.0;
for (unsigned int i=0; i < Ninput; i++)
{
REAL d_i = x_d[i] - y_d[i];
REAL a = d_i * d_i * exp(theta_d[i]);
result_d[i * result_stride] = 0.5 * a;
s += a;
}
REAL r = sqrt(s);
REAL dkdr = 0.;
if (r > 0.) {
dkdr = -5./3.*r*(1+sqrt(5.)*r)*exp(-1.*sqrt(5.)*r);
dkdr *= exp_thetaN;
}
for (unsigned int i=0; i < Ninput; i++)
{
if (r > 0.) result_d[i * result_stride] = dkdr * result_d[i * result_stride] / r;
}
result_d[Ninput * result_stride] = (1 + r*sqrt(5.) + (5./3.)*s) * exp(theta_d[Ninput] - sqrt(5.)*r);
}
__global__ void mat52_cov_deriv_theta_batch_kernel(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
const int result_stride = Nx * Ny;
if (i < Nx && j < Ny)
{
mat52_cov_deriv_theta(result_d + Ny * i + j,
result_stride,
Ninput, xs_d + Ninput * j, ys_d + Ninput * i,
theta_d);
}
}
/// implementation of Matern52 kernel
void Matern52Kernel::cov_val_gpu(REAL *result_d, int Ninput, REAL *x_d, REAL *y_d,
REAL *theta_d)
{
mat52_cov_val_kernel<<<1,1>>>(result_d, Ninput, x_d, y_d, theta_d);
}
void Matern52Kernel::cov_all_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
mat52_cov_all_kernel<<<10, threads_per_block>>>(
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void Matern52Kernel::cov_diag_gpu(REAL *result_d, int N, int Ninput, REAL *xnew_d, REAL *xs_d,
REAL *theta_d)
{
const int threads_per_block = 256;
mat52_cov_diag_kernel<<<10, threads_per_block>>>(
result_d, N, Ninput, xnew_d, xs_d, theta_d);
}
void Matern52Kernel::cov_batch_gpu(REAL *result_d, int Nnew, int N, int Ninput, REAL *xsnew_d,
REAL *xs_d, REAL *theta_d)
{
dim3 threads_per_block(8, 32);
dim3 blocks(250, 625);
mat52_cov_batch_kernel<<<blocks, threads_per_block>>>(
result_d, Nnew, N, Ninput, xsnew_d, xs_d, theta_d
);
}
void Matern52Kernel::cov_deriv_x_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
mat52_cov_deriv_x_batch_kernel<<<blocks, threads_per_block>>>(
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
}
void Matern52Kernel::cov_deriv_theta_batch_gpu(
REAL *result_d, int Ninput, int Nx, int Ny, const REAL *xs_d,
const REAL *ys_d, const REAL *theta_d)
{
const int Bx = 16, By = 16;
dim3 threads_per_block(Bx, By);
dim3 blocks((Nx + Bx - 1)/Bx, (Ny + By - 1)/By);
mat52_cov_deriv_theta_batch_kernel<<<blocks, threads_per_block>>>(
result_d, Ninput, Nx, Ny, xs_d, ys_d, theta_d);
} |
ed53baa102476d9f195e59f7a678fd97b735717e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DataSetEnergy.h"
#include "cutils_func.h"
#include "boost_for_export.h"
using namespace std;
using namespace boost::python;
DataSetEnergy::DataSetEnergy(uint32_t groupTag_) : DataSet(groupTag_) {
requiresEng = true;
}
void DataSetEnergy::collect(int64_t turn, BoundsGPU &, int nAtoms, float4 *xs, float4 *vs, float4 *fs, float *engs, Virial *virials, hipDeviceProp_t &prop) {
engGPU.d_data.memset(0);
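    // Single-pass reduction: sumPlain accumulates the energy of the atoms matching
    // groupTag (presumably tested against the tag carried in fs) into engGPU[0] and,
    // judging by appendValues() below which divides the two entries, the number of
    // contributing atoms into engGPU[1].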
hipLaunchKernelGGL(( sumPlain<float, float>) , dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), PERBLOCK*sizeof(float), 0, engGPU.getDevData(), engs, nAtoms, groupTag, fs, prop.warpSize);
engGPU.dataToHost();
turns.push_back(turn);
turnsPy.append(turn);
}
void DataSetEnergy::appendValues() {
double engCur = (double) engGPU.h_data[0] / (double) engGPU.h_data[1];
vals.push_back(engCur);
valsPy.append(engCur);
}
void DataSetEnergy::prepareForRun() {
engGPU = GPUArrayGlobal<float>(2);
}
void export_DataSetEnergy() {
class_<DataSetEnergy, SHARED(DataSetEnergy), bases<DataSet>, boost::noncopyable > ("DataSetEnergy", no_init)
;
}
| ed53baa102476d9f195e59f7a678fd97b735717e.cu | #include "DataSetEnergy.h"
#include "cutils_func.h"
#include "boost_for_export.h"
using namespace std;
using namespace boost::python;
DataSetEnergy::DataSetEnergy(uint32_t groupTag_) : DataSet(groupTag_) {
requiresEng = true;
}
void DataSetEnergy::collect(int64_t turn, BoundsGPU &, int nAtoms, float4 *xs, float4 *vs, float4 *fs, float *engs, Virial *virials, cudaDeviceProp &prop) {
engGPU.d_data.memset(0);
sumPlain<float, float> <<<NBLOCK(nAtoms), PERBLOCK, PERBLOCK*sizeof(float)>>>(engGPU.getDevData(), engs, nAtoms, groupTag, fs, prop.warpSize);
engGPU.dataToHost();
turns.push_back(turn);
turnsPy.append(turn);
}
void DataSetEnergy::appendValues() {
double engCur = (double) engGPU.h_data[0] / (double) engGPU.h_data[1];
vals.push_back(engCur);
valsPy.append(engCur);
}
void DataSetEnergy::prepareForRun() {
engGPU = GPUArrayGlobal<float>(2);
}
void export_DataSetEnergy() {
class_<DataSetEnergy, SHARED(DataSetEnergy), bases<DataSet>, boost::noncopyable > ("DataSetEnergy", no_init)
;
}
|
9b859252998ad19b2e8877ed8ee63dc9dd0989de.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <math.h>
#include "func.h"
#include "func_hip.cuh"
using namespace cv;
int main()
{
struct timeval tv0, tv;
gettimeofday(&tv0,NULL);
Mat srcImg = imread("coincoin.png", CV_LOAD_IMAGE_GRAYSCALE);
threshold(srcImg, srcImg, 190, 255, THRESH_BINARY | THRESH_OTSU);
Mat mask1 = getStructuringElement(MORPH_RECT, Size(6, 6));
Mat n = srcImg.clone();
bitmap img(srcImg);
bitmap oldimg(img);
uchar* tmp1;
uchar* tmp2;
uchar* d_img_pixel;
uchar* d_oldimg_pixel;
hipMalloc(&d_img_pixel, sizeof(uchar) * img.w * img.h);
hipMalloc(&d_oldimg_pixel, sizeof(uchar) * img.w * img.h);
/************ 8 Line *****************************/
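    // This block moves both bitmaps to the GPU: the pixel data is copied into
    // d_img_pixel / d_oldimg_pixel, the original host pointers are parked in
    // tmp1 / tmp2, and img.pixel / oldimg.pixel are repointed at the device buffers
    // so the image-processing calls below receive device memory. The matching block
    // after Hough() copies the device buffers back and restores the host pointers.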
hipMemcpy(d_img_pixel, img.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyHostToDevice);
hipMemcpy(d_oldimg_pixel, oldimg.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyHostToDevice);
tmp1 = img.pixel;
tmp2 = oldimg.pixel;
img.pixel = d_img_pixel;
oldimg.pixel = d_oldimg_pixel;
/****************************************/
TimeDiff(&tv0,&tv);
Erode(img);
TimeDiff(&tv0,&tv);
Dilate(img);
TimeDiff(&tv0,&tv);
/********* 5 Line img show ***************/
// hipMemcpy(tmp1, img.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyDeviceToHost);
// n.data = tmp1;
// imshow("Tmp", n);
/***********************************/
Sobel(img);
TimeDiff(&tv0,&tv);
oldimg.pixel = (uchar*)malloc(sizeof(uchar)*img.w*img.h);
// for(int r=50;r<150;r+=2)
Hough(img, oldimg, 113);
TimeDiff(&tv0,&tv);
/************ 8 Line *****************************/
hipMemcpy(tmp1, img.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyDeviceToHost);
hipMemcpy(tmp2, oldimg.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyDeviceToHost);
img.pixel = tmp1;
oldimg.pixel = tmp2;
/****************************************/
Mat m2 = Mat(img.h, img.w, CV_8UC1);
memcpy(m2.data, img.pixel, img.w*img.h * sizeof(uchar));
// imshow("After hough", m2);
GaussianBlur(m2, m2, Size(9, 9), 0, 0);
threshold(m2, m2, 30, 255, THRESH_BINARY );
hipFree(d_img_pixel);
hipFree(d_oldimg_pixel);
imshow("Final", m2);
waitKey(0);
TimeDiff(&tv0,&tv);
return 0;
}
| 9b859252998ad19b2e8877ed8ee63dc9dd0989de.cu | #include <stdio.h>
#include <opencv2/opencv.hpp>
#include <math.h>
#include "func.h"
#include "func.cuh"
using namespace cv;
int main()
{
struct timeval tv0, tv;
gettimeofday(&tv0,NULL);
Mat srcImg = imread("coincoin.png", CV_LOAD_IMAGE_GRAYSCALE);
threshold(srcImg, srcImg, 190, 255, THRESH_BINARY | THRESH_OTSU);
Mat mask1 = getStructuringElement(MORPH_RECT, Size(6, 6));
Mat n = srcImg.clone();
bitmap img(srcImg);
bitmap oldimg(img);
uchar* tmp1;
uchar* tmp2;
uchar* d_img_pixel;
uchar* d_oldimg_pixel;
cudaMalloc(&d_img_pixel, sizeof(uchar) * img.w * img.h);
cudaMalloc(&d_oldimg_pixel, sizeof(uchar) * img.w * img.h);
/************ 8 Line *****************************/
cudaMemcpy(d_img_pixel, img.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyHostToDevice);
cudaMemcpy(d_oldimg_pixel, oldimg.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyHostToDevice);
tmp1 = img.pixel;
tmp2 = oldimg.pixel;
img.pixel = d_img_pixel;
oldimg.pixel = d_oldimg_pixel;
/****************************************/
TimeDiff(&tv0,&tv);
Erode(img);
TimeDiff(&tv0,&tv);
Dilate(img);
TimeDiff(&tv0,&tv);
/********* 5 Line img show ***************/
// cudaMemcpy(tmp1, img.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyDeviceToHost);
// n.data = tmp1;
// imshow("Tmp", n);
/***********************************/
Sobel(img);
TimeDiff(&tv0,&tv);
oldimg.pixel = (uchar*)malloc(sizeof(uchar)*img.w*img.h);
// for(int r=50;r<150;r+=2)
Hough(img, oldimg, 113);
TimeDiff(&tv0,&tv);
/************ 8 Line *****************************/
cudaMemcpy(tmp1, img.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyDeviceToHost);
cudaMemcpy(tmp2, oldimg.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyDeviceToHost);
img.pixel = tmp1;
oldimg.pixel = tmp2;
/****************************************/
Mat m2 = Mat(img.h, img.w, CV_8UC1);
memcpy(m2.data, img.pixel, img.w*img.h * sizeof(uchar));
// imshow("After hough", m2);
GaussianBlur(m2, m2, Size(9, 9), 0, 0);
threshold(m2, m2, 30, 255, THRESH_BINARY );
cudaFree(d_img_pixel);
cudaFree(d_oldimg_pixel);
imshow("Final", m2);
waitKey(0);
TimeDiff(&tv0,&tv);
return 0;
}
|
ad23bae958ccb0da2ec1cd06a6b41e53661fa2fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * This file contains the definition of the CUDA functions
* for rendering depth of field, based on Gaussian blurring
* using separable convolution, with depth-dependent kernel size.
* Separable convolution is based on convolution CUDA Sample with kernel-size adaptation
*/
#include "dof_gpu.h"
__constant__ float c_kernel[KERNEL_RADIUS * (KERNEL_RADIUS + 2)];
void copyKernel(float *kernel_coefficients, int kernel_index) {
int kernel_radius = kernel_index + 1;
hipMemcpyToSymbol(c_kernel, kernel_coefficients,
KERNEL_LENGTH_X(kernel_radius) * sizeof(float),
kernel_index * (kernel_index + 2) * sizeof(float));
}
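/*
 * Layout note (assuming KERNEL_LENGTH_X(r) expands to the usual 2*r + 1 taps): c_kernel packs the
 * Gaussian kernels for every radius r = 1..KERNEL_RADIUS back to back, so the kernel of radius r
 * starts at float offset (r-1)*(r+1) = r*r - 1 and its centre tap sits at index r*r - 1 + r, which
 * is exactly the kernel_mid index recomputed inside the convolution kernels below. Summing the
 * per-radius lengths gives KERNEL_RADIUS*(KERNEL_RADIUS+2), the declared size of c_kernel; e.g. the
 * radius-3 kernel occupies c_kernel[8..14] with its centre at c_kernel[11].
 */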
__global__ void _k_normalizeDepth(float* depth, float* depth_norm, unsigned int step, float min_distance, float max_distance, unsigned int width, unsigned height) {
uint32_t x_local = blockIdx.x*blockDim.x + threadIdx.x;
uint32_t y_local = blockIdx.y*blockDim.y + threadIdx.y;
if (x_local >= width || y_local >= height) return;
float depth_world = depth[x_local + y_local *step];
float depth_normalized = (max_distance - depth_world) / (max_distance - min_distance);
if (depth_normalized < 0.f) depth_normalized = 0.f;
if (depth_normalized > 1.f) depth_normalized = 1.f;
if (isfinite(depth_normalized))
depth_norm[x_local + y_local *step] = depth_normalized;
}
void normalizeDepth(float* depth, float* depth_out, unsigned int step, float min_distance, float max_distance, unsigned int width, unsigned height) {
dim3 dimGrid, dimBlock;
dimBlock.x = 32;
dimBlock.y = 8;
dimGrid.x = (width + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (height + dimBlock.y - 1) / dimBlock.y;
_k_normalizeDepth << <dimGrid, dimBlock, 0 >> > (depth, depth_out, step, min_distance, max_distance, width, height);
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 32
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
__global__ void _k_convolutionRows(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* depth, int imageW, int imageH, int pitch, int pitch_depth, float focus_depth) {
__shared__ sl::uchar4 s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
depth += baseY * pitch_depth + baseX;
sl::uchar4 reset(0, 0, 0, 0);
//Load main data
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) {
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++) {
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : reset;
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) {
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : reset;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) {
sl::float3 sum(0, 0, 0);
int kernel_radius = (int) floor((KERNEL_RADIUS) *fabs(depth[i * ROWS_BLOCKDIM_X] - focus_depth));
int kernel_mid = kernel_radius * kernel_radius - 1 + kernel_radius;
if (kernel_radius > 0) {
for (int j = -kernel_radius; j <= kernel_radius; ++j) {
sum.x += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j].x;
sum.y += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j].y;
sum.z += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j].z;
}
} else {
sum.x = (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X].x;
sum.y = (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X].y;
sum.z = (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X].z;
}
d_Dst[i * ROWS_BLOCKDIM_X].x = sum.x;
d_Dst[i * ROWS_BLOCKDIM_X].y = sum.y;
d_Dst[i * ROWS_BLOCKDIM_X].z = sum.z;
d_Dst[i * ROWS_BLOCKDIM_X].w = 255;
}
}
void convolutionRows(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* i_depth, int imageW, int imageH, int depth_pitch, float focus_point) {
dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
_k_convolutionRows << <blocks, threads >> > (d_Dst, d_Src, i_depth, imageW, imageH, imageW, depth_pitch, focus_point);
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 2
#define COLUMNS_HALO_STEPS 4
__global__ void _k_convolutionColumns(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* depth, int imageW, int imageH, int pitch, int pitch_depth, float focus_depth) {
__shared__ sl::uchar4 s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
sl::uchar4 reset(0, 0, 0, 0);
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
depth += baseY * pitch_depth + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : reset;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : reset;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
sl::float3 sum(0, 0, 0);
int kernel_radius = (int) floor((KERNEL_RADIUS) *fabs(depth[i * COLUMNS_BLOCKDIM_Y * pitch] - focus_depth));
int kernel_mid = kernel_radius * kernel_radius - 1 + kernel_radius;
if (kernel_radius > 0) {
for (int j = -kernel_radius; j <= kernel_radius; ++j) {
sum.x += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j].z;
sum.y += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j].y;
sum.z += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j].x;
}
} else {
sum.x = (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y].z;
sum.y = (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y].y;
sum.z = (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y].x;
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].x = sum.x;
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].y = sum.y;
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].z = sum.z;
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].w = 255;
}
}
void convolutionColumns(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* i_depth, int imageW, int imageH, int depth_pitch, float focus_point) {
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
_k_convolutionColumns << <blocks, threads >> > (d_Dst, d_Src, i_depth, imageW, imageH, imageW, depth_pitch, focus_point);
}
| ad23bae958ccb0da2ec1cd06a6b41e53661fa2fb.cu | /*
 * This file contains the definition of the CUDA functions
* for rendering depth of field, based on Gaussian blurring
* using separable convolution, with depth-dependent kernel size.
* Separable convolution is based on convolution CUDA Sample with kernel-size adaptation
*/
#include "dof_gpu.h"
__constant__ float c_kernel[KERNEL_RADIUS * (KERNEL_RADIUS + 2)];
void copyKernel(float *kernel_coefficients, int kernel_index) {
int kernel_radius = kernel_index + 1;
cudaMemcpyToSymbol(c_kernel, kernel_coefficients,
KERNEL_LENGTH_X(kernel_radius) * sizeof(float),
kernel_index * (kernel_index + 2) * sizeof(float));
}
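/*
 * Layout note (assuming KERNEL_LENGTH_X(r) expands to the usual 2*r + 1 taps): c_kernel packs the
 * Gaussian kernels for every radius r = 1..KERNEL_RADIUS back to back, so the kernel of radius r
 * starts at float offset (r-1)*(r+1) = r*r - 1 and its centre tap sits at index r*r - 1 + r, which
 * is exactly the kernel_mid index recomputed inside the convolution kernels below. Summing the
 * per-radius lengths gives KERNEL_RADIUS*(KERNEL_RADIUS+2), the declared size of c_kernel; e.g. the
 * radius-3 kernel occupies c_kernel[8..14] with its centre at c_kernel[11].
 */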
__global__ void _k_normalizeDepth(float* depth, float* depth_norm, unsigned int step, float min_distance, float max_distance, unsigned int width, unsigned height) {
uint32_t x_local = blockIdx.x*blockDim.x + threadIdx.x;
uint32_t y_local = blockIdx.y*blockDim.y + threadIdx.y;
if (x_local >= width || y_local >= height) return;
float depth_world = depth[x_local + y_local *step];
float depth_normalized = (max_distance - depth_world) / (max_distance - min_distance);
if (depth_normalized < 0.f) depth_normalized = 0.f;
if (depth_normalized > 1.f) depth_normalized = 1.f;
if (isfinite(depth_normalized))
depth_norm[x_local + y_local *step] = depth_normalized;
}
void normalizeDepth(float* depth, float* depth_out, unsigned int step, float min_distance, float max_distance, unsigned int width, unsigned height) {
dim3 dimGrid, dimBlock;
dimBlock.x = 32;
dimBlock.y = 8;
dimGrid.x = (width + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (height + dimBlock.y - 1) / dimBlock.y;
_k_normalizeDepth << <dimGrid, dimBlock, 0 >> > (depth, depth_out, step, min_distance, max_distance, width, height);
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 32
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
__global__ void _k_convolutionRows(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* depth, int imageW, int imageH, int pitch, int pitch_depth, float focus_depth) {
__shared__ sl::uchar4 s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
depth += baseY * pitch_depth + baseX;
sl::uchar4 reset(0, 0, 0, 0);
//Load main data
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) {
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++) {
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : reset;
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) {
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : reset;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) {
sl::float3 sum(0, 0, 0);
int kernel_radius = (int) floor((KERNEL_RADIUS) *fabs(depth[i * ROWS_BLOCKDIM_X] - focus_depth));
int kernel_mid = kernel_radius * kernel_radius - 1 + kernel_radius;
if (kernel_radius > 0) {
for (int j = -kernel_radius; j <= kernel_radius; ++j) {
sum.x += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j].x;
sum.y += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j].y;
sum.z += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j].z;
}
} else {
sum.x = (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X].x;
sum.y = (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X].y;
sum.z = (float) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X].z;
}
d_Dst[i * ROWS_BLOCKDIM_X].x = sum.x;
d_Dst[i * ROWS_BLOCKDIM_X].y = sum.y;
d_Dst[i * ROWS_BLOCKDIM_X].z = sum.z;
d_Dst[i * ROWS_BLOCKDIM_X].w = 255;
}
}
void convolutionRows(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* i_depth, int imageW, int imageH, int depth_pitch, float focus_point) {
dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
_k_convolutionRows << <blocks, threads >> > (d_Dst, d_Src, i_depth, imageW, imageH, imageW, depth_pitch, focus_point);
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 2
#define COLUMNS_HALO_STEPS 4
__global__ void _k_convolutionColumns(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* depth, int imageW, int imageH, int pitch, int pitch_depth, float focus_depth) {
__shared__ sl::uchar4 s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
sl::uchar4 reset(0, 0, 0, 0);
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
depth += baseY * pitch_depth + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : reset;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : reset;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
sl::float3 sum(0, 0, 0);
int kernel_radius = (int) floor((KERNEL_RADIUS) *fabs(depth[i * COLUMNS_BLOCKDIM_Y * pitch] - focus_depth));
int kernel_mid = kernel_radius * kernel_radius - 1 + kernel_radius;
if (kernel_radius > 0) {
for (int j = -kernel_radius; j <= kernel_radius; ++j) {
sum.x += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j].z;
sum.y += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j].y;
sum.z += c_kernel[kernel_mid + j] * (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j].x;
}
} else {
sum.x = (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y].z;
sum.y = (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y].y;
sum.z = (float) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y].x;
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].x = sum.x;
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].y = sum.y;
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].z = sum.z;
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch].w = 255;
}
}
void convolutionColumns(sl::uchar4 *d_Dst, sl::uchar4 *d_Src, float* i_depth, int imageW, int imageH, int depth_pitch, float focus_point) {
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
_k_convolutionColumns << <blocks, threads >> > (d_Dst, d_Src, i_depth, imageW, imageH, imageW, depth_pitch, focus_point);
}
|
78918dcfddac4964821bad22edea51b7de801a71.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "GPUmat.hh"
#include "cudaCommon.h"
/* Given the RHS and how many cuda arrays we expect, extracts a set of pointers to GPU memory for us
 Also hands back the element count of the first array for us (the equal-extent check is currently commented out) */
double **getGPUSourcePointers(const mxArray *prhs[], int num, int *retNumel, int startat, GPUmat *gm)
{
GPUtype src;
double **gpuPointers = (double **)malloc(num * sizeof(double *));
int iter;
int numel = gm->gputype.getNumel(gm->gputype.getGPUtype(prhs[startat]));
for(iter = 0; iter < num; iter++) {
src = gm->gputype.getGPUtype(prhs[startat + iter]);
/* if (gm->gputype.getNumel(src) != numel) { free(gpuPointers); mexErrMsgTxt("Fatal: Arrays contain nonequal number of elements."); } */
gpuPointers[iter] = (double *)gm->gputype.getGPUptr(src);
}
retNumel[0] = numel;
return gpuPointers;
}
/* Creates destination array that the kernels write to; Returns the GPU memory pointer, and assigns the LHS it's passed */
double **makeGPUDestinationArrays(GPUtype src, mxArray *retArray[], int howmany, GPUmat *gm)
{
int d = gm->gputype.getNdims(src);
const int *ssize = gm->gputype.getSize(src);
int x;
int newsize[3];
for(x = 0; x < 3; x++) (x < d) ? newsize[x] = ssize[x] : newsize[x] = 1;
double **rvals = (double **)malloc(howmany*sizeof(double *));
int i;
for(i = 0; i < howmany; i++) {
GPUtype ra = gm->gputype.create(gpuDOUBLE, d, newsize, NULL);
retArray[i] = gm->gputype.createMxArray(ra);
rvals[i] = (double *)gm->gputype.getGPUptr(ra);
}
return rvals;
}
| 78918dcfddac4964821bad22edea51b7de801a71.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "GPUmat.hh"
#include "cudaCommon.h"
/* Given the RHS and how many cuda arrays we expect, extracts a set of pointers to GPU memory for us
 Also hands back the element count of the first array for us (the equal-extent check is currently commented out) */
double **getGPUSourcePointers(const mxArray *prhs[], int num, int *retNumel, int startat, GPUmat *gm)
{
GPUtype src;
double **gpuPointers = (double **)malloc(num * sizeof(double *));
int iter;
int numel = gm->gputype.getNumel(gm->gputype.getGPUtype(prhs[startat]));
for(iter = 0; iter < num; iter++) {
src = gm->gputype.getGPUtype(prhs[startat + iter]);
/* if (gm->gputype.getNumel(src) != numel) { free(gpuPointers); mexErrMsgTxt("Fatal: Arrays contain nonequal number of elements."); } */
gpuPointers[iter] = (double *)gm->gputype.getGPUptr(src);
}
retNumel[0] = numel;
return gpuPointers;
}
/* Creates destination array that the kernels write to; Returns the GPU memory pointer, and assigns the LHS it's passed */
double **makeGPUDestinationArrays(GPUtype src, mxArray *retArray[], int howmany, GPUmat *gm)
{
int d = gm->gputype.getNdims(src);
const int *ssize = gm->gputype.getSize(src);
int x;
int newsize[3];
for(x = 0; x < 3; x++) (x < d) ? newsize[x] = ssize[x] : newsize[x] = 1;
double **rvals = (double **)malloc(howmany*sizeof(double *));
int i;
for(i = 0; i < howmany; i++) {
GPUtype ra = gm->gputype.create(gpuDOUBLE, d, newsize, NULL);
retArray[i] = gm->gputype.createMxArray(ra);
rvals[i] = (double *)gm->gputype.getGPUptr(ra);
}
return rvals;
}
|
a5dda5444cbd0e5eb34aa96cf0decc25f21a8393.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include <thrust/sort.h>
#include <sys/times.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
void start_clock(void);
void end_clock(const char *msg);
static clock_t st_time;
static clock_t en_time;
static struct tms st_cpu;
static struct tms en_cpu;
void
start_clock()
{
st_time = times(&st_cpu);
}
void end_clock(const char *msg)
{
en_time = times(&en_cpu);
std::cout<< "Sort type : " << msg << std::endl<< " Time elapsed:"<< (intmax_t)(en_time - st_time)<<std::endl;
}
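// Note: times() reports elapsed real time in clock ticks, so the "Time elapsed" values printed
// above are tick counts rather than seconds; dividing by sysconf(_SC_CLK_TCK) would convert them.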
void generateRandom(double & i)
{
i = rand();
}
int main(int argc, char ** argv)
{
if(argc<2)
{
std::cout<<"Please provide size as argument"<<std::endl;
return 1;
}
long vec_size =atoi(argv[1]);
{
start_clock();
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
std::sort(vec.begin(), vec.end());
end_clock("CPU all");
}
{
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
start_clock();
std::sort(vec.begin(), vec.end());
end_clock("CPU sort only");
}
{
hipDeviceReset();
start_clock();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
hv = d;
end_clock("thrust ALL");
}
{
hipDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
start_clock();
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("Thrust sort and copy and alloc");
}
{
hipDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d(vec_size);
start_clock();
thrust::copy(hv.begin(), hv.end(), d.begin());
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("thrust sort and copy");
}
{
hipDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
start_clock();
thrust::sort(d.begin(), d.end());
end_clock("thrust sort only");
hv = d;
}
} | a5dda5444cbd0e5eb34aa96cf0decc25f21a8393.cu | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include <thrust/sort.h>
#include <sys/times.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
void start_clock(void);
void end_clock(const char *msg);
static clock_t st_time;
static clock_t en_time;
static struct tms st_cpu;
static struct tms en_cpu;
void
start_clock()
{
st_time = times(&st_cpu);
}
void end_clock(const char *msg)
{
en_time = times(&en_cpu);
std::cout<< "Sort type : " << msg << std::endl<< " Time elapsed:"<< (intmax_t)(en_time - st_time)<<std::endl;
}
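// Note: times() reports elapsed real time in clock ticks, so the "Time elapsed" values printed
// above are tick counts rather than seconds; dividing by sysconf(_SC_CLK_TCK) would convert them.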
void generateRandom(double & i)
{
i = rand();
}
int main(int argc, char ** argv)
{
if(argc<2)
{
std::cout<<"Please provide size as argument"<<std::endl;
return 1;
}
long vec_size =atoi(argv[1]);
{
start_clock();
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
std::sort(vec.begin(), vec.end());
end_clock("CPU all");
}
{
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
start_clock();
std::sort(vec.begin(), vec.end());
end_clock("CPU sort only");
}
{
cudaDeviceReset();
start_clock();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
hv = d;
end_clock("thrust ALL");
}
{
cudaDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
start_clock();
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("Thrust sort and copy and alloc");
}
{
cudaDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d(vec_size);
start_clock();
thrust::copy(hv.begin(), hv.end(), d.begin());
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("thrust sort and copy");
}
{
cudaDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
start_clock();
thrust::sort(d.begin(), d.end());
end_clock("thrust sort only");
hv = d;
}
} |
09996699d0f69b813e173ce1640ae356dd7dc6f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "file_io.c"
#define BLOCK_SIZE 32
#define TILE_WIDTH 2
#define cudaCheckError() { \
hipError_t e = hipGetLastError(); \
if (e != hipSuccess) { \
printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
hipGetErrorString(e)); \
exit(1); \
} \
}
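/*
 * Overview (derived from the per-kernel comments and the loop in main): with A = M - w*e', where M
 * is the n-by-m data matrix and w holds the row means, each iteration computes tmp = A*xk (pddp1),
 * then y = A'*tmp / ||A'*tmp|| (pddp2, power, division), and stops once ||y - xk|| < eps (diff_pow
 * plus a host-side reduction). This is power iteration on A'*A, so x converges to the principal
 * direction used by PDDP-style divisive partitioning.
 */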
// Initialize a vector of size m to 1
__global__ void ones(double* vec, int m)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m) return;
vec[row] = 1.0;
}
// Calculate the value of w = average value of each row
__global__ void w_calc(double* objects, int m, int n, double *w)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= n) return;
double w_value = 0;
for (int i = 0; i < m ; i++)
w_value += objects[row * m + i];
w[row] = w_value/m;
}
// Calculate tmp = A * xk = ( M - w*e' )*xk
__global__ void pddp1(double* objects, int m, int n, double *w,double *x,double *tmp)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= n ) return;
double Cvalue = 0;
for(int i = 0; i < m; i++)
Cvalue += (objects[row * m + i] - w[row]) * x [i];
tmp[row] = Cvalue;
__syncthreads();
}
// Calculate output = A' * tmp = (M - w*e')' * ( M - w*e' )*xk
__global__ void pddp2(double* objects, double *output, int m, int n, double *w, double *tmp)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= m ) return;
double final_value = 0;
for(int j=0; j < n; j++)
final_value += (objects[j*m+row] - w[j]) * tmp[j];
output[row] = final_value;
__syncthreads();
}
// Square each element of the vector
__global__ void power(double *input, int m , double *output)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m ) return;
output[row] = (double)powf(input[row] , 2);
__syncthreads();
}
// Divide each element of a vector with a value
__global__ void division(double *input, int m, double norm, double *output)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m ) return;
output[row] = input[row] / norm;
__syncthreads();
}
// Calculate the squared element-wise difference (xk+1 - xk) used for the convergence check
__global__ void diff_pow(double *x, int m, double *y)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m ) return;
x[row] = y[row] - x[row];
x[row] = (double)powf(x[row] , 2);
}
// Copy contents of vector c to vector x
__global__ void swap(double* x, int m, double *c)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m) return;
x[row] = c[row];
}
void StartKernelTiming (hipEvent_t& tic, hipEvent_t& toc, hipStream_t iStream)
{
hipEventCreate(&tic);
hipEventCreate(&toc);
hipEventRecord(tic , iStream);
}
void StopKernelTiming (hipEvent_t& tic, hipEvent_t& toc, hipStream_t iStream, float* ptimer)
{
float kt = 0;
hipEventRecord(toc , iStream);
hipEventSynchronize(toc);
hipEventElapsedTime(&kt , tic , toc);
hipEventDestroy(tic); hipEventDestroy(toc);
(*ptimer) += kt;
}
int main(int argc, char **argv) {
hipSetDevice(0);
int n,m;
char *input_file = argv[1];
double *objects;
objects = file_read(input_file, &n, &m);
printf("::Objects loaded::\n");
printf("Objects: %d\n", m);
printf("Attributes: %d\n", n);
double fnorm, f_sum, final[m], eps;
eps = pow(10,-6);
double *w, *x, *tmp, *den, *y;
w = (double*) malloc(n*sizeof(double));
x = (double*) malloc(m*sizeof(double));
tmp = (double*) malloc(n*sizeof(double));
den = (double*) malloc(m*sizeof(double));
y = (double*)malloc(m*sizeof(double));
double *objects_d,*final_d,*w_d, *x_d,*tmp_d, *in_d, *den_d, *y_d;
dim3 dimBlock(1,BLOCK_SIZE);
dim3 dimGrid(1, (m + dimBlock.y - 1) / dimBlock.y);
printf("Grid Size: (%d,%d) \n",dimGrid.x,dimGrid.y);
printf("Block Size: (%d,%d) \n",dimBlock.x,dimBlock.y);
hipMalloc((void**) &in_d , m*sizeof(double));
hipMemcpy(in_d , x , m*sizeof(double) , hipMemcpyHostToDevice);
hipMalloc((void**) &x_d , m*sizeof(double));
hipMemcpy(x_d , x , m*sizeof(double) , hipMemcpyHostToDevice);
hipMalloc((void **) &objects_d , m*n*sizeof(double));
hipMemcpy (objects_d , objects , m*n*sizeof(double) , hipMemcpyHostToDevice );
hipMalloc((void **) &final_d,m*sizeof(double));
hipMalloc((void **) &tmp_d,n*sizeof(double));
hipMalloc((void **) &w_d,n*sizeof(double));
hipMalloc((void **) &den_d,m*sizeof(double));
hipMalloc((void **) &y_d,m*sizeof(double));
cudaCheckError();
hipEvent_t tic, toc;
float Elapsed_Time;
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( ones), dim3(dimGrid),dim3(dimBlock), 0, 0, x_d,m);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (x, x_d, m*sizeof(double), hipMemcpyDeviceToHost );
cudaCheckError();
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( w_calc), dim3(dimGrid),dim3(dimBlock), 0, 0, objects_d,m,n,w_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (w , w_d , n*sizeof(double) , hipMemcpyDeviceToHost );
do{
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( pddp1), dim3(dimGrid),dim3(dimBlock), 0, 0, objects_d,m,n,w_d,x_d,tmp_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (tmp , tmp_d , n*sizeof(double) , hipMemcpyDeviceToHost );
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( pddp2), dim3(dimGrid),dim3(dimBlock), 0, 0, objects_d,final_d,m,n,w_d,tmp_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (final , final_d , m*sizeof(double) , hipMemcpyDeviceToHost );
//calculate each elements square
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( power), dim3(dimGrid),dim3(dimBlock), 0, 0, final_d,m,den_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (den , den_d , m*sizeof(double) , hipMemcpyDeviceToHost);
cudaCheckError();
//sum all elements
StartKernelTiming(tic, toc, 0);
f_sum = 0.0;
for (int i = 0 ; i<m; i++)
f_sum+=den[i];
f_sum = sqrt(f_sum);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
//divide each element of y with norm
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( division), dim3(dimGrid),dim3(dimBlock), 0, 0, final_d,m,f_sum,y_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (y, y_d,m* sizeof(double), hipMemcpyDeviceToHost );
cudaCheckError();
//calculate difference xk+1 - xk and find each element's square
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( diff_pow), dim3(dimGrid),dim3(dimBlock), 0, 0, x_d,m,y_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (x, x_d, m*sizeof(double), hipMemcpyDeviceToHost );
cudaCheckError();
//calculate final norm
StartKernelTiming(tic, toc, 0);
fnorm = 0.0;
for (int i = 0 ; i<m; i++)
fnorm+=x[i];
fnorm = sqrt(fnorm);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( swap), dim3(dimGrid),dim3(dimBlock), 0, 0, in_d,m,y_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
hipMemcpy (x, in_d, m*sizeof(double), hipMemcpyDeviceToHost );
hipMemcpy(x_d,x,m*sizeof(double),hipMemcpyHostToDevice);
cudaCheckError();
}while(fnorm > eps);
printf("-----------------\n");
printf("Elapsed_Time=%f ms\n", Elapsed_Time);
//Print the last 5 values of y to show correctness
printf("-----------------\n");
for(int i=m-5; i<m; i++)
printf("y[%d] = %.7f \n",i,y[i]);
hipFree(objects_d);
hipFree(final_d);
hipFree(w_d);
hipFree(x_d);
hipFree(tmp_d);
hipFree(in_d);
hipFree(den_d);
hipFree(y_d);
free(w);
free(x);
free(tmp);
free(den);
free(y);
free(objects);
return (0);
}
| 09996699d0f69b813e173ce1640ae356dd7dc6f1.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "file_io.c"
#define BLOCK_SIZE 32
#define TILE_WIDTH 2
#define cudaCheckError() { \
cudaError_t e = cudaGetLastError(); \
if (e != cudaSuccess) { \
printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
cudaGetErrorString(e)); \
exit(1); \
} \
}
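/*
 * Overview (derived from the per-kernel comments and the loop in main): with A = M - w*e', where M
 * is the n-by-m data matrix and w holds the row means, each iteration computes tmp = A*xk (pddp1),
 * then y = A'*tmp / ||A'*tmp|| (pddp2, power, division), and stops once ||y - xk|| < eps (diff_pow
 * plus a host-side reduction). This is power iteration on A'*A, so x converges to the principal
 * direction used by PDDP-style divisive partitioning.
 */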
// Initialize a vector of size m to 1
__global__ void ones(double* vec, int m)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m) return;
vec[row] = 1.0;
}
// Calculate the value of w = average value of each row
__global__ void w_calc(double* objects, int m, int n, double *w)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= n) return;
double w_value = 0;
for (int i = 0; i < m ; i++)
w_value += objects[row * m + i];
w[row] = w_value/m;
}
// Calculate tmp = A * xk = ( M - w*e' )*xk
__global__ void pddp1(double* objects, int m, int n, double *w,double *x,double *tmp)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= n ) return;
double Cvalue = 0;
for(int i = 0; i < m; i++)
Cvalue += (objects[row * m + i] - w[row]) * x [i];
tmp[row] = Cvalue;
__syncthreads();
}
// Calculate output = A' * tmp = (M - w*e')' * ( M - w*e' )*xk
__global__ void pddp2(double* objects, double *output, int m, int n, double *w, double *tmp)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= m ) return;
double final_value = 0;
for(int j=0; j < n; j++)
final_value += (objects[j*m+row] - w[j]) * tmp[j];
output[row] = final_value;
__syncthreads();
}
// Square each element of the vector
__global__ void power(double *input, int m , double *output)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m ) return;
output[row] = (double)powf(input[row] , 2);
__syncthreads();
}
// Divide each element of a vector with a value
__global__ void division(double *input, int m, double norm, double *output)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m ) return;
output[row] = input[row] / norm;
__syncthreads();
}
// Calculate the squared element-wise difference (xk+1 - xk) used for the convergence check
__global__ void diff_pow(double *x, int m, double *y)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m ) return;
x[row] = y[row] - x[row];
x[row] = (double)powf(x[row] , 2);
}
// Copy contents of vector c to vector x
__global__ void swap(double* x, int m, double *c)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= m) return;
x[row] = c[row];
}
void StartKernelTiming (cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream)
{
cudaEventCreate(&tic);
cudaEventCreate(&toc);
cudaEventRecord(tic , iStream);
}
void StopKernelTiming (cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream, float* ptimer)
{
float kt = 0;
cudaEventRecord(toc , iStream);
cudaEventSynchronize(toc);
cudaEventElapsedTime(&kt , tic , toc);
cudaEventDestroy(tic); cudaEventDestroy(toc);
(*ptimer) += kt;
}
int main(int argc, char **argv) {
cudaSetDevice(0);
int n,m;
char *input_file = argv[1];
double *objects;
objects = file_read(input_file, &n, &m);
printf("::Objects loaded::\n");
printf("Objects: %d\n", m);
printf("Attributes: %d\n", n);
double fnorm, f_sum, final[m], eps;
eps = pow(10,-6);
double *w, *x, *tmp, *den, *y;
w = (double*) malloc(n*sizeof(double));
x = (double*) malloc(m*sizeof(double));
tmp = (double*) malloc(n*sizeof(double));
den = (double*) malloc(m*sizeof(double));
y = (double*)malloc(m*sizeof(double));
double *objects_d,*final_d,*w_d, *x_d,*tmp_d, *in_d, *den_d, *y_d;
dim3 dimBlock(1,BLOCK_SIZE);
dim3 dimGrid(1, (m + dimBlock.y - 1) / dimBlock.y);
printf("Grid Size: (%d,%d) \n",dimGrid.x,dimGrid.y);
printf("Block Size: (%d,%d) \n",dimBlock.x,dimBlock.y);
cudaMalloc((void**) &in_d , m*sizeof(double));
cudaMemcpy(in_d , x , m*sizeof(double) , cudaMemcpyHostToDevice);
cudaMalloc((void**) &x_d , m*sizeof(double));
cudaMemcpy(x_d , x , m*sizeof(double) , cudaMemcpyHostToDevice);
cudaMalloc((void **) &objects_d , m*n*sizeof(double));
cudaMemcpy (objects_d , objects , m*n*sizeof(double) , cudaMemcpyHostToDevice );
cudaMalloc((void **) &final_d,m*sizeof(double));
cudaMalloc((void **) &tmp_d,n*sizeof(double));
cudaMalloc((void **) &w_d,n*sizeof(double));
cudaMalloc((void **) &den_d,m*sizeof(double));
cudaMalloc((void **) &y_d,m*sizeof(double));
cudaCheckError();
cudaEvent_t tic, toc;
float Elapsed_Time;
StartKernelTiming(tic, toc, 0);
ones<<<dimGrid,dimBlock>>>(x_d,m);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (x, x_d, m*sizeof(double), cudaMemcpyDeviceToHost );
cudaCheckError();
StartKernelTiming(tic, toc, 0);
w_calc<<<dimGrid,dimBlock>>>(objects_d,m,n,w_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (w , w_d , n*sizeof(double) , cudaMemcpyDeviceToHost );
do{
StartKernelTiming(tic, toc, 0);
pddp1<<<dimGrid,dimBlock>>>(objects_d,m,n,w_d,x_d,tmp_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (tmp , tmp_d , n*sizeof(double) , cudaMemcpyDeviceToHost );
StartKernelTiming(tic, toc, 0);
pddp2<<<dimGrid,dimBlock>>>(objects_d,final_d,m,n,w_d,tmp_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (final , final_d , m*sizeof(double) , cudaMemcpyDeviceToHost );
//calculate each elements square
StartKernelTiming(tic, toc, 0);
power<<<dimGrid,dimBlock>>>(final_d,m,den_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (den , den_d , m*sizeof(double) , cudaMemcpyDeviceToHost);
cudaCheckError();
//sum all elements
StartKernelTiming(tic, toc, 0);
f_sum = 0.0;
for (int i = 0 ; i<m; i++)
f_sum+=den[i];
f_sum = sqrt(f_sum);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
//divide each element of y with norm
StartKernelTiming(tic, toc, 0);
division<<<dimGrid,dimBlock>>>(final_d,m,f_sum,y_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (y, y_d,m* sizeof(double), cudaMemcpyDeviceToHost );
cudaCheckError();
//calculate difference xk+1 - xk and find each element's square
StartKernelTiming(tic, toc, 0);
diff_pow<<<dimGrid,dimBlock>>>(x_d,m,y_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (x, x_d, m*sizeof(double), cudaMemcpyDeviceToHost );
cudaCheckError();
//calculate final norm
StartKernelTiming(tic, toc, 0);
fnorm = 0.0;
for (int i = 0 ; i<m; i++)
fnorm+=x[i];
fnorm = sqrt(fnorm);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
StartKernelTiming(tic, toc, 0);
swap<<<dimGrid,dimBlock>>>(in_d,m,y_d);
StopKernelTiming(tic, toc, 0, &Elapsed_Time);
cudaMemcpy (x, in_d, m*sizeof(double), cudaMemcpyDeviceToHost );
cudaMemcpy(x_d,x,m*sizeof(double),cudaMemcpyHostToDevice);
cudaCheckError();
}while(fnorm > eps);
printf("-----------------\n");
printf("Elapsed_Time=%f ms\n", Elapsed_Time);
//Print the last 5 values of y to show correctness
printf("-----------------\n");
for(int i=m-5; i<m; i++)
printf("y[%d] = %.7f \n",i,y[i]);
cudaFree(objects_d);
cudaFree(final_d);
cudaFree(w_d);
cudaFree(x_d);
cudaFree(tmp_d);
cudaFree(in_d);
cudaFree(den_d);
cudaFree(y_d);
free(w);
free(x);
free(tmp);
free(den);
free(y);
free(objects);
return (0);
}
|
e266fbbf0b5eeec1a4df68e91e2602487081b4f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void matrixMultiplicationKernel(double* A, double* B, double* C, int N) {
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
    double tmpSum = 0;   // accumulate in double to match the double-precision matrices
    if (ROW < N && COL < N) {
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // keep the store inside the bounds check so out-of-range threads do not write past C
        C[ROW * N + COL] = tmpSum;
    }
} | e266fbbf0b5eeec1a4df68e91e2602487081b4f5.cu | extern "C"
__global__ void matrixMultiplicationKernel(double* A, double* B, double* C, int N) {
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
    double tmpSum = 0;   // accumulate in double to match the double-precision matrices
    if (ROW < N && COL < N) {
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // keep the store inside the bounds check so out-of-range threads do not write past C
        C[ROW * N + COL] = tmpSum;
    }
} |
33ec9f7189b8d76073b2453378ef9d1b87b88660.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run convolution kernels using functions and data structures
provided by CUTLASS using tensor cores; which we run on a NVIDIA Ampere GPU.
Writing a single high performance convolution kernel is hard but do-able. Whereas writing
high performance kernels at scale which works for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of implicit gemm kernel. When used properly, the kernels can hit peak performance
of GPU easily.
CUTLASS divides a kernel into hierarchical composable sections, which means that at the thread, warp
and thread-block level, each computes on its own tile size, with higher-level tile sizes being
composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In this example, we split variable initialization into
1. Setting up data properties : describes how tensors are laid out in the memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set tensors will be used to compute
output of convolution.
First, we setup the data types of the input tensor A, weights' tensor B and output tensor C along
with alpha, beta as the equation for convolution is C = alpha * Conv2dFprop(A, B) + beta * C. In CUTLASS,
the kernels first compute Conv2dFprop(A, B) and leave the rest of the computation to end of the kernel as
alpha * X + beta * C is a simple element-wise operation on X (Conv2dFprop(A, B)) and C. We call this the
epilogue of the kernel. Hence, we setup the data types for alpha and beta to be equal to
ElementComputeEpilogue = float. We use the data type for elements in input tensor A and B as
cutlass::half_t. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (float),
ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t),
ElementOutput (float). Communicating just the data type is not enough. As the data is laid out
linearly in memory, we have to convey the layout of tensors. We do that by initializing template
variables LayoutInputA, LayoutInputB and LayoutOutput to the TensorNHWC cutlass variable. Next, we setup
rules to compute alpha * X + beta * C, which is called the epilogue of the kernel. We initialize the template
variable EpilogueOp, which takes the data type of output ElementOutput (float), the number of
elements per vector memory access (8), data type of accumulator (float) and data type of
computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x64,
64x64x64, 16x8x16 (MxNxK) respectively. When passed to instantiate CUTLASS Implicit GEMM kernel, it
internally deduces the amount of threads needed per thread-block, amount of shared memory, storing
data in a bank-conflict-free manner, and a ton of other variables required to compose, initialize and
launch a high performance Implicit GEMM kernel. This is the beauty of CUTLASS; it relieves the developer
from understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, and storing results to global memory. The below flow
sequence shows a typical mma multistage pipeline.
(see include/cutlass/conv/threadblock/implicit_gemm_multistage.h)
tensor in global memory --cp_async--> tile in shared memory --smem loads--> registers
--mma--> registers --global stores--> output to global memory
NVIDIA Ampere uses `cp_async` to build multistage software pipeline to better hide latencies.
A few more template variables are initialized, such as which threadblock tile of the output matrix
is computed by which threadblock launched on an SM, and the CUDA SM architecture of the GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS Implicit GEMM
kernel using cutlass::conv::device::ImplicitGemm template.
The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel, and run it.
We use CUTLASS utilities to initialize, fill, and compare tensors as they are simple and don't get
in the way of learning CUTLASS.
Once all the tensors are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (N = 1, H = 64, W = 64, C = 128), filter size (K = 64,
R = 3, S = 3, C = 128 ), padding, strides, dilation, tensors, alpha, beta and the
important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space
memory required by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to initialize the CUTLASS kernel; then, the kernel is launched.
In this example, we later on launch a reference convolution kernel (from CUTLASS utilities) to
compare if the output from the CUTLASS kernel is the same as the reference implicit GEMM kernel.
*/
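// A condensed sketch of the host-side sequence the comment above describes. The full, runnable
// version lives in profile_convolution() further down; problem_size, tensor_a/b/c and options are
// its locals, and CUTLASS_CHECK is assumed to be the status-checking macro from helper.h:
//
//   ImplicitGemm implicit_gemm_op;
//   typename ImplicitGemm::Arguments arguments{problem_size,
//                                              tensor_a.device_ref(), tensor_b.device_ref(),
//                                              tensor_c.device_ref(), tensor_c.device_ref(),
//                                              {options.alpha, options.beta}};
//   size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
//   cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
//   CUTLASS_CHECK(implicit_gemm_op.can_implement(arguments));
//   CUTLASS_CHECK(implicit_gemm_op.initialize(arguments, workspace.get()));
//   CUTLASS_CHECK(implicit_gemm_op());   // launches the implicit GEMM convolution kernel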
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kAnalytic;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "16_ampere_tensorop_conv2dfprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
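    // e.g., the default 1x32x32x32 input with a 32x3x3x32 filter and same padding gives
    // 32768 outputs * 288 FMAs each = 9,437,184 FMAs, i.e. about 18.9 MFLOP per launch.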
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
hipError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(hipSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with zeros
cutlass::reference::host::TensorFill(
tensor_c.host_view());
// Fill tensor C for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_c.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_ref_c.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
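  // Note that tensor_c.device_ref() is passed twice below: once as the C operand and once
  // as the destination D, so the epilogue result is written back into tensor_c in place.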
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_c.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_c.host_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_c.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_c.host_view(),
tensor_ref_c.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "16_ampere_workspace_conv2dfprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 33ec9f7189b8d76073b2453378ef9d1b87b88660.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run convolution kernels using functions and data structures
provided by CUTLASS using tensor cores; which we run on a NVIDIA Ampere GPU.
  Writing a single high-performance convolution kernel is hard but doable, whereas writing
  high-performance kernels at scale that work for multiple problem sizes with good abstractions is
  really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of implicit gemm kernel. When used properly, the kernels can hit peak performance
of GPU easily.
  CUTLASS divides a kernel into hierarchical, composable sections, which means that at the thread, warp
  and thread-block level, each computes on its own tile size, with higher-level tile sizes being
  composed from lower-level ones. Multiple thread-tiles (the tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
  In this example, we split variable initialization into
  1. Setting up data properties : describes how tensors are laid out in the memory and how the kernel
  can view them (logical to physical mapping)
  2. Setting up computation properties : describes how the tensors set above will be used to compute
  the output of the convolution.
First, we setup the data types of the input tensor A, weights' tensor B and output tensor C along
with alpha, beta as the equation for convolution is C = alpha * Conv2dFprop(A, B) + beta * C. In CUTLASS,
  the kernels first compute Conv2dFprop(A, B) and leave the rest of the computation to the end of the kernel, as
  alpha * X + beta * C is a simple element-wise operation on X (Conv2dFprop(A, B)) and C. We call this the
  epilogue of the kernel. Hence, we setup the data types for alpha and beta to be equal to
ElementComputeEpilogue = float. We use the data type for elements in input tensor A and B as
cutlass::half_t. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (float),
ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t),
ElementOutput (float). Communicating just the data type is not enough. As the data is laid out
linearly in memory, we have to convey the layout of tensors. We do that by initializing template
  variables LayoutInputA, LayoutInputB and LayoutOutput to the cutlass TensorNHWC layout. Next, we setup
  rules to compute alpha * X + beta * C, which is called the epilogue of the kernel. We initialize template
variable EpilogueOp, which takes the data type of output ElementOutput (float), the number of
elements per vector memory access (8), data type of accumulator (float) and data type of
computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x64,
64x64x64, 16x8x16 (MxNxK) respectively. When passed to instantiate CUTLASS Implicit GEMM kernel, it
  internally deduces the number of threads needed per thread-block, the amount of shared memory, storing
  data in a bank-conflict-free manner, and a ton of other variables required to compose, initialize and
  launch a high performance Implicit GEMM kernel. This is the beauty of CUTLASS: it relieves the developer
  from understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, store to global memory. The below flow
sequence shows a typical mma multistage pipeline.
(see include/cutlass/conv/threadblock/implicit_gemm_multistage.h)
tensor in global memory --cp_async--> tile in shared memory --smem loads--> registers
--mma--> registers --global stores--> output to global memory
NVIDIA Ampere uses `cp_async` to build multistage software pipeline to better hide latencies.
  There are a few more template variables initialized, such as which threadblock tile of the output matrix
  is computed by which threadblock launched on an SM, and the CUDA SM architecture of the GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS Implicit GEMM
kernel using cutlass::conv::device::ImplicitGemm template.
  The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel, and run it.
  We use CUTLASS utilities to initialize, fill, and compare tensors, as they are simple and don't get
  in the way of learning CUTLASS.
Once all the tensors are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (N = 1, H = 64, W = 64, C = 128), filter size (K = 64,
R = 3, S = 3, C = 128 ), padding, strides, dilation, tensors, alpha, beta and the
  important one, the split k-dimension factor. Along with that, we query CUTLASS for any scratch-space
  memory required by the kernel we instantiated. If some is required, we create it and pass it along with the
  other arguments used to initialize the CUTLASS kernel; then the kernel is launched.
In this example, we later on launch a reference convolution kernel (from CUTLASS utilities) to
  compare whether the output from the CUTLASS kernel is the same as that of the reference implicit GEMM kernel.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kAnalytic;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
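    // For example, the default C = 32 and K = 32 satisfy this; a channel count such as 30,
    // which is not divisible by 8, would be rejected.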
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "16_ampere_tensorop_conv2dfprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
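    // padding packs the two H-side pads in n()/h() and the two W-side pads in w()/c()
    // (see update()), so n()+h() is the total padding along H and w()+c() along W.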
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with zeros
cutlass::reference::host::TensorFill(
tensor_c.host_view());
// Fill tensor C for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_c.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_ref_c.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_c.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_c.host_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_c.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_c.host_view(),
tensor_ref_c.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "16_ampere_workspace_conv2dfprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
fae5792751636800d96173540f0a572f429337c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cauchyLogErrDeriv.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int M = 2;
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *Y = NULL;
hipMalloc(&Y, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(cauchyLogErrDeriv, dim3(gridBlock), dim3(threadBlock), 0, 0, N, M, A, Y, out);
hipDeviceSynchronize();
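// Warm-up: launch the kernel 10 times before timing so one-time setup costs are excluded.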
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(cauchyLogErrDeriv, dim3(gridBlock), dim3(threadBlock), 0, 0, N, M, A, Y, out);
}
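// Timed region: 1000 back-to-back launches measured with std::chrono::steady_clock
// (no device synchronization before the end timestamp).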
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(cauchyLogErrDeriv, dim3(gridBlock), dim3(threadBlock), 0, 0, N, M, A, Y, out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fae5792751636800d96173540f0a572f429337c0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cauchyLogErrDeriv.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int M = 2;
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *Y = NULL;
cudaMalloc(&Y, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
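// Launch configuration: the grid covers the XSIZE x YSIZE domain rounded up to whole blocks.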
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cauchyLogErrDeriv<<<gridBlock,threadBlock>>>(N,M,A,Y,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cauchyLogErrDeriv<<<gridBlock,threadBlock>>>(N,M,A,Y,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cauchyLogErrDeriv<<<gridBlock,threadBlock>>>(N,M,A,Y,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
dbe879f27037733dde2355aafa0e8ad859582aef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
__global__ void initObject(VirtVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col, int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
//for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(VirtVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
*/
#include "parse_oo.h"
void initContext(GraphChiContext *context, int vertices, int edges) {
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
void initObject(VirtVertex<int, int> *vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol, obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<int> *)alloc->my_new<Edge<int>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<int> **)alloc->my_new<Edge<int> *>(outdegree);
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree, alloc);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
void part0_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
vertex[tid] =
(VirtVertex<int, int> *)alloc->my_new<ChiVertex<int, int>>();
}
}
void part1_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
// int out_start = row[tid];
// int out_end;
// if (tid + 1 < context->getNumVertices()) {
// out_end = row[tid + 1];
// } else {
// out_end = context->getNumEdges();
// }
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
// } else {
// in_end = context->getNumEdges();
// }
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<myType> *)alloc->my_new<Edge<myType>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<myType> **)alloc->my_new<Edge<myType> *>(outdegree);
// new (&vertex[tid]) ChiVertex<int, int>(tid, indegree,
// outdegree,alloc);
vertex[tid]->set_in_out(alloc);
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void part_kern0_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
}
__global__ void part_kern1_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
void initOutEdge(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
__global__ void kern_initObject(VirtVertex<int, int> *vertex,
GraphChiContext *context, int *row, int *col,
int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
}
//}
}
__global__ void kern_initOutEdge(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row, int *col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
__managed__ range_tree_node *range_tree;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_CC;
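// Label-propagation connected components: on iteration 0 every vertex takes its own id as its
// label; on each iteration a vertex takes the minimum label seen on its edges and writes the
// new label back to its edges, so labels converge to the minimum vertex id of the component.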
__global__ void ConnectedComponent(VirtVertex<int, int> **vertex,
GraphChiContext *context, int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int numEdges;
numEdges = vertex[tid]->numEdges();
if (iteration == 0) {
int vid = vertex[tid]->getId();
vertex[tid]->setValue(vid);
}
int curMin;
curMin = vertex[tid]->getValue();
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int nbLabel;
nbLabel = edge->getValue();
if (iteration == 0) {
nbLabel = edge->getVertexId(); // Note!
}
if (nbLabel < curMin) {
curMin = nbLabel;
}
}
/**
* Set my new label
*/
vertex[tid]->setValue(curMin);
int label = curMin;
/**
* Broadcast my value to neighbors by writing the value to my edges.
*/
if (iteration > 0) {
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int edgeValue;
edgeValue = edge->getValue();
if (edgeValue > label) {
edge->setValue(label);
}
}
} else {
// Special case for first iteration to avoid overwriting
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(label);
}
}
}
}
__global__ void copyBack(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *cc) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned tree_size = tree_size_g;
void **vtable;
range_tree_node *table = range_tree;
if (tid < context->getNumVertices()) {
vtable = get_vfunc(vertex[tid], table, tree_size);
temp_copyBack = vtable[1];
cc[tid] = vertex[tid]->getValue();
}
}
| dbe879f27037733dde2355aafa0e8ad859582aef.cu | /*
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
__global__ void initObject(VirtVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col, int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
//for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(VirtVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
*/
#include "parse_oo.h"
void initContext(GraphChiContext *context, int vertices, int edges) {
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
void initObject(VirtVertex<int, int> *vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol, obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<int> *)alloc->my_new<Edge<int>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<int> **)alloc->my_new<Edge<int> *>(outdegree);
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree, alloc);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
void part0_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
vertex[tid] =
(VirtVertex<int, int> *)alloc->my_new<ChiVertex<int, int>>();
}
}
void part1_initObject(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col, int *inrow, int *incol,
obj_alloc *alloc) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
// int out_start = row[tid];
// int out_end;
// if (tid + 1 < context->getNumVertices()) {
// out_end = row[tid + 1];
// } else {
// out_end = context->getNumEdges();
// }
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
// } else {
// in_end = context->getNumEdges();
// }
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid].inEdgeDataArray =
// (ChiEdge<myType> *)alloc->my_new<Edge<myType>>(indegree);
// vertex[tid].outEdgeDataArray =
// (ChiEdge<myType> **)alloc->my_new<Edge<myType> *>(outdegree);
// new (&vertex[tid]) ChiVertex<int, int>(tid, indegree,
// outdegree,alloc);
vertex[tid]->set_in_out(alloc);
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void part_kern0_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
// vertex[tid].setValue(INT_MAX);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
// }
}
__global__ void part_kern1_initObject(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row,
int *col, int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
void initOutEdge(VirtVertex<int, int> **vertex, GraphChiContext *context,
int *row, int *col) {
int tid = 0;
for (tid = 0; tid < context->getNumVertices(); tid++) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
__global__ void kern_initObject(VirtVertex<int, int> *vertex,
GraphChiContext *context, int *row, int *col,
int *inrow, int *incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
new (&vertex[tid]) ChiVertex<int, int>(tid, indegree, outdegree);
vertex[tid].setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
}
//}
}
__global__ void kern_initOutEdge(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *row, int *col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
__managed__ range_tree_node *range_tree;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_CC;
__global__ void ConnectedComponent(VirtVertex<int, int> **vertex,
GraphChiContext *context, int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int numEdges;
numEdges = vertex[tid]->numEdges();
if (iteration == 0) {
int vid = vertex[tid]->getId();
vertex[tid]->setValue(vid);
}
int curMin;
curMin = vertex[tid]->getValue();
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int nbLabel;
nbLabel = edge->getValue();
if (iteration == 0) {
nbLabel = edge->getVertexId(); // Note!
}
if (nbLabel < curMin) {
curMin = nbLabel;
}
}
/**
* Set my new label
*/
vertex[tid]->setValue(curMin);
int label = curMin;
/**
* Broadcast my value to neighbors by writing the value to my edges.
*/
if (iteration > 0) {
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int edgeValue;
edgeValue = edge->getValue();
if (edgeValue > label) {
edge->setValue(label);
}
}
} else {
// Special case for first iteration to avoid overwriting
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(label);
}
}
}
}
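// The kernel above is one superstep of label-propagation connected components: each vertex
// takes the minimum of its current label and the labels carried by its edges (the neighbour
// vertex ids on the first superstep) and writes the result back to its edges so neighbours
// see it on the next pass. The host-side driver is not part of this file; a minimal sketch,
// with hypothetical names maxIter, numBlocks and blockSize, assuming the labels converge
// within maxIter supersteps:
//   for (int iter = 0; iter < maxIter; iter++)
//       ConnectedComponent<<<numBlocks, blockSize>>>(vertex, context, iter);
//   copyBack<<<numBlocks, blockSize>>>(vertex, context, cc);  // cc: device int array of per-vertex labels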
__global__ void copyBack(VirtVertex<int, int> **vertex,
GraphChiContext *context, int *cc) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned tree_size = tree_size_g;
void **vtable;
range_tree_node *table = range_tree;
if (tid < context->getNumVertices()) {
vtable = get_vfunc(vertex[tid], table, tree_size);
temp_copyBack = vtable[1];
cc[tid] = vertex[tid]->getValue();
}
}
|
a3f8f3d63cc09eded113a3b422fc6547e9b5f151.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaHandleError.h"
#include "Parameters.h"
#include "DepthFilter.h"
#include "Timer.h"
namespace FilterNamespace {
__constant__ int SF_RADIUS = 5;
__constant__ float SF_ALPHA = 0.75f;
__constant__ float SF_THRESHOLD = 40.0f;
__constant__ float TF_ALPHA = 0.5f;
__constant__ float TF_THRESHOLD = 40.0f;
float* lastFrame;
};
using namespace FilterNamespace;
__global__ void kernelCleanLastFrame(float* lastFrame) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
lastFrame[id] = 0;
}
}
__global__ void kernelFilterToDisparity(UINT16* source, float* target, float convertFactor) {
#define DEPTH_SORT(a, b) { if ((a) > (b)) {UINT16 temp = (a); (a) = (b); (b) = temp;} }
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
UINT16 arr[5] = { source[id], 0, 0, 0, 0 };
if (x - 1 >= 0) arr[1] = source[id - 1];
if (x + 1 < DEPTH_W) arr[2] = source[id + 1];
if (y - 1 >= 0) arr[3] = source[id - DEPTH_W];
if (y + 1 < DEPTH_H) arr[4] = source[id + DEPTH_W];
DEPTH_SORT(arr[0], arr[1]);
DEPTH_SORT(arr[0], arr[2]);
DEPTH_SORT(arr[0], arr[3]);
DEPTH_SORT(arr[0], arr[4]);
DEPTH_SORT(arr[1], arr[2]);
DEPTH_SORT(arr[1], arr[3]);
DEPTH_SORT(arr[1], arr[4]);
DEPTH_SORT(arr[2], arr[3]);
DEPTH_SORT(arr[2], arr[4]);
DEPTH_SORT(arr[3], arr[4]);
__syncthreads();
if (arr[0] != 0) {
target[id] = convertFactor / arr[2];
} else
if (arr[1] != 0) {
target[id] = convertFactor * 2 / (arr[2] + arr[3]);
} else
if (arr[2] != 0) {
target[id] = convertFactor / arr[3];
} else
if (arr[3] != 0) {
target[id] = convertFactor * 2 / (arr[3] + arr[4]);
		} else
		if (arr[4] != 0) {
target[id] = convertFactor / arr[4];
} else {
target[id] = 0;
}
}
}
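// The DEPTH_SORT cascade above is a 5-element sorting network, so arr ends up in ascending
// order with any zero (invalid) samples at the front; the branch chain then picks the median
// of the valid depth samples (averaging the two middle values when their count is even) and
// converts it to disparity as convertFactor / depth.
// Example: samples {0, 800, 810, 805, 0} sort to {0, 0, 800, 805, 810}; arr[2] != 0, so the
// three valid values have median arr[3] = 805 and target = convertFactor / 805.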
__global__ void kernelFilterToDepth(float* depth, float convertFactor) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
if (depth[id] != 0) {
depth[id] = convertFactor / depth[id] * 0.001; //to m
}
}
}
__global__ void kernelSFVertical(float* depth)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float origin = depth[id];
float result = 0;
if (origin != 0) {
float sum = origin;
float weight = 1;
float w = 1;
for (int r = 1; r <= SF_RADIUS; r++) {
w *= SF_ALPHA;
if (y - r >= 0 && depth[id - r * DEPTH_W] != 0 && fabs(depth[id - r * DEPTH_W] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id - r * DEPTH_W];
}
if (y + r < DEPTH_H && depth[id + r * DEPTH_W] != 0 && fabs(depth[id + r * DEPTH_W] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id + r * DEPTH_W];
}
}
result = sum / weight;
}
__syncthreads();
depth[id] = result;
}
}
__global__ void kernelSFHorizontal(float* depth)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float origin = depth[id];
float result = 0;
if (origin != 0) {
float sum = origin;
float weight = 1;
float w = 1;
for (int r = 1; r <= SF_RADIUS; r++) {
w *= SF_ALPHA;
if (x - r >= 0 && depth[id - r] != 0 && fabs(depth[id - r] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id - r];
}
if (x + r < DEPTH_W && depth[id + r] != 0 && fabs(depth[id + r] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id + r];
}
}
result = sum / weight;
}
__syncthreads();
depth[id] = result;
}
}
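// kernelSFVertical and kernelSFHorizontal are the two passes of a separable spatial filter:
// a valid pixel becomes a weighted average of itself and the valid neighbours within
// SF_RADIUS whose value differs from it by at most SF_THRESHOLD, with weight SF_ALPHA^r at
// offset r (weights decay geometrically away from the centre); invalid (zero) pixels stay zero.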
__global__ void kernelFillHoles(float* depth) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float result = depth[id];
int cnt = 0;
if (result == 0) {
for (int xx = x - 1; xx <= x + 1; xx++) {
for (int yy = y - 1; yy <= y + 1; yy++) {
if (0 <= xx && xx < DEPTH_W && 0 <= yy && yy < DEPTH_H && (xx != x || yy != y)) {
float currDepth = depth[yy * DEPTH_W + xx];
if (currDepth != 0) {
cnt++;
result = max(result, currDepth);
}
}
}
}
}
__syncthreads();
if (cnt >= 5) {
depth[id] = result;
}
}
}
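// kernelFillHoles writes into a zero (invalid) pixel the maximum valid depth found in its
// 8-neighbourhood, but only when at least 5 of the 8 neighbours are valid, so small speckle
// holes get filled while the interior of large invalid regions is left untouched.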
__global__ void kernelTemporalFilter(float* depth, float* lastFrame) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float result = depth[id];
float lastDepth = lastFrame[id];
if (lastDepth != 0 && fabs(result - lastDepth) <= TF_THRESHOLD) {
result = result * TF_ALPHA + lastDepth * (1 - TF_ALPHA);
}
__syncthreads();
depth[id] = result;
lastFrame[id] = result;
}
}
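// kernelTemporalFilter blends each pixel with its value from the previous frame,
// result = TF_ALPHA * current + (1 - TF_ALPHA) * last, but only when the previous value is
// non-zero and the two differ by at most TF_THRESHOLD; the (possibly unblended) result is
// also written back to lastFrame for the next call.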
extern "C"
void cudaDepthFilterInit(UINT16*& depth_device, float*& depthFloat_device, float*& lastFrame_device) {
dim3 threadsPerBlock = dim3(256, 1);
dim3 blocksPerGrid = dim3((DEPTH_W + threadsPerBlock.x - 1) / threadsPerBlock.x, (DEPTH_H + threadsPerBlock.y - 1) / threadsPerBlock.y);
HANDLE_ERROR(hipMalloc(&depth_device, DEPTH_H * DEPTH_W * sizeof(UINT16)));
HANDLE_ERROR(hipMalloc(&depthFloat_device, MAX_CAMERAS * DEPTH_H * DEPTH_W * sizeof(float)));
HANDLE_ERROR(hipMalloc(&lastFrame_device, MAX_CAMERAS * DEPTH_H * DEPTH_W * sizeof(float)));
for (int i = 0; i < MAX_CAMERAS; i++) {
kernelCleanLastFrame << <blocksPerGrid, threadsPerBlock >> > (lastFrame_device + i * DEPTH_H * DEPTH_W);
hipGetLastError();
}
}
extern "C"
void cudaDepthFilterClean(UINT16*& depth_device, float*& depthFloat_device, float*& lastFrame_device) {
HANDLE_ERROR(hipFree(depth_device));
HANDLE_ERROR(hipFree(depthFloat_device));
HANDLE_ERROR(hipFree(lastFrame_device));
}
extern "C"
void cudaDepthFiltering(UINT16* depthMap, UINT16* depth_device, float* depthFloat_device, float* lastFrame_device, float convertFactor) {
dim3 threadsPerBlock = dim3(256, 1);
dim3 blocksPerGrid = dim3((DEPTH_W + threadsPerBlock.x - 1) / threadsPerBlock.x, (DEPTH_H + threadsPerBlock.y - 1) / threadsPerBlock.y);
HANDLE_ERROR(hipMemcpy(depth_device, depthMap, DEPTH_H * DEPTH_W * sizeof(UINT16), hipMemcpyHostToDevice));
kernelFilterToDisparity << <blocksPerGrid, threadsPerBlock >> > (depth_device, depthFloat_device, convertFactor);
hipGetLastError();
for (int i = 0; i < 2; i++) {
kernelSFVertical << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device);
hipGetLastError();
kernelSFHorizontal << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device);
hipGetLastError();
}
for (int i = 0; i < 3; i++) {
kernelFillHoles << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device);
hipGetLastError();
}
kernelTemporalFilter << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device, lastFrame_device);
hipGetLastError();
kernelFilterToDepth << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device, convertFactor);
hipGetLastError();
}
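// Typical host-side use of the three entry points above (a hedged sketch, not part of this
// file; depthMap, cam and convertFactor are hypothetical caller-side names, and the
// per-camera slicing is inferred from the MAX_CAMERAS-sized buffers in cudaDepthFilterInit):
//   UINT16* depth_device; float *depthFloat_device, *lastFrame_device;
//   cudaDepthFilterInit(depth_device, depthFloat_device, lastFrame_device);
//   // per frame, per camera:
//   cudaDepthFiltering(depthMap, depth_device,
//                      depthFloat_device + cam * DEPTH_H * DEPTH_W,
//                      lastFrame_device + cam * DEPTH_H * DEPTH_W,
//                      convertFactor);
//   cudaDepthFilterClean(depth_device, depthFloat_device, lastFrame_device);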
| a3f8f3d63cc09eded113a3b422fc6547e9b5f151.cu | #include "CudaHandleError.h"
#include "Parameters.h"
#include "DepthFilter.h"
#include "Timer.h"
namespace FilterNamespace {
__constant__ int SF_RADIUS = 5;
__constant__ float SF_ALPHA = 0.75f;
__constant__ float SF_THRESHOLD = 40.0f;
__constant__ float TF_ALPHA = 0.5f;
__constant__ float TF_THRESHOLD = 40.0f;
float* lastFrame;
};
using namespace FilterNamespace;
__global__ void kernelCleanLastFrame(float* lastFrame) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
lastFrame[id] = 0;
}
}
__global__ void kernelFilterToDisparity(UINT16* source, float* target, float convertFactor) {
#define DEPTH_SORT(a, b) { if ((a) > (b)) {UINT16 temp = (a); (a) = (b); (b) = temp;} }
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
UINT16 arr[5] = { source[id], 0, 0, 0, 0 };
if (x - 1 >= 0) arr[1] = source[id - 1];
if (x + 1 < DEPTH_W) arr[2] = source[id + 1];
if (y - 1 >= 0) arr[3] = source[id - DEPTH_W];
if (y + 1 < DEPTH_H) arr[4] = source[id + DEPTH_W];
DEPTH_SORT(arr[0], arr[1]);
DEPTH_SORT(arr[0], arr[2]);
DEPTH_SORT(arr[0], arr[3]);
DEPTH_SORT(arr[0], arr[4]);
DEPTH_SORT(arr[1], arr[2]);
DEPTH_SORT(arr[1], arr[3]);
DEPTH_SORT(arr[1], arr[4]);
DEPTH_SORT(arr[2], arr[3]);
DEPTH_SORT(arr[2], arr[4]);
DEPTH_SORT(arr[3], arr[4]);
__syncthreads();
if (arr[0] != 0) {
target[id] = convertFactor / arr[2];
} else
if (arr[1] != 0) {
target[id] = convertFactor * 2 / (arr[2] + arr[3]);
} else
if (arr[2] != 0) {
target[id] = convertFactor / arr[3];
} else
if (arr[3] != 0) {
target[id] = convertFactor * 2 / (arr[3] + arr[4]);
		} else
		if (arr[4] != 0) {
target[id] = convertFactor / arr[4];
} else {
target[id] = 0;
}
}
}
__global__ void kernelFilterToDepth(float* depth, float convertFactor) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
if (depth[id] != 0) {
depth[id] = convertFactor / depth[id] * 0.001; //to m
}
}
}
__global__ void kernelSFVertical(float* depth)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float origin = depth[id];
float result = 0;
if (origin != 0) {
float sum = origin;
float weight = 1;
float w = 1;
for (int r = 1; r <= SF_RADIUS; r++) {
w *= SF_ALPHA;
if (y - r >= 0 && depth[id - r * DEPTH_W] != 0 && fabs(depth[id - r * DEPTH_W] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id - r * DEPTH_W];
}
if (y + r < DEPTH_H && depth[id + r * DEPTH_W] != 0 && fabs(depth[id + r * DEPTH_W] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id + r * DEPTH_W];
}
}
result = sum / weight;
}
__syncthreads();
depth[id] = result;
}
}
__global__ void kernelSFHorizontal(float* depth)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float origin = depth[id];
float result = 0;
if (origin != 0) {
float sum = origin;
float weight = 1;
float w = 1;
for (int r = 1; r <= SF_RADIUS; r++) {
w *= SF_ALPHA;
if (x - r >= 0 && depth[id - r] != 0 && fabs(depth[id - r] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id - r];
}
if (x + r < DEPTH_W && depth[id + r] != 0 && fabs(depth[id + r] - origin) <= SF_THRESHOLD) {
weight += w;
sum += w * depth[id + r];
}
}
result = sum / weight;
}
__syncthreads();
depth[id] = result;
}
}
__global__ void kernelFillHoles(float* depth) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float result = depth[id];
int cnt = 0;
if (result == 0) {
for (int xx = x - 1; xx <= x + 1; xx++) {
for (int yy = y - 1; yy <= y + 1; yy++) {
if (0 <= xx && xx < DEPTH_W && 0 <= yy && yy < DEPTH_H && (xx != x || yy != y)) {
float currDepth = depth[yy * DEPTH_W + xx];
if (currDepth != 0) {
cnt++;
result = max(result, currDepth);
}
}
}
}
}
__syncthreads();
if (cnt >= 5) {
depth[id] = result;
}
}
}
__global__ void kernelTemporalFilter(float* depth, float* lastFrame) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < DEPTH_W && y < DEPTH_H) {
int id = y * DEPTH_W + x;
float result = depth[id];
float lastDepth = lastFrame[id];
if (lastDepth != 0 && fabs(result - lastDepth) <= TF_THRESHOLD) {
result = result * TF_ALPHA + lastDepth * (1 - TF_ALPHA);
}
__syncthreads();
depth[id] = result;
lastFrame[id] = result;
}
}
extern "C"
void cudaDepthFilterInit(UINT16*& depth_device, float*& depthFloat_device, float*& lastFrame_device) {
dim3 threadsPerBlock = dim3(256, 1);
dim3 blocksPerGrid = dim3((DEPTH_W + threadsPerBlock.x - 1) / threadsPerBlock.x, (DEPTH_H + threadsPerBlock.y - 1) / threadsPerBlock.y);
HANDLE_ERROR(cudaMalloc(&depth_device, DEPTH_H * DEPTH_W * sizeof(UINT16)));
HANDLE_ERROR(cudaMalloc(&depthFloat_device, MAX_CAMERAS * DEPTH_H * DEPTH_W * sizeof(float)));
HANDLE_ERROR(cudaMalloc(&lastFrame_device, MAX_CAMERAS * DEPTH_H * DEPTH_W * sizeof(float)));
for (int i = 0; i < MAX_CAMERAS; i++) {
kernelCleanLastFrame << <blocksPerGrid, threadsPerBlock >> > (lastFrame_device + i * DEPTH_H * DEPTH_W);
cudaGetLastError();
}
}
extern "C"
void cudaDepthFilterClean(UINT16*& depth_device, float*& depthFloat_device, float*& lastFrame_device) {
HANDLE_ERROR(cudaFree(depth_device));
HANDLE_ERROR(cudaFree(depthFloat_device));
HANDLE_ERROR(cudaFree(lastFrame_device));
}
extern "C"
void cudaDepthFiltering(UINT16* depthMap, UINT16* depth_device, float* depthFloat_device, float* lastFrame_device, float convertFactor) {
dim3 threadsPerBlock = dim3(256, 1);
dim3 blocksPerGrid = dim3((DEPTH_W + threadsPerBlock.x - 1) / threadsPerBlock.x, (DEPTH_H + threadsPerBlock.y - 1) / threadsPerBlock.y);
HANDLE_ERROR(cudaMemcpy(depth_device, depthMap, DEPTH_H * DEPTH_W * sizeof(UINT16), cudaMemcpyHostToDevice));
kernelFilterToDisparity << <blocksPerGrid, threadsPerBlock >> > (depth_device, depthFloat_device, convertFactor);
cudaGetLastError();
for (int i = 0; i < 2; i++) {
kernelSFVertical << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device);
cudaGetLastError();
kernelSFHorizontal << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device);
cudaGetLastError();
}
for (int i = 0; i < 3; i++) {
kernelFillHoles << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device);
cudaGetLastError();
}
kernelTemporalFilter << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device, lastFrame_device);
cudaGetLastError();
kernelFilterToDepth << <blocksPerGrid, threadsPerBlock >> > (depthFloat_device, convertFactor);
cudaGetLastError();
}
|
4094151dd0584be30a7a8eb70ee65a7f28bbf706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/local/cuda-convnet2/img_acts.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
* --------------------------------------------------------------------------
*/
#include "cudaconv2.cuh"
#include "nvmatrix.cuh"
#include "img_acts/img_act_templates.cuh"
#ifdef _WIN32
#define _Pragma(x)
#endif
namespace megdnn {
namespace cuda {
/*
* New Titan-optimized stuff.
*/
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
moduleIdx = my * numModulesX + mx; // out
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out
}
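// The helper above maps a module (my, mx) and the output pixel owned by this block to the
// module's linear index and to that pixel's offset inside the module's filterSize x filterSize
// window; the preloading kernels below use both values to index their filter and hidAct loads.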
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
}
/*
* Same loop as above but inverted.
*/
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
}
#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
}
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}
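// In the kernels below, IA_PRELOAD_LOOP / IA_PRELOAD_LOOP2 / IA_PRELOAD_LOOP3 accumulate one
// shared-memory column of filter weights times hidden activations into the per-thread prod
// registers, while IA_PRELOAD_W(_TX) and IA_PRELOAD_H(_TX) prefetch the next filter and hidAct
// values into wPreload/hPreload (the _TX variants via tex1Dfetch), so the global loads for the
// next tile overlap the arithmetic on the current one.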
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor
// These launch bounds ensure 25% occupancy (128 registers used)
// as opposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
__syncthreads();
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
//const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again, these useless declarations save registers
//const int outputY = threadIdx.y, outputX = threadIdx.x;
//const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
// filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
//const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
}
}
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
__syncthreads();
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;
#pragma unroll
for (int z = 0; z < 4; ++z) {
IA_PRELOAD_LOOP(z,0);
IA_PRELOAD_W_TX(z);
}
#pragma unroll
for (int z = 4; z < 12; ++z) {
IA_PRELOAD_LOOP(z,0);
IA_PRELOAD_H_TX((z-4)/4,z%4);
}
#pragma unroll
for (int z = 12; z < 16; ++z) {
IA_PRELOAD_LOOP(z,0);
}
__syncthreads();
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
__syncthreads();
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
#pragma unroll
for (int z = 0; z < 4; ++z) {
IA_PRELOAD_LOOP(z,filterCacheH);
IA_PRELOAD_W_TX(z+4);
}
#pragma unroll
for (int z = 4; z < 12; ++z) {
IA_PRELOAD_LOOP(z,filterCacheH);
IA_PRELOAD_H_TX((z-4)/4, z%4);
}
#pragma unroll
for (int z = 12; z < 16; ++z) {
IA_PRELOAD_LOOP(z,filterCacheH);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
__syncthreads();
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
//const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again, these useless declarations save registers
//const int outputY = threadIdx.y, outputX = threadIdx.x;
//const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
// filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
//const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[moduleIdx * numImages];
int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
}
}
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
__syncthreads();
// It seems that there is no point explicitly interleaving loads
// and computations because the scheduler does that anyway.
IA_PRELOAD_LOOP2(0,0);
IA_PRELOAD_LOOP2(1,0);
IA_PRELOAD_LOOP2(2,0);
IA_PRELOAD_LOOP2(3,0);
IA_PRELOAD_LOOP2(4,0);
IA_PRELOAD_LOOP2(5,0);
IA_PRELOAD_LOOP2(6,0);
IA_PRELOAD_LOOP2(7,0);
IA_PRELOAD_LOOP2(8,0);
IA_PRELOAD_LOOP2(9,0);
IA_PRELOAD_LOOP2(10,0);
IA_PRELOAD_LOOP2(11,0);
IA_PRELOAD_LOOP2(12,0);
IA_PRELOAD_LOOP2(13,0);
IA_PRELOAD_LOOP2(14,0);
IA_PRELOAD_LOOP2(15,0);
IA_PRELOAD_W_TX(0);
IA_PRELOAD_W_TX(1);
IA_PRELOAD_W_TX(2);
IA_PRELOAD_W_TX(3);
IA_PRELOAD_W_TX(4);
IA_PRELOAD_W_TX(5);
IA_PRELOAD_H_TX(0,0);
IA_PRELOAD_H_TX(0,1);
IA_PRELOAD_H_TX(0,2);
IA_PRELOAD_H_TX(0,3);
IA_PRELOAD_H_TX(1,0);
IA_PRELOAD_H_TX(1,1);
IA_PRELOAD_H_TX(1,2);
IA_PRELOAD_H_TX(1,3);
IA_PRELOAD_H_TX(2,0);
IA_PRELOAD_H_TX(2,1);
IA_PRELOAD_H_TX(2,2);
IA_PRELOAD_H_TX(2,3);
IA_PRELOAD_H_TX(3,0);
IA_PRELOAD_H_TX(3,1);
IA_PRELOAD_H_TX(3,2);
IA_PRELOAD_H_TX(3,3);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
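/*
 * A concrete reading of the offsets used in the kernels above (for reference; linear
 * per-element indices, conv case, within one overSample slice):
 *   hidActs[filter][module][image] -> filter * numModules * numImages + module * numImages + image
 *   filters[color][pixel][filter]  -> color * filterPixels * numFilters + pixel * numFilters + filter
 *   targets[color][pixel][image]   -> color * imgPixels * numImages + pixel * numImages + image
 */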
void _imgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
megdnn_assert_internal(numImgColors % numGroups == 0);
//megdnn_assert_internal(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
bool previous_limit = (numFilters % (16 * numGroups)) == 0;
megdnn_assert_internal(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
megdnn_assert_internal(numGroups == 1 || numFilterColors % 4 == 0);
megdnn_assert_internal(filterPixels == filterSize * filterSize);
megdnn_assert_internal(hidActs.getNumRows() == numModules * numFilters);
megdnn_assert_internal(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
megdnn_assert_internal(numModules == numModulesY * numModulesX);
megdnn_assert_internal(hidActs.isContiguous());
megdnn_assert_internal(filters.isContiguous());
megdnn_assert_internal(!hidActs.isTrans());
megdnn_assert_internal(!filters.isTrans());
megdnn_assert_internal(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
megdnn_assert_internal(paddingStart <= 0);
megdnn_assert_internal(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
megdnn_assert_internal(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
megdnn_assert_internal(moduleStride <= filterSize);
megdnn_assert_internal(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread = 0, imgsPerThread = 0;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
megdnn_assert_internal(numFilterColors % (threads.y * colorsPerThread) == 0);
//previous_limit = numFilterColors % (threads.y * colorsPerThread) == 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
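    // Worked example of the dispatch above (illustrative only): with numGroups = 1,
    // numImgColors = numFilterColors = 64 and numImages = 128, the first branch picks
    // threads = (32, 8), colorsPerThread = 8 and imgsPerThread = 4, hence
    // blocks = (DIVUP(128, 32 * 4) * (64 / (8 * 8)), imgPixels) = (1, imgPixels),
    // and checkCaseBounds below evaluates to false.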
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
megdnn_assert_internal(targets.getNumRows() == numImgColors * imgPixels);
megdnn_assert_internal(targets.getNumCols() == numImages);
}
const bool scale = scaleTargets != 0;
// hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared);
// conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(
// hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize,
// imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
//return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// hipStream_t stream = NVMatrix::getDefaultStream();
if (conv == false) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
if (previous_limit) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
if (previous_limit) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
/*
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
*/
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
void convImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(hipStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
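/*
 * Usage note (editor's sketch; everything stated here is read off the wrappers above
 * and the kernels, not an extra API guarantee): convImgActs() passes conv = true to
 * _imgActs() for convolutional (shared) filters, while localImgActs() passes
 * conv = false for the per-module (unshared) filter layout. The shorter overloads
 * overwrite `targets` (scaleTargets = 0, scaleOutput = 1); the longer ones accumulate
 * scaleTargets * targets + scaleOutput * result, as implemented in the kernels.
 */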
} // namespace cuda
} // namespace megdnn
| 4094151dd0584be30a7a8eb70ee65a7f28bbf706.cu | /**
* \file dnn/src/cuda/local/cuda-convnet2/img_acts.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
* --------------------------------------------------------------------------
*/
#include "cudaconv2.cuh"
#include "nvmatrix.cuh"
#include "img_acts/img_act_templates.cuh"
#ifdef _WIN32
#define _Pragma(x)
#endif
namespace megdnn {
namespace cuda {
/*
* New Titan-optimized stuff.
*/
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
moduleIdx = my * numModulesX + mx; // out
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out
}
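/*
 * Worked example with illustrative values (not tied to any particular call site):
 * paddingStart = -2, moduleStride = 1, filterSize = 5, numModulesX = 32,
 * blockPixelIdxY = 4, blockPixelIdxX = 5, module (my, mx) = (3, 4):
 *   moduleTop  = -2 + 3*1 = 1  ->  pxInFilterY = 4 - 1 = 3
 *   moduleLeft = -2 + 4*1 = 2  ->  pxInFilterX = 5 - 2 = 3
 *   moduleIdx = 3*32 + 4 = 100,  pxIdxInFilter = 3*5 + 3 = 18
 * i.e. this output pixel is covered by filter tap (3, 3) of module 100.
 */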
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
/*
* Same loop as above but inverted.
*/
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
} \
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}
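/*
 * Readability note: after unrolling, IA_PRELOAD_LOOP(3, 0) accumulates
 *   prod[c][i] += shFilters[c * B_Y + threadIdx.y][3]
 *               * shHidActs[3][threadIdx.x * imgsPerThread + i];
 * for every (c, i) pair; IA_PRELOAD_LOOP2 performs the same accumulation with the two
 * unrolled loops swapped. IA_PRELOAD_W / IA_PRELOAD_H refill the wPreload / hPreload
 * register buffers from the fLoad / hLoad pointers, and the _TX variants do the same
 * through tex1Dfetch on the texture objects.
 */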
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor
// These launch bounds ensure 25% occupancy (128 registers used)
// as opposed to 13% (130 registers) achieved by default.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
__syncthreads();
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
//const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again; these useless declarations save registers
//const int outputY = threadIdx.y, outputX = threadIdx.x;
//const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
// filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
//const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
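/*
 * Main loop. The structure below is software pipelining: while the products for the
 * tile currently held in shared memory are being accumulated, the next tile of filter
 * weights (wPreload) and hidden activations (hPreload) is already being fetched into
 * registers via the IA_PRELOAD_* macros, so texture loads overlap with the arithmetic
 * instead of serializing with it.
 */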
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
}
}
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
__syncthreads();
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;
#pragma unroll
for (int z = 0; z < 4; ++z) {
IA_PRELOAD_LOOP(z,0);
IA_PRELOAD_W_TX(z);
}
#pragma unroll
for (int z = 4; z < 12; ++z) {
IA_PRELOAD_LOOP(z,0);
IA_PRELOAD_H_TX((z-4)/4,z%4);
}
#pragma unroll
for (int z = 12; z < 16; ++z) {
IA_PRELOAD_LOOP(z,0);
}
__syncthreads();
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
__syncthreads();
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
#pragma unroll
for (int z = 0; z < 4; ++z) {
IA_PRELOAD_LOOP(z,filterCacheH);
IA_PRELOAD_W_TX(z+4);
}
#pragma unroll
for (int z = 4; z < 12; ++z) {
IA_PRELOAD_LOOP(z,filterCacheH);
IA_PRELOAD_H_TX((z-4)/4, z%4);
}
#pragma unroll
for (int z = 12; z < 16; ++z) {
IA_PRELOAD_LOOP(z,filterCacheH);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
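/*
 * Editor's note on how these two preload kernels are used (based on the dispatch code
 * in _imgActs below): the 8x32-thread, 8-colors-per-thread kernel above is instantiated
 * as <8, 32, 4, 8, 32, 16, ...> for numFilterColors % 64 == 0, and the 4x32-thread,
 * 12-colors-per-thread kernel that follows as <4, 32, 4, 12, 16, 16, ...> for
 * numFilterColors % 48 == 0; in the branches shown they are chosen when
 * numImages % 128 == 0 and `previous_limit` holds, otherwise the code falls back to
 * conv_img_acts_manycolor_kepler.
 */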
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];
fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0);
fill_shared_mem<float>((float *)shHidActs, sizeof(shHidActs)/sizeof(float), 0);
__syncthreads();
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
//const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again; these useless declarations save registers
//const int outputY = threadIdx.y, outputX = threadIdx.x;
//const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
// filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
//const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[moduleIdx * numImages];
int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
}
}
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
}
}
}
}
hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
__syncthreads();
// It seems that there is no point explicitly interleaving loads
// and computations because the scheduler does that anyway.
IA_PRELOAD_LOOP2(0,0);
IA_PRELOAD_LOOP2(1,0);
IA_PRELOAD_LOOP2(2,0);
IA_PRELOAD_LOOP2(3,0);
IA_PRELOAD_LOOP2(4,0);
IA_PRELOAD_LOOP2(5,0);
IA_PRELOAD_LOOP2(6,0);
IA_PRELOAD_LOOP2(7,0);
IA_PRELOAD_LOOP2(8,0);
IA_PRELOAD_LOOP2(9,0);
IA_PRELOAD_LOOP2(10,0);
IA_PRELOAD_LOOP2(11,0);
IA_PRELOAD_LOOP2(12,0);
IA_PRELOAD_LOOP2(13,0);
IA_PRELOAD_LOOP2(14,0);
IA_PRELOAD_LOOP2(15,0);
IA_PRELOAD_W_TX(0);
IA_PRELOAD_W_TX(1);
IA_PRELOAD_W_TX(2);
IA_PRELOAD_W_TX(3);
IA_PRELOAD_W_TX(4);
IA_PRELOAD_W_TX(5);
IA_PRELOAD_H_TX(0,0);
IA_PRELOAD_H_TX(0,1);
IA_PRELOAD_H_TX(0,2);
IA_PRELOAD_H_TX(0,3);
IA_PRELOAD_H_TX(1,0);
IA_PRELOAD_H_TX(1,1);
IA_PRELOAD_H_TX(1,2);
IA_PRELOAD_H_TX(1,3);
IA_PRELOAD_H_TX(2,0);
IA_PRELOAD_H_TX(2,1);
IA_PRELOAD_H_TX(2,2);
IA_PRELOAD_H_TX(2,3);
IA_PRELOAD_H_TX(3,0);
IA_PRELOAD_H_TX(3,1);
IA_PRELOAD_H_TX(3,2);
IA_PRELOAD_H_TX(3,3);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
* Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
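/*
 * Illustrative shapes (hypothetical numbers, chosen only to make the layouts concrete):
 * numImgColors = 64, numGroups = 1, numFilters = 32, filterSize = 5 (filterPixels = 25),
 * imgSizeY = imgSizeX = 32 (imgPixels = 1024), moduleStride = 1, paddingStart = 0,
 * numModulesY = numModulesX = 28 (numModules = 784), numImages = 128. As rows x cols
 * matrices: hidActs is (32 * 784) x 128, filters (conv) is (64 * 25) x 32, and targets
 * is resized to (64 * 1024) x 128.
 */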
void _imgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
megdnn_assert_internal(numImgColors % numGroups == 0);
//megdnn_assert_internal(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
bool previous_limit = (numFilters % (16 * numGroups)) == 0;
megdnn_assert_internal(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
megdnn_assert_internal(numGroups == 1 || numFilterColors % 4 == 0);
megdnn_assert_internal(filterPixels == filterSize * filterSize);
megdnn_assert_internal(hidActs.getNumRows() == numModules * numFilters);
megdnn_assert_internal(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
megdnn_assert_internal(numModules == numModulesY * numModulesX);
megdnn_assert_internal(hidActs.isContiguous());
megdnn_assert_internal(filters.isContiguous());
megdnn_assert_internal(!hidActs.isTrans());
megdnn_assert_internal(!filters.isTrans());
megdnn_assert_internal(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
megdnn_assert_internal(paddingStart <= 0);
megdnn_assert_internal(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
megdnn_assert_internal(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
megdnn_assert_internal(moduleStride <= filterSize);
megdnn_assert_internal(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread = 0, imgsPerThread = 0;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
megdnn_assert_internal(numFilterColors % (threads.y * colorsPerThread) == 0);
//previous_limit = numFilterColors % (threads.y * colorsPerThread) == 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
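/*
 * Illustrative launch configuration (hypothetical numbers, matching the shape example
 * above): for numFilterColors = 64, numImages = 128, imgPixels = 1024 the first branch
 * yields threads = (32, 8), colorsPerThread = 8, imgsPerThread = 4, hence
 * blocks = (DIVUP(128, 32*4) * (64 / (8*8)), 1024) = (1, 1024), and checkCaseBounds
 * (computed just below) is false.
 */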
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
megdnn_assert_internal(targets.getNumRows() == numImgColors * imgPixels);
megdnn_assert_internal(targets.getNumCols() == numImages);
}
const bool scale = scaleTargets != 0;
// cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
// conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(
// hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize,
// imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
//return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// cudaStream_t stream = NVMatrix::getDefaultStream();
if (conv == false) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
if (previous_limit) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
if (previous_limit) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 8, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 4, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 8, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 4, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
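// checkCaseBounds path below: presumably taken when numImages is not a multiple of 16
// (the finest alignment the unchecked kernels above accept). These instantiations carry
// the bounds-checking template flag and handle arbitrary image counts, at some
// performance cost.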
else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, cudaFuncCachePreferShared);
img_acts_mediumcolor < 2, 4, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
/*
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
*/
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 3, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, cudaFuncCachePreferShared);
img_acts_color < 2, 1, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
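// Public entry points below. convImgActs() forwards to _imgActs with the trailing flag
// set to true (convolutional weight sharing), localImgActs() with false (locally
// connected); the shorter overloads default to scaleTargets = 0, scaleOutput = 1, i.e.
// the target buffer is overwritten rather than accumulated into.
// Illustrative call (sketch only, matrix names are hypothetical):
//   convImgActs(stream, hidActs, filters, imgGrads,
//               imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
//               numImgColors, numGroups);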
void convImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
} // namespace cuda
} // namespace megdnn
|
a39c89d77fc099881f752141a338275e707f0cab.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#include <pcl/pcl_exports.h>
#include <pcl/cuda/sample_consensus/sac_model_1point_plane.h>
#include <pcl/cuda/common/eigen.h>
#include <pcl/cuda/cutil_math.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/random.h>
#include <hip/hip_vector_types.h>
#include <stdio.h>
#include <limits>
// specify inlier computation method
//#define KINECT_NORMALS
#define KINECT
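// KINECT: inliers are selected with the Kinect disparity error model only
// (CheckPlanarInlierKinectIndices); KINECT_NORMALS additionally requires the point
// normal to agree with the plane normal within angle_threshold
// (CheckPlanarInlierKinectNormalIndices).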
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModel1PointPlane<Storage>::SampleConsensusModel1PointPlane (
const PointCloudConstPtr &cloud) :
SampleConsensusModel<Storage> (cloud)
{
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> void
SampleConsensusModel1PointPlane<Storage>::getSamples (int &iterations, Indices &samples)
{
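// The 1-point model needs a single sample per hypothesis: `iterations` is ignored and
// one index is drawn uniformly from *indices_ using the model's RNG (rngl_).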
samples.resize (1);
float trand = indices_->size () / (RAND_MAX + 1.0f);
int idx = (int)(rngl_ () * trand);
samples[0] = (*indices_)[idx];
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::computeModelCoefficients (
const Indices &samples, Coefficients &model_coefficients)
{
if (samples.size () != 1)
return (false);
/* if (isnan ((PointXYZRGB)input_->points[samples[0]]).x ||
isnan ((PointXYZRGB)input_->points[samples[1]]).x ||
isnan ((PointXYZRGB)input_->points[samples[2]]).x)
return (false);*/
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients
// (1-point variant: the normal is fixed to (0,0,-1) above, so only the offset d is taken from the sampled point)
float3 mc = normalize (normal);
if (model_coefficients.size () != 4)
model_coefficients.resize (4);
model_coefficients[0] = mc.x;
model_coefficients[1] = mc.y;
model_coefficients[2] = mc.z;
// ... + d = 0
model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz);
return (true);
}
__host__ __device__
unsigned int hash(unsigned int a)
{
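// Classic Wang-style 32-bit integer mix; kept for decorrelating per-thread RNG seeds
// (see the commented-out rng.seed (hash (t)) in Create1PointPlaneSampleHypothesis).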
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
thrust::tuple <int, float4>
Create1PointPlaneSampleHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = 5;
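// 5 appears to act as a placeholder for an invalid hypothesis; it is returned unchanged
// if the sampled point below is NaN.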
float trand = (float) nr_indices / (thrust::default_random_engine::max + 1.0f);
//rng.seed (hash (t));
//int sample_point = indices[(int)(rng () * trand)];
int sample_point = indices[(int)(t * trand)];
if (isnan (input[sample_point].x))
return (thrust::make_tuple (sample_point, coeff));
#if 0
//TODO:: kind of important: get normal! :D
int xIdx = sample_point % width_;
int yIdx = sample_point / width_;
//int counter = 1;
int window_size = 3;
int left_index = 0;
int top_index = 0;
// West
if (xIdx >= window_size)
{
left_index = sample_point - window_size;
}
else
{
left_index = sample_point + window_size;
}
// North
if (yIdx >= window_size)
{
top_index = sample_point - window_size * width_;
}
else
{
top_index = sample_point + window_size * width_;
}
float3 left_point;
left_point.x = input[left_index].x - input[sample_point].x;
left_point.y = input[left_index].y - input[sample_point].y;
left_point.z = input[left_index].z - input[sample_point].z;
float3 top_point;
top_point.x = input[top_index].x - input[sample_point].x;
top_point.y = input[top_index].y - input[sample_point].y;
top_point.z = input[top_index].z - input[sample_point].z;
float3 normal = cross (top_point, left_point);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
if (0 == (normal.x) && 0 == (normal.y) && 0 == (normal.z))
{
//mc.x = mc.y = 0;
if (top_point.x == 0 && top_point.y == 0 && top_point.z == 0)
{
mc.x = 999999;
mc.y = input[top_index].x;
mc.z = input[sample_point].x;
//mc.z = top_index - sample_point;
//mc.z = 999999;
}
else
{
if (left_point.x == 0 && left_point.y == 0 && left_point.z == 0)
{
mc.x = mc.y = 888888;
mc.z = left_index - sample_point;
//mc.z = 888888;
}
}
}
#else
float3 mc = make_float3 (normals_[sample_point].x, normals_[sample_point].y, normals_[sample_point].z);
#endif
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (thrust::make_tuple (sample_point, coeff));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
float4
Create1PointPlaneHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = bad_value;
float trand = nr_indices / (RAND_MAX + 1.0f);
thrust::default_random_engine rng (t);
int sample_point = indices[(int)(rng () * trand)];
if (isnan (input[sample_point].x))
return (coeff);
//TODO:: kind of important: get normal! :D
//int xIdx = sample_point % width;
//int yIdx = sample_point / width;
//float3 b = input[sample_point];
//int counter = 1;
//// West
//if (xIdx < width-window_size)
//{
// b += input[sample_point + window_size];
// counter += 1
//}
//// North
//if (yIdx >= window_size)
//{
// b += input[sample_point - window_size * width];
//}
//// South
//if (yIdx < height-window_size)
//{
// b += input[sample_point + window_size * width];
//}
//// East
//if (xIdx >= window_size)
//{
// b += input[sample_point + window_size];
//}
//// Estimate the XYZ centroid
//compute3DCentroid (cloud, xyz_centroid);
//// Compute the 3x3 covariance matrix
//computeCovarianceMatrix (cloud, xyz_centroid, covariance_matrix);
//// Get the plane normal and surface curvature
//solvePlaneParameters (covariance_matrix, xyz_centroid, plane_parameters, curvature);
//int[5] idxs;
//idxs[0] = sample_point;
// west = sample_point - window_size;
//else
// west = -1;
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients
// (as above, the normal is fixed to (0,0,-1); only the offset d depends on the sampled point)
float3 mc = normalize (normal);
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (coeff);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, int max_iterations)
{
using namespace thrust;
// Create a vector with as many sample hypotheses/coefficients as we want to generate
h.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
h.begin (),
Create1PointPlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
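// The overload below does the same, but additionally records (via a zip iterator) which
// input point was sampled for each hypothesis, returning it through `samples`.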
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, Samples &samples, int max_iterations)
{
using namespace thrust;
// Create a vector with as many sample hypotheses/coefficients as we want to generate
h.resize (max_iterations);
samples.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
//index_sequence_begin, index_sequence_begin + max_iterations,
thrust::make_zip_iterator (thrust::make_tuple (samples.begin (), h.begin())),
// h.begin (),
Create1PointPlaneSampleHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*normals_)[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
input_->width, input_->height,
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
//TODO: make threshold adaptive, depending on z
return (fabs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
NewCheckPlanarInlier<Storage>::operator () (const int &idx)
{
if (idx == -1)
return -1;
PointXYZRGB p = input_[idx];
if (isnan (p.x))
return -1;
if (fabs (p.x * coefficients.x +
p.y * coefficients.y +
p.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return idx;
else
// If outlier, return -1
return -1;
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> int
CheckPlanarInlier::operator () (const Tuple &t)
{
if (thrust::get<1>(t) == -1)
return (-1);
if (isnan (thrust::get<0>(t).x))
return (-1);
// Fill in XYZ (and copy NaNs with it)
float4 pt;
pt.x = thrust::get<0>(t).x;
pt.y = thrust::get<0>(t).y;
pt.z = thrust::get<0>(t).z;
pt.w = 1;
//TODO: make threshold adaptive, depending on z
if (fabs (dot (pt, coefficients)) < threshold)
// If inlier, return its position in the vector
return (thrust::get<1>(t));
else
// If outlier, return -1
return (-1);
}
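// Kinect disparity-model inlier tests follow. b and f look like the sensor baseline (in
// metres) and focal length (in pixels); a point is accepted if moving it along its viewing
// ray onto the candidate plane would change its disparity by no more than the fixed
// tolerance (1/6 px here, 1/2 px in the normal-checking variant).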
int CheckPlanarInlierKinectIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((fabs (actual_disparity - orig_disparity) <= 1.0/6.0) & idx != -1)
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierKinectNormalIndices::operator () (const Tuple &t, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
const PointXYZRGB &pt = thrust::get<0>(t);
float4 &normal = thrust::get<1>(t);
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((fabs (actual_disparity - orig_disparity) <= 1.0/2.0) & (idx != -1)
&
(
fabs (acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
fabs (acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierNormalIndices::operator () (const Tuple &t, const int &idx)
{
const PointXYZRGB &pt = thrust::get<0>(t);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
return (-1);
float4 &normal = thrust::get<1>(t);
//TODO: make threshold adaptive, depending on z
if (fabs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold
&
(
fabs (acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
fabs (acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
int CheckPlanarInlierIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
if (idx == -1)
return (-1);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z))
return (-1);
//TODO: make threshold adaptive, depending on z
if (fabs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Coefficients &model_coefficients, float threshold)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModel1PointPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
return (int) count_if (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (coefficients, threshold));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Hypotheses &h, int idx, float threshold)
{
if (isnan (((float4)h[idx]).x))
return (0);
return (int)
(thrust::count_if (
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())),
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (h[idx], threshold)));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
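// Two-pass inlier extraction: CheckPlanarInlier writes either the point's index or -1
// into a stencil of the same length as the input, and copy_if (isInlier) then compacts
// the stencil into the final inlier list.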
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
//it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1);
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size ();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 &c)
{
float angle_threshold = 0.26f;
using namespace thrust;
int nr_points = (int) indices_stencil_->size ();
float bad_point = std::numeric_limits<float>::quiet_NaN ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
// necessary for the transform_if call below (since not all elements get written, we init with -1)..
//inliers_stencil->resize (nr_points, -1);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
if (isnan (coefficients.x) |
isnan (coefficients.y) |
isnan (coefficients.z) |
isnan (coefficients.w) )
{
c.x = c.y = c.z = 0;
return 0;
}
float3 best_centroid;
IndicesPtr best_inliers_stencil;
float3 centroid;
centroid.x = centroid.y = centroid.z = 0;
best_centroid = centroid;
//ORIG
// transform (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold));
// this is just as fast as the ORIG version, but requires initialization to -1 (see above) --> much slower
// transform_if (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// indices_->begin(),
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold),
// isInlier ()
// );
// i forgot why this was slow. but it was. :)
// transform (
// indices_stencil_->begin (),
// indices_stencil_->end(),
// inliers_stencil->begin (),
// NewCheckPlanarInlier<Storage> (coefficients, (float)threshold, input_->points));
// compute inliers
// fastest
#ifdef KINECT
// NOTE: this performs inlier checks with kinect disparity error model, without normal check
transform (
input_->points.begin (), input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
// NOTE: this performs inlier checks with kinect disparity error model, with normal check
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// store inliers here
Indices inliers;
inliers.resize (indices_->size ()); // is this necessary?
typename Indices::iterator last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
if (inliers.size () < 1)
return (int) inliers.size ();
best_inliers_stencil = inliers_stencil;
int best_nr_inliers = (int) inliers.size ();
int nr_inliers_after_refit = (int) inliers.size ();
int nr_inliers_before_refit;
int nr_refit_iterations = 0;
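// Refinement loop: recompute centroid and covariance of the current inliers, take the
// eigenvector reported for the smallest eigenvalue as the refined normal, rebuild the
// plane coefficients, and redo the inlier test; iterate while the inlier count keeps
// growing, up to 120 iterations, keeping the best hypothesis found so far.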
do {
nr_inliers_before_refit = nr_inliers_after_refit;
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (isnan (centroid.x) | isnan (centroid.y) | isnan (centroid.z))
{
std::cerr << "Wow, centroid contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
// Note: centroid contains centroid * inliers.size() at this point !
#if 0
std::cerr << "----------------------------------------------------------------------------" << std::endl;
std::cerr << "inliers before: " << inliers.size () << std::endl;
std::cerr << "Centroid: " <<
centroid.x << ", " << centroid.y << ", " << centroid.z << ", " << std::endl;
#endif
CovarianceMatrix covariance_matrix;
computeCovariance (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
covariance_matrix, centroid);
if (isnan (covariance_matrix.data[0].x))
{
std::cerr << "Wow, covariance matrix contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
#if 0
std::cerr << "Covariance: " <<
covariance_matrix.data[0].x << ", " << covariance_matrix.data[0].y << ", " << covariance_matrix.data[0].z << std::endl <<
covariance_matrix.data[1].x << ", " << covariance_matrix.data[1].y << ", " << covariance_matrix.data[1].z << std::endl <<
covariance_matrix.data[2].x << ", " << covariance_matrix.data[2].y << ", " << covariance_matrix.data[2].z << std::endl;
#endif
CovarianceMatrix evecs;
float3 evals;
// compute eigenvalues and -vectors
eigen33 (covariance_matrix, evecs, evals);
float3 mc = normalize (evecs.data[0]);
#if 0
std::cerr << "Eigenvectors: " <<
evecs.data[0].x << ", " << evecs.data[0].y << ", " << evecs.data[0].z << std::endl <<
evecs.data[1].x << ", " << evecs.data[1].y << ", " << evecs.data[1].z << std::endl <<
evecs.data[2].x << ", " << evecs.data[2].y << ", " << evecs.data[2].z << std::endl;
std::cerr << "Coefficients before: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// compute plane coefficients from eigenvector corr. to smallest eigenvalue and centroid
coefficients.x = mc.x;
coefficients.y = mc.y;
coefficients.z = mc.z;
// ... + d = 0
coefficients.w = -1 * dot (mc, centroid);
#if 0
std::cerr << "Coefficients after: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// finally, another inlier check:
#ifdef KINECT
transform (
input_->points.begin (), input_->points.end (),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())) + nr_points,
// input_->points.begin (),
// input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// copy inliers from stencil to inlier vector
inliers.resize (inliers_stencil->size ()); // is this necessary?
last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
nr_inliers_after_refit = (int) inliers.size ();
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (nr_inliers_after_refit > best_nr_inliers)
{
best_nr_inliers = nr_inliers_after_refit;
best_inliers_stencil = inliers_stencil;
best_centroid = centroid;
h[idx] = coefficients;
}
//fprintf (stderr, "iteration %i: %f, %f, %f, %f ---> %i\n", nr_refit_iterations, coefficients.x, coefficients.y, coefficients.z, coefficients.w, best_nr_inliers);
} while (nr_inliers_after_refit > nr_inliers_before_refit & ++nr_refit_iterations < 120);
#if 0
std::cerr << "inliers after: " << nr_inliers_after_refit << std::endl;
#endif
//std::cerr << "--> refitting steps: " << nr_refit_iterations << std::endl;
inliers_stencil = best_inliers_stencil;
c = best_centroid;
return best_nr_inliers;
}
// explicit template instantiation for device and host
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Device>;
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Host>;
} // namespace cuda
} // namespace pcl
| a39c89d77fc099881f752141a338275e707f0cab.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#include <pcl/pcl_exports.h>
#include <pcl/cuda/sample_consensus/sac_model_1point_plane.h>
#include <pcl/cuda/common/eigen.h>
#include <pcl/cuda/cutil_math.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/random.h>
#include <vector_types.h>
#include <stdio.h>
#include <limits>
// specify inlier computation method
//#define KINECT_NORMALS
#define KINECT
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModel1PointPlane<Storage>::SampleConsensusModel1PointPlane (
const PointCloudConstPtr &cloud) :
SampleConsensusModel<Storage> (cloud)
{
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> void
SampleConsensusModel1PointPlane<Storage>::getSamples (int &iterations, Indices &samples)
{
samples.resize (1);
float trand = indices_->size () / (RAND_MAX + 1.0f);
int idx = (int)(rngl_ () * trand);
samples[0] = (*indices_)[idx];
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::computeModelCoefficients (
const Indices &samples, Coefficients &model_coefficients)
{
if (samples.size () != 1)
return (false);
/* if (isnan ((PointXYZRGB)input_->points[samples[0]]).x ||
isnan ((PointXYZRGB)input_->points[samples[1]]).x ||
isnan ((PointXYZRGB)input_->points[samples[2]]).x)
return (false);*/
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients
// (1-point variant: the normal is fixed to (0,0,-1) above, so only the offset d is taken from the sampled point)
float3 mc = normalize (normal);
if (model_coefficients.size () != 4)
model_coefficients.resize (4);
model_coefficients[0] = mc.x;
model_coefficients[1] = mc.y;
model_coefficients[2] = mc.z;
// ... + d = 0
model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz);
return (true);
}
__host__ __device__
unsigned int hash(unsigned int a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
thrust::tuple <int, float4>
Create1PointPlaneSampleHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = 5;
float trand = (float) nr_indices / (thrust::default_random_engine::max + 1.0f);
//rng.seed (hash (t));
//int sample_point = indices[(int)(rng () * trand)];
int sample_point = indices[(int)(t * trand)];
if (isnan (input[sample_point].x))
return (thrust::make_tuple (sample_point, coeff));
#if 0
//TODO:: kind of important: get normal! :D
int xIdx = sample_point % width_;
int yIdx = sample_point / width_;
//int counter = 1;
int window_size = 3;
int left_index = 0;
int top_index = 0;
// West
if (xIdx >= window_size)
{
left_index = sample_point - window_size;
}
else
{
left_index = sample_point + window_size;
}
// North
if (yIdx >= window_size)
{
top_index = sample_point - window_size * width_;
}
else
{
top_index = sample_point + window_size * width_;
}
float3 left_point;
left_point.x = input[left_index].x - input[sample_point].x;
left_point.y = input[left_index].y - input[sample_point].y;
left_point.z = input[left_index].z - input[sample_point].z;
float3 top_point;
top_point.x = input[top_index].x - input[sample_point].x;
top_point.y = input[top_index].y - input[sample_point].y;
top_point.z = input[top_index].z - input[sample_point].z;
float3 normal = cross (top_point, left_point);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
if (0 == (normal.x) && 0 == (normal.y) && 0 == (normal.z))
{
//mc.x = mc.y = 0;
if (top_point.x == 0 && top_point.y == 0 && top_point.z == 0)
{
mc.x = 999999;
mc.y = input[top_index].x;
mc.z = input[sample_point].x;
//mc.z = top_index - sample_point;
//mc.z = 999999;
}
else
{
if (left_point.x == 0 && left_point.y == 0 && left_point.z == 0)
{
mc.x = mc.y = 888888;
mc.z = left_index - sample_point;
//mc.z = 888888;
}
}
}
#else
float3 mc = make_float3 (normals_[sample_point].x, normals_[sample_point].y, normals_[sample_point].z);
#endif
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (thrust::make_tuple (sample_point, coeff));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
float4
Create1PointPlaneHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = bad_value;
float trand = nr_indices / (RAND_MAX + 1.0f);
thrust::default_random_engine rng (t);
int sample_point = indices[(int)(rng () * trand)];
if (isnan (input[sample_point].x))
return (coeff);
//TODO:: kind of important: get normal! :D
//int xIdx = sample_point % width;
//int yIdx = sample_point / width;
//float3 b = input[sample_point];
//int counter = 1;
//// West
//if (xIdx < width-window_size)
//{
// b += input[sample_point + window_size];
// counter += 1
//}
//// North
//if (yIdx >= window_size)
//{
// b += input[sample_point - window_size * width];
//}
//// South
//if (yIdx < height-window_size)
//{
// b += input[sample_point + window_size * width];
//}
//// East
//if (xIdx >= window_size)
//{
// b += input[sample_point + window_size];
//}
//// Estimate the XYZ centroid
//compute3DCentroid (cloud, xyz_centroid);
//// Compute the 3x3 covariance matrix
//computeCovarianceMatrix (cloud, xyz_centroid, covariance_matrix);
//// Get the plane normal and surface curvature
//solvePlaneParameters (covariance_matrix, xyz_centroid, plane_parameters, curvature);
//int[5] idxs;
//idxs[0] = sample_point;
// west = sample_point - window_size;
//else
// west = -1;
float3 normal;
normal.x = 0;
normal.y = 0;
normal.z = -1;
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (normal);
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[sample_point].xyz);
return (coeff);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
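// A single sampled point plus a normal pins down a plane (n, d = -n.p), which is why this
// model needs only one sample per hypothesis instead of the usual three points. This overload
// uses a fixed normal of (0,0,-1); the Samples overload below draws the normal from the
// per-point normal cloud instead.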
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
h.begin (),
Create1PointPlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModel1PointPlane<Storage>::generateModelHypotheses (
Hypotheses &h, Samples &samples, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
samples.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator (0));
//thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
//index_sequence_begin,
//index_sequence_begin + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
//index_sequence_begin, index_sequence_begin + max_iterations,
thrust::make_zip_iterator (thrust::make_tuple (samples.begin (), h.begin())),
// h.begin (),
Create1PointPlaneSampleHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*normals_)[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
input_->width, input_->height,
indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
//TODO: make threshold adaptive, depending on z
return (fabs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
NewCheckPlanarInlier<Storage>::operator () (const int &idx)
{
if (idx == -1)
return -1;
PointXYZRGB p = input_[idx];
if (isnan (p.x))
return -1;
if (fabs (p.x * coefficients.x +
p.y * coefficients.y +
p.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return idx;
else
// If outlier, return -1
return -1;
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> int
CheckPlanarInlier::operator () (const Tuple &t)
{
if (thrust::get<1>(t) == -1)
return (-1);
if (isnan (thrust::get<0>(t).x))
return (-1);
// Fill in XYZ (and copy NaNs with it)
float4 pt;
pt.x = thrust::get<0>(t).x;
pt.y = thrust::get<0>(t).y;
pt.z = thrust::get<0>(t).z;
pt.w = 1;
//TODO: make threshold adaptive, depending on z
if (fabs (dot (pt, coefficients)) < threshold)
// If inlier, return its position in the vector
return (thrust::get<1>(t));
else
// If outlier, return -1
return (-1);
}
int CheckPlanarInlierKinectIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
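// Kinect disparity-error model: b and f are presumably the stereo baseline (~7.5 cm) and the
// focal length in pixels (580, halved for the half-resolution depth map); that reading is an
// assumption, not documented here. A depth sensor measures disparity d = b*f/z, so rather than
// a fixed Euclidean band the point is pushed along its viewing ray by D onto the plane and
// accepted only if the induced disparity change stays within the 1/6-disparity tolerance below.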
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((fabs (actual_disparity - orig_disparity) <= 1.0/6.0) & (idx != -1))
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierKinectNormalIndices::operator () (const Tuple &t, const int &idx)
{
//if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
// return (-1);
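// Same Kinect disparity test as CheckPlanarInlierKinectIndices (with a wider 1/2-disparity
// tolerance), plus a normal gate: the per-point normal must lie within angle_threshold of the
// plane normal in either orientation, since estimated normals can be flipped.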
const PointXYZRGB &pt = thrust::get<0>(t);
float4 &normal = thrust::get<1>(t);
const float b = 0.075f;
const float f = 580.0f/2.0f;
float length_pt = sqrtf (dot (pt, pt));
float dot_n_p = pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z;
float D = - coefficients.w * length_pt / dot_n_p - length_pt;
float orig_disparity = b * f / pt.z;
float actual_disparity = orig_disparity * length_pt / (length_pt + D);
if ((fabs (actual_disparity - orig_disparity) <= 1.0/2.0) & (idx != -1)
&
(
fabs (acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
fabs (acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
return (idx);
else
return -1;
}
template <typename Tuple>
int CheckPlanarInlierNormalIndices::operator () (const Tuple &t, const int &idx)
{
const PointXYZRGB &pt = thrust::get<0>(t);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z) | (idx == -1))
return (-1);
float4 &normal = thrust::get<1>(t);
//TODO: make threshold adaptive, depending on z
if (fabs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold
&
(
fabs (acos (normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z)) < angle_threshold
|
fabs (acos (-(normal.x*coefficients.x + normal.y*coefficients.y + normal.z*coefficients.z))) < angle_threshold
)
)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
int CheckPlanarInlierIndices::operator () (const PointXYZRGB &pt, const int &idx)
{
if (idx == -1)
return (-1);
if (isnan (pt.x) | isnan (pt.y) | isnan (pt.z))
return (-1);
//TODO: make threshold adaptive, depending on z
if (fabs (pt.x * coefficients.x +
pt.y * coefficients.y +
pt.z * coefficients.z + coefficients.w) < threshold)
// If inlier, return its position in the vector
return (idx);
else
// If outlier, return -1
return (-1);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Coefficients &model_coefficients, float threshold)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModel1PointPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
return (int) count_if (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (coefficients, threshold));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::countWithinDistance (
const Hypotheses &h, int idx, float threshold)
{
if (isnan (((float4)h[idx]).x))
return (0);
return (int)
(thrust::count_if (
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())),
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (h[idx], threshold)));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
//it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1);
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[SampleConsensusModel1PointPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
{
// pcl::ScopeTime t ("Resize inl");
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
}
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
{
// pcl::ScopeTime t ("transform");
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
}
{
// pcl::ScopeTime t ("Resize all");
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
}
typename Indices::iterator it;
{
// pcl::ScopeTime t ("copy-if");
// Copy data
it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
}
{
// pcl::ScopeTime t ("Resize");
inliers->resize (it - inliers->begin ());
}
return (int) inliers->size ();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModel1PointPlane<Storage>::selectWithinDistance (
Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 &c)
{
float angle_threshold = 0.26f;
using namespace thrust;
int nr_points = (int) indices_stencil_->size ();
float bad_point = std::numeric_limits<float>::quiet_NaN ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
// necessary for the transform_if call below (since not all elements get written, we init with -1)..
//inliers_stencil->resize (nr_points, -1);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
if (isnan (coefficients.x) |
isnan (coefficients.y) |
isnan (coefficients.z) |
isnan (coefficients.w) )
{
c.x = c.y = c.z = 0;
return 0;
}
float3 best_centroid;
IndicesPtr best_inliers_stencil;
float3 centroid;
centroid.x = centroid.y = centroid.z = 0;
best_centroid = centroid;
//ORIG
// transform (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold));
// this is just as fast as the ORIG version, but requires initialization to -1 (see above) --> much slower
// transform_if (
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
// make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
// nr_points,
// indices_->begin(),
// inliers_stencil->begin (),
// CheckPlanarInlier (coefficients, threshold),
// isInlier ()
// );
// i forgot why this was slow. but it was. :)
// transform (
// indices_stencil_->begin (),
// indices_stencil_->end(),
// inliers_stencil->begin (),
// NewCheckPlanarInlier<Storage> (coefficients, (float)threshold, input_->points));
// compute inliers
// fastest
#ifdef KINECT
// NOTE: this performs inlier checks with kinect disparity error model, without normal check
transform (
input_->points.begin (), input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
// NOTE: this performs inlier checks with kinect disparity error model, with normal check
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// store inliers here
Indices inliers;
inliers.resize (indices_->size ()); // is this necessary?
typename Indices::iterator last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
if (inliers.size () < 1)
return (int) inliers.size ();
best_inliers_stencil = inliers_stencil;
int best_nr_inliers = (int) inliers.size ();
int nr_inliers_after_refit = (int) inliers.size ();
int nr_inliers_before_refit;
int nr_refit_iterations = 0;
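// Refinement loop: (1) take the centroid and covariance of the current inliers, (2) use the
// eigenvector of the smallest eigenvalue as the refined plane normal and recompute d from the
// centroid, (3) re-run the inlier test with the refined coefficients, and keep going while the
// inlier count still grows (capped at 120 rounds); the best coefficients found are written
// back into h[idx].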
do {
nr_inliers_before_refit = nr_inliers_after_refit;
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (isnan (centroid.x) | isnan (centroid.y) | isnan (centroid.z))
{
std::cerr << "Wow, centroid contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
// Note: centroid contains centroid * inliers.size() at this point !
#if 0
std::cerr << "----------------------------------------------------------------------------" << std::endl;
std::cerr << "inliers before: " << inliers.size () << std::endl;
std::cerr << "Centroid: " <<
centroid.x << ", " << centroid.y << ", " << centroid.z << ", " << std::endl;
#endif
CovarianceMatrix covariance_matrix;
computeCovariance (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
covariance_matrix, centroid);
if (isnan (covariance_matrix.data[0].x))
{
std::cerr << "Wow, covariance matrix contains nans!" << std::endl;
inliers_stencil = best_inliers_stencil;
c = make_float3 (bad_point, bad_point, bad_point);
//best_centroid;
return best_nr_inliers;
}
#if 0
std::cerr << "Covariance: " <<
covariance_matrix.data[0].x << ", " << covariance_matrix.data[0].y << ", " << covariance_matrix.data[0].z << std::endl <<
covariance_matrix.data[1].x << ", " << covariance_matrix.data[1].y << ", " << covariance_matrix.data[1].z << std::endl <<
covariance_matrix.data[2].x << ", " << covariance_matrix.data[2].y << ", " << covariance_matrix.data[2].z << std::endl;
#endif
CovarianceMatrix evecs;
float3 evals;
// compute eigenvalues and -vectors
eigen33 (covariance_matrix, evecs, evals);
float3 mc = normalize (evecs.data[0]);
#if 0
std::cerr << "Eigenvectors: " <<
evecs.data[0].x << ", " << evecs.data[0].y << ", " << evecs.data[0].z << std::endl <<
evecs.data[1].x << ", " << evecs.data[1].y << ", " << evecs.data[1].z << std::endl <<
evecs.data[2].x << ", " << evecs.data[2].y << ", " << evecs.data[2].z << std::endl;
std::cerr << "Coefficients before: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// compute plane coefficients from eigenvector corr. to smallest eigenvalue and centroid
coefficients.x = mc.x;
coefficients.y = mc.y;
coefficients.z = mc.z;
// ... + d = 0
coefficients.w = -1 * dot (mc, centroid);
#if 0
std::cerr << "Coefficients after: " <<
coefficients.x << ", " << coefficients.y << ", " << coefficients.z << ", " << coefficients.w << ", " << std::endl;
#endif
// finally, another inlier check:
#ifdef KINECT
transform (
input_->points.begin (), input_->points.end (),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())),
//make_zip_iterator (make_tuple (input_->points.begin (), normals_.begin())) + nr_points,
// input_->points.begin (),
// input_->points.end (),
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectIndices (coefficients, threshold, angle_threshold));
#endif
#ifdef KINECT_NORMALS
transform (
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())),
make_zip_iterator (make_tuple (input_->points.begin (), normals_->begin())) + nr_points,
indices_stencil_->begin (),
inliers_stencil->begin (),
CheckPlanarInlierKinectNormalIndices (coefficients, threshold, angle_threshold));
#endif
// copy inliers from stencil to inlier vector
inliers.resize (inliers_stencil->size ()); // is this necessary?
last = thrust::remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers.begin (), -1);
inliers.erase (last, inliers.end ());
nr_inliers_after_refit = (int) inliers.size ();
compute3DCentroid (make_permutation_iterator (input_->points.begin (), inliers.begin ()),
make_permutation_iterator (input_->points.begin (), inliers.end ()),
centroid);
if (nr_inliers_after_refit > best_nr_inliers)
{
best_nr_inliers = nr_inliers_after_refit;
best_inliers_stencil = inliers_stencil;
best_centroid = centroid;
h[idx] = coefficients;
}
//fprintf (stderr, "iteration %i: %f, %f, %f, %f ---> %i\n", nr_refit_iterations, coefficients.x, coefficients.y, coefficients.z, coefficients.w, best_nr_inliers);
} while ((nr_inliers_after_refit > nr_inliers_before_refit) & (++nr_refit_iterations < 120));
#if 0
std::cerr << "inliers after: " << nr_inliers_after_refit << std::endl;
#endif
//std::cerr << "--> refitting steps: " << nr_refit_iterations << std::endl;
inliers_stencil = best_inliers_stencil;
c = best_centroid;
return best_nr_inliers;
}
// explicit template instantiation for device and host
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Device>;
template class PCL_EXPORTS SampleConsensusModel1PointPlane<Host>;
} // namespace
} // namespace
|
87961d8be22b255ba519bb3e62981e29636155d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/operator/cudnn_batch_norm_layer.hpp"
#define BN_EPS float(1e-5)
namespace caffe {
static __global__ void linear_batch_norm_forward(int num,int channels,int height,int width,
const float *weight,const float * in, const float * bias, float *out)
{
CUDA_KERNEL_LOOP(ind,num*channels*height*width)
{
int c = ind / width / height % channels;
out[ind] = weight[c] * in[ind] + bias[c];
}
}
static __global__ void linear_batch_norm_backward(int num,int channels,int height,int width,
const float *weight,const float * in, const float * bias, float *out)
{
CUDA_KERNEL_LOOP(ind,num*channels*height*width)
{
int c = ind / width / height % channels;
out[ind] = weight[c] * in[ind];
}
}
void CuDNNBatchNormLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
if (Caffe::bn_state() == "frozen")
{
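// Frozen batch norm collapses into a per-channel affine map:
//   y = gamma*(x - mean)/sqrt(var + eps) + beta = w*x + b,
// with w = gamma/sqrt(var + eps) and b = beta - gamma*mean/sqrt(var + eps);
// blobs_[0..3] hold gamma, beta and the running mean/variance.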
const int K = bottom[0]->channels();
weights.Reshape(1,K,1,1);
bias.Reshape(1,K,1,1);
for(int c=0;c<K;c++)
{
weights.mutable_cpu_data()[c] = this->blobs_[0]->cpu_data()[c] / (sqrtf(this->blobs_[3]->cpu_data()[c]+ float(CUDNN_BN_MIN_EPSILON)));
bias.mutable_cpu_data()[c] = -this->blobs_[0]->cpu_data()[c]*this->blobs_[2]->cpu_data()[c] / (sqrtf(this->blobs_[3]->cpu_data()[c] + float(CUDNN_BN_MIN_EPSILON)))
+this->blobs_[1]->cpu_data()[c];
}
}
if (Caffe::number_collect_sample == 0 && Caffe::bn_state() == "learned")
{
caffe_gpu_set(this->blobs_[2]->count(),float(0),this->blobs_[2]->mutable_gpu_data());
caffe_gpu_set(this->blobs_[3]->count(),float(0),this->blobs_[3]->mutable_gpu_data());
}
const float* bottom_data = bottom[0]->gpu_data();
float* top_data = top[0]->mutable_gpu_data();
if (Caffe::bn_state() == "learned")
{
double factor;
if (Caffe::number_collect_sample == -1)
factor = 0.01;
else
factor = double(1)/double(Caffe::number_collect_sample+1);
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
bottom_desc_, bottom_data,
top_desc_,top_data,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[1]->gpu_data(),
factor,
this->blobs_[2]->mutable_gpu_data(),this->blobs_[3]->mutable_gpu_data(),
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
else
{
hipLaunchKernelGGL(( linear_batch_norm_forward), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->num(),bottom[0]->channels(),bottom[0]->height(),bottom[0]->width(),
weights.gpu_data(),bottom[0]->gpu_data(),bias.gpu_data(),top[0]->mutable_gpu_data());
/*
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
bottom_desc_, bottom_data,
top_desc_,top_data,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[1]->gpu_data(),
this->blobs_[2]->mutable_gpu_data(),this->blobs_[3]->mutable_gpu_data(),
double(0.001)
));
*/
}
}
void CuDNNBatchNormLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
if (Caffe::bn_state() == "learned")
{
const float* top_data = top[0]->gpu_data();
const float* top_diff = top[0]->gpu_diff();
const float* bottom_data = bottom[0]->gpu_data();
float* bottom_diff = bottom[0]->mutable_gpu_diff();
if (Caffe::frozen_param() == false)
{
CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
cudnn::dataType::one,cudnn::dataType::one,
bottom_desc_, bottom_data,
top_desc_,top_diff,
bottom_desc_, bottom_diff,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[0]->mutable_gpu_diff(),this->blobs_[1]->mutable_gpu_diff(),
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
else
{
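// Frozen parameters: alphaParamDiff = 0 and betaParamDiff = 1, so the freshly computed
// scale/bias gradients are discarded (the existing diffs are kept) while the data gradient
// is still propagated to the bottom blob.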
CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
cudnn::dataType::zero,cudnn::dataType::one,
bottom_desc_, bottom_data,
top_desc_,top_diff,
bottom_desc_, bottom_diff,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[0]->mutable_gpu_diff(),this->blobs_[1]->mutable_gpu_diff(),//not used
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
}
else
{
hipLaunchKernelGGL(( linear_batch_norm_backward), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->num(),bottom[0]->channels(),bottom[0]->height(),bottom[0]->width(),
weights.gpu_data(),top[0]->gpu_diff(),bias.gpu_data(),bottom[0]->mutable_gpu_diff());
}
}
void CuDNNBatchNormLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
cudnn::dataType::one,cudnn::dataType::one,
bottom_desc_, bottom[0]->gpu_data(),
bottom_desc_,bottom[0]->gpu_sec_diff(),
top_desc_, top[0]->mutable_gpu_sec_diff(),
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[0]->mutable_gpu_diff(),this->blobs_[1]->mutable_gpu_sec_diff(),//blobs_[1]->diff should be fixed
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
} // namespace caffe
| 87961d8be22b255ba519bb3e62981e29636155d1.cu | #include <vector>
#include "caffe/layers/operator/cudnn_batch_norm_layer.hpp"
#define BN_EPS float(1e-5)
namespace caffe {
static __global__ void linear_batch_norm_forward(int num,int channels,int height,int width,
const float *weight,const float * in, const float * bias, float *out)
{
CUDA_KERNEL_LOOP(ind,num*channels*height*width)
{
int c = ind / width / height % channels;
out[ind] = weight[c] * in[ind] + bias[c];
}
}
static __global__ void linear_batch_norm_backward(int num,int channels,int height,int width,
const float *weight,const float * in, const float * bias, float *out)
{
CUDA_KERNEL_LOOP(ind,num*channels*height*width)
{
int c = ind / width / height % channels;
out[ind] = weight[c] * in[ind];
}
}
void CuDNNBatchNormLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
if (Caffe::bn_state() == "frozen")
{
const int K = bottom[0]->channels();
weights.Reshape(1,K,1,1);
bias.Reshape(1,K,1,1);
for(int c=0;c<K;c++)
{
weights.mutable_cpu_data()[c] = this->blobs_[0]->cpu_data()[c] / (sqrtf(this->blobs_[3]->cpu_data()[c]+ float(CUDNN_BN_MIN_EPSILON)));
bias.mutable_cpu_data()[c] = -this->blobs_[0]->cpu_data()[c]*this->blobs_[2]->cpu_data()[c] / (sqrtf(this->blobs_[3]->cpu_data()[c] + float(CUDNN_BN_MIN_EPSILON)))
+this->blobs_[1]->cpu_data()[c];
}
}
if (Caffe::number_collect_sample == 0 && Caffe::bn_state() == "learned")
{
caffe_gpu_set(this->blobs_[2]->count(),float(0),this->blobs_[2]->mutable_gpu_data());
caffe_gpu_set(this->blobs_[3]->count(),float(0),this->blobs_[3]->mutable_gpu_data());
}
const float* bottom_data = bottom[0]->gpu_data();
float* top_data = top[0]->mutable_gpu_data();
if (Caffe::bn_state() == "learned")
{
double factor;
if (Caffe::number_collect_sample == -1)
factor = 0.01;
else
factor = double(1)/double(Caffe::number_collect_sample+1);
CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
bottom_desc_, bottom_data,
top_desc_,top_data,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[1]->gpu_data(),
factor,
this->blobs_[2]->mutable_gpu_data(),this->blobs_[3]->mutable_gpu_data(),
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
else
{
linear_batch_norm_forward<<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(bottom[0]->num(),bottom[0]->channels(),bottom[0]->height(),bottom[0]->width(),
weights.gpu_data(),bottom[0]->gpu_data(),bias.gpu_data(),top[0]->mutable_gpu_data());
/*
CUDNN_CHECK(cudnnBatchNormalizationForwardInference(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
bottom_desc_, bottom_data,
top_desc_,top_data,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[1]->gpu_data(),
this->blobs_[2]->mutable_gpu_data(),this->blobs_[3]->mutable_gpu_data(),
double(0.001)
));
*/
}
}
void CuDNNBatchNormLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
if (Caffe::bn_state() == "learned")
{
const float* top_data = top[0]->gpu_data();
const float* top_diff = top[0]->gpu_diff();
const float* bottom_data = bottom[0]->gpu_data();
float* bottom_diff = bottom[0]->mutable_gpu_diff();
if (Caffe::frozen_param() == false)
{
CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
cudnn::dataType::one,cudnn::dataType::one,
bottom_desc_, bottom_data,
top_desc_,top_diff,
bottom_desc_, bottom_diff,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[0]->mutable_gpu_diff(),this->blobs_[1]->mutable_gpu_diff(),
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
else
{
CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
cudnn::dataType::zero,cudnn::dataType::one,
bottom_desc_, bottom_data,
top_desc_,top_diff,
bottom_desc_, bottom_diff,
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[0]->mutable_gpu_diff(),this->blobs_[1]->mutable_gpu_diff(),//not used
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
}
else
{
linear_batch_norm_backward<<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(bottom[0]->num(),bottom[0]->channels(),bottom[0]->height(),bottom[0]->width(),
weights.gpu_data(),top[0]->gpu_diff(),bias.gpu_data(),bottom[0]->mutable_gpu_diff());
}
}
void CuDNNBatchNormLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(gpu_id_),
CUDNN_BATCHNORM_SPATIAL,
cudnn::dataType::one,cudnn::dataType::zero,
cudnn::dataType::one,cudnn::dataType::one,
bottom_desc_, bottom[0]->gpu_data(),
bottom_desc_,bottom[0]->gpu_sec_diff(),
top_desc_, top[0]->mutable_gpu_sec_diff(),
scale_bias_desc_,this->blobs_[0]->gpu_data(),this->blobs_[0]->mutable_gpu_diff(),this->blobs_[1]->mutable_gpu_sec_diff(),//blobs_[1]->diff shoud be fixed
double(CUDNN_BN_MIN_EPSILON),
mean_buffer_->mutable_gpu_data(),var_buffer_->mutable_gpu_data()));
}
} // namespace caffe
|
26842be48b83e78089f3bc4f50150f5ab3cc5000.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu/fouriercoeff.h"
#include "util.hpp"
#include <iostream>
#include "cmplxutil.hpp"
__global__ void
d_fouriercoeff_zero(slice_entry_t *slices, step_entry_t *steps,
cuFloatComplex* coeffs, cuFloatComplex *refrac,
const int norders, const unsigned nloops,
const unsigned nwavelengths, const unsigned nmats)
{
// calculating only one value
cuFloatComplex c0 = {0.0, 0.0};
// slice index
const unsigned si = blockIdx.x;
// wave-length-index. required for refractive index
unsigned wi = threadIdx.x;
// number of slices in this stack
const unsigned nslices = gridDim.x;
// get the slice map entry
slice_entry_t slice = slices[si];
step_entry_t stepn = steps[slice.offset + slice.nsteps - 1];
const float T = stepn.x;
/*
* this will possibly be heavily divergent when nsteps changes a lot
*/
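// Zeroth-order coefficient: the area-weighted average of the complex refractive index over
// one period, c0 = (1/T) * sum_j n_j * (x_j - x_{j-1}) (with the first width measured from 0).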
for (unsigned j = 0; j < nloops; j++, wi += blockDim.x) {
if (wi >= nwavelengths)
return;
step_entry_t step0 = steps[slice.offset];
float x0 = step0.x;
c0 = refrac[IDX(step0.m, wi, nmats)];
c0 *= x0;
for (unsigned i = 1; i < slice.nsteps; i++) {
stepn = steps[slice.offset + i];
cuFloatComplex f = refrac[IDX(stepn.m, wi, nmats)] * (stepn.x - step0.x);
c0 += f;
step0 = stepn;
}
c0 *= (1/T);
__syncthreads();
const unsigned i = si + wi * nslices;
coeffs[IDX(i, norders, nslices * nwavelengths)] = c0;
}
}
__global__ void
d_fouriercoeff_nonzero(slice_entry_t *slices, step_entry_t *steps,
cuFloatComplex* coeffs, cuFloatComplex *refrac,
const unsigned norders, const unsigned nloops,
const unsigned nmats)
{
const float two_pi = 2 * M_PI;
// coefficient to calculate
unsigned ci = threadIdx.x;
// slice index
const unsigned si = blockIdx.x;
// wave-length-index. required for refractive index
const unsigned wi = blockIdx.y;
// number of slices in this stack
const unsigned nslices = gridDim.x;
// number of wavelengths stored
const unsigned nwavelengths = gridDim.y;
const unsigned nrows = nslices * nwavelengths;
// get the slice map entry and last step for overall-width
slice_entry_t slice = slices[si];
step_entry_t stepn = steps[slice.offset + slice.nsteps - 1];
const float T = stepn.x;
for (unsigned c = 0; c < nloops; c++, ci += blockDim.x) {
if (ci >= (norders << 1))
return;
// calculation of coefficients
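// Closed-form coefficient of a piecewise-constant profile: summation by parts turns
// (1/T) * integral n(x) * exp(-i*2*pi*k*x/T) dx into a sum of material jumps
// (n_j - n_{j+1}) weighted by the phase exp(-i*2*pi*k*x_j/T) at each step boundary,
// times the i/(2*pi*k) prefactor applied after the loop; the initial f1 - f0 term
// carries the wrap-around jump at x = T.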
step_entry_t step0 = steps[slice.offset];
cuFloatComplex f0 = refrac[IDX(step0.m, wi, nmats)];
cuFloatComplex f1 = refrac[IDX(stepn.m, wi, nmats)];
cuFloatComplex tmp = f1 - f0;
cuFloatComplex tmp2 = {0.0f, 0.0f};
// calculate k<>0 indices
int k = ci - norders;
if (k >= 0) ++k;
for (unsigned i = 1; i < slice.nsteps; i++) {
stepn = steps[slice.offset + i];
// f0 = refrac[IDX(step0.m, wi, nwavelengths)];
f1 = refrac[IDX(stepn.m, wi, nmats)];
f0 = f0 - f1;
tmp2.x = 0.0f;
tmp2.y = -1.0f;
tmp2 *= (two_pi * (float)k/T * step0.x);
cexpi(tmp2);
f0 *= tmp2;
tmp += f0;
step0 = stepn;
// prevent fetching f1 twice from global memory
f0 = f1;
}
f0.x = 0.0f;
f0.y = 1.0f;
f0 /= (two_pi * k);
tmp *= f0;
// write back: beware of k>0 !
const unsigned j = k > 0 ? ci + 1 : ci;
const unsigned i = si + wi * nslices;
__syncthreads();
coeffs[IDX(i, j, nrows)] = tmp;
}
}
extern "C"
void cuda_fouriercoeff(unsigned n_wavelengths, cuFloatComplex *dev_refrac_idx,
unsigned nslices, slice_entry_t *slice, step_entry_t *steps,
int norders, cuFloatComplex *fouriercoeffs, unsigned nmats)
{
/*
* first kernel computes the values that are not at c(0)
*/
unsigned nthreads = 2 * norders;
unsigned nloops = (nthreads >> 8) + 1;
nthreads = min(nthreads, 512);
dim3 blocks(nslices, n_wavelengths);
dim3 threads(nthreads);
hipLaunchKernelGGL(( d_fouriercoeff_nonzero), dim3(blocks), dim3(threads), 0, 0, slice, steps,
fouriercoeffs, dev_refrac_idx, norders, nloops, nmats);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::cout << "ERROR: d_fouriercoeff_nonzero failed: " <<
err << " (" << hipGetErrorString(err) << ")" <<
std::endl;
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
/*
* second kernel computes remaining values at position c(0) for every
* slice and wavelength
*/
nloops = (n_wavelengths >> 8) + 1;
blocks = dim3(nslices);
threads = dim3(min(n_wavelengths, 512));
hipLaunchKernelGGL(( d_fouriercoeff_zero), dim3(blocks), dim3(threads), 0, 0, slice, steps, fouriercoeffs,
dev_refrac_idx, norders, nloops, n_wavelengths, nmats);
err = hipGetLastError();
if (err != hipSuccess) {
std::cout << "ERROR: d_fouriercoeff_zero failed: " <<
err << " (" << hipGetErrorString(err) << ")" <<
std::endl;
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
}
| 26842be48b83e78089f3bc4f50150f5ab3cc5000.cu | #include "cu/fouriercoeff.h"
#include "util.hpp"
#include <iostream>
#include "cmplxutil.hpp"
__global__ void
d_fouriercoeff_zero(slice_entry_t *slices, step_entry_t *steps,
cuFloatComplex* coeffs, cuFloatComplex *refrac,
const int norders, const unsigned nloops,
const unsigned nwavelengths, const unsigned nmats)
{
// calculating only one value
cuFloatComplex c0 = {0.0, 0.0};
// slice index
const unsigned si = blockIdx.x;
// wave-length-index. required for refractive index
unsigned wi = threadIdx.x;
// number of slices in this stack
const unsigned nslices = gridDim.x;
// get the slice map entry
slice_entry_t slice = slices[si];
step_entry_t stepn = steps[slice.offset + slice.nsteps - 1];
const float T = stepn.x;
/*
* this will possibly be heavily divergent when nsteps changes a lot
*/
for (unsigned j = 0; j < nloops; j++, wi += blockDim.x) {
if (wi >= nwavelengths)
return;
step_entry_t step0 = steps[slice.offset];
float x0 = step0.x;
c0 = refrac[IDX(step0.m, wi, nmats)];
c0 *= x0;
for (unsigned i = 1; i < slice.nsteps; i++) {
stepn = steps[slice.offset + i];
cuFloatComplex f = refrac[IDX(stepn.m, wi, nmats)] * (stepn.x - step0.x);
c0 += f;
step0 = stepn;
}
c0 *= (1/T);
__syncthreads();
const unsigned i = si + wi * nslices;
coeffs[IDX(i, norders, nslices * nwavelengths)] = c0;
}
}
__global__ void
d_fouriercoeff_nonzero(slice_entry_t *slices, step_entry_t *steps,
cuFloatComplex* coeffs, cuFloatComplex *refrac,
const unsigned norders, const unsigned nloops,
const unsigned nmats)
{
const float two_pi = 2 * M_PI;
// coefficient to calculate
unsigned ci = threadIdx.x;
// slice index
const unsigned si = blockIdx.x;
// wave-length-index. required for refractive index
const unsigned wi = blockIdx.y;
// number of slices in this stack
const unsigned nslices = gridDim.x;
// number of wavelengths stored
const unsigned nwavelengths = gridDim.y;
const unsigned nrows = nslices * nwavelengths;
// get the slice map entry and last step for overall-width
slice_entry_t slice = slices[si];
step_entry_t stepn = steps[slice.offset + slice.nsteps - 1];
const float T = stepn.x;
for (unsigned c = 0; c < nloops; c++, ci += blockDim.x) {
if (ci >= (norders << 1))
return;
// calculation of coefficients
step_entry_t step0 = steps[slice.offset];
cuFloatComplex f0 = refrac[IDX(step0.m, wi, nmats)];
cuFloatComplex f1 = refrac[IDX(stepn.m, wi, nmats)];
cuFloatComplex tmp = f1 - f0;
cuFloatComplex tmp2 = {0.0f, 0.0f};
// calculate k<>0 indices
int k = ci - norders;
if (k >= 0) ++k;
for (unsigned i = 1; i < slice.nsteps; i++) {
stepn = steps[slice.offset + i];
// f0 = refrac[IDX(step0.m, wi, nwavelengths)];
f1 = refrac[IDX(stepn.m, wi, nmats)];
f0 = f0 - f1;
tmp2.x = 0.0f;
tmp2.y = -1.0f;
tmp2 *= (two_pi * (float)k/T * step0.x);
cexpi(tmp2);
f0 *= tmp2;
tmp += f0;
step0 = stepn;
// prevent fetching f1 twice from global memory
f0 = f1;
}
f0.x = 0.0f;
f0.y = 1.0f;
f0 /= (two_pi * k);
tmp *= f0;
// write back: beware of k>0 !
const unsigned j = k > 0 ? ci + 1 : ci;
const unsigned i = si + wi * nslices;
__syncthreads();
coeffs[IDX(i, j, nrows)] = tmp;
}
}
extern "C"
void cuda_fouriercoeff(unsigned n_wavelengths, cuFloatComplex *dev_refrac_idx,
unsigned nslices, slice_entry_t *slice, step_entry_t *steps,
int norders, cuFloatComplex *fouriercoeffs, unsigned nmats)
{
/*
* first kernel computes the values that are not at c(0)
*/
unsigned nthreads = 2 * norders;
unsigned nloops = (nthreads >> 8) + 1;
nthreads = min(nthreads, 512);
dim3 blocks(nslices, n_wavelengths);
dim3 threads(nthreads);
d_fouriercoeff_nonzero<<<blocks, threads>>>(slice, steps,
fouriercoeffs, dev_refrac_idx, norders, nloops, nmats);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cout << "ERROR: d_fouriercoeff_nonzero failed: " <<
err << " (" << cudaGetErrorString(err) << ")" <<
std::endl;
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
/*
* second kernel computes remaining values at position c(0) for every
* slice and wavelength
*/
nloops = (n_wavelengths >> 8) + 1;
blocks = dim3(nslices);
threads = dim3(min(n_wavelengths, 512));
d_fouriercoeff_zero<<<blocks, threads>>>(slice, steps, fouriercoeffs,
dev_refrac_idx, norders, nloops, n_wavelengths, nmats);
err = cudaGetLastError();
if (err != cudaSuccess) {
std::cout << "ERROR: d_fouriercoeff_zero failed: " <<
err << " (" << cudaGetErrorString(err) << ")" <<
std::endl;
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
}
|
419a4a219ec5b67724f243f3b3d4bccef30f28b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
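// (a;q)_infinity = prod_{n>=0} (1 - a*q^n), truncated to a fixed number of factors;
// note the loop below starts at the (1 - a*q) term.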
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
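/* [a]_q = (1 - q^a) / (1 - q), the standard q-analogue of a number */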
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
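/* geffa(z,q) = sum_v [v^2]_q * z^v, truncated at v = 20 */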
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
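/* One-dimensional Jacobi-style theta sum truncated to |n| < 10:
   A(tau,z) = sum_n exp(i*pi*tau*n^2) * exp(2*i*n*z)
   (note the 2*i*z exponent rather than the textbook 2*pi*i*z convention). */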
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
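// Truncated Mittag-Leffler-type series: out = sum_v z^v / Gamma(c.r*v + c.i),
// with the two real parameters (alpha, beta) packed into the real and imaginary parts of c.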
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex aciwa(hipComplex z)
{
hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thy(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return; // check if within image bounds (grid may overshoot)
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 20;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex rhuva(3.0,0.0);
hipComplex rarva(8.0,0.0);
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<20;v++)
{
cue = cue - conj(powc(cue,aon*hilva(cue)))/powc(cue,uon*helva(cue));
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 419a4a219ec5b67724f243f3b3d4bccef30f28b1.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
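/* Worked example of the polar-form computation above: for ag = i (radiu = 1,
   thet = pi/2) and bg = i, mesp evaluates to 1 and frim to exp(-pi/2), so
   powc(i, i) comes out as roughly (0.2079, 0) -- the familiar real value of
   i^i = e^(-pi/2). */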
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
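/* For a positive integer a, qin(a,q) = (1 - q^a)/(1 - q) = 1 + q + ... + q^(a-1),
   the q-analogue of a; it tends to a as q -> 1. */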
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
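/* thetta(tau, z) above is a truncated Jacobi theta sum: with nome
   q = e^(i*pi*tau) it accumulates sum over n = -10..9 of q^(n^2) * e^(2*i*n*z),
   i.e. theta_3(z | tau) cut off at |n| <= 10. */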
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex aciwa(cuComplex z)
{
cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thy(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 20;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex rhuva(3.0,0.0);
cuComplex rarva(8.0,0.0);
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<20;v++)
{
cue = cue - conj(powc(cue,aon*hilva(cue)))/powc(cue,uon*helva(cue));
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
0842195b9296518d2ce9c172f39eca25e2eb056b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// The caller of this code is responsible for calling culaInitialize and culaShutdown
#define POL 6
#define IDX(i, j, n) n*(j) + i
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include "common.h"
void pm(float *A, int n) {
int i, j;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++)
printf("%f ", A[i + j*n]);
printf("\n");
}
printf("\n");
}
void checkCublasStatus(hipblasStatus_t status) {
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS error: Status code %d\n", status);
exit(1);
}
}
void checkError(hipError_t error) {
if (error != hipSuccess) {
fprintf(stderr, "!!!! CUDA error: Error code %d\n", error);
exit(1);
}
}
void checkCulaStatus(culaStatus status)
{
char buf[256];
if(!status)
return;
printf("nooooo %d\n", status);
culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
printf("%s\n", buf);
culaShutdown();
exit(EXIT_FAILURE);
}
float infinity_norm(float *A, int n) {
int i, j;
float sum, max = FLT_MIN, ent;
for (i = 0; i < n; i++) {
sum = 0;
for (j = 0; j < n; j++) {
ent = A[IDX(i, j, n)];
if (ent < 0) ent *= -1;
sum += ent;
}
if (sum > max) max = sum;
}
return max;
}
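/* infinity_norm() returns the maximum absolute row sum of the column-major
   n x n matrix, i.e. ||A||_inf = max_i sum_j |a_ij|. padeExp() below uses it
   to pick the scaling power s so that 2^(-s) * ||A||_inf drops below 0.5
   before the Pade approximant is evaluated. */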
// a is the matrix you have, e is the one you'll fill in
// now I'm going to use cublas, assume A and E are device pointers
void padeExp(hipblasHandle_t handle, float *A, float *E, int n) {
culaStatus status;
hipblasStatus_t bS;
hipError_t error;
float s, s_factor, one = 1, zero = 0, minus = -1, two = 2, m_two = -2;
float *Q, *A2, *P;
int i, j, f, *piv, scaled = 0;
float c[POL + 1];
float *A_h = (float*)malloc(n*n*sizeof(A_h[0]));
c[0] = 1;
for (i = 0; i < POL; i++) {
c[i + 1] = c[i]*((double)(POL - i)/((i + 1)*(2*POL - i)));
}
int blockSize = 16;
dim3 threads(blockSize, blockSize);
dim3 grid((n + blockSize - 1)/blockSize, (n + blockSize - 1)/blockSize);
// so I'll put this in for now to see if it's faster
int threadsPerBlock = blockSize*blockSize;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
extern float *p;
/* addRows<<<blocksPerGrid, threadsPerBlock>>>(A, p, n);
int idx;
bS = hipblasIsamax(handle, n, p, 1, &idx);
checkCublasStatus(bS);
// this will put s_factor in p[0] and 1/s_factor in p[1]
findScale<<<1, 1>>>(p, idx);
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE);
bS = hipblasSscal(handle, n*n, p, A, 1);
checkCublasStatus(bS);
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST);
*/
bS = hipblasGetVector(n*n, sizeof(A[0]), A, 1, A_h, 1);
checkCublasStatus(bS);
// scale here
s = infinity_norm(A_h, n);
if (s > 0.5) {
scaled = 1;
f = (int) (log(s)/log(2));
s = MAX(0,f + 2);
s_factor = pow(2, -s);
bS = hipblasSscal(handle, n*n, &s_factor, A, 1);
checkCublasStatus(bS);
}
error = hipMalloc((void**)&piv, n*sizeof(int)); /* pivot indices for culaDeviceSgesv: one int per row */
checkError(error);
error = hipMalloc((void**)&Q, n*n*sizeof(Q[0]));
checkError(error);
error = hipMalloc((void**)&P, n*n*sizeof(P[0]));
checkError(error);
error = hipMemset((void*)Q, 0, n*n*sizeof(Q[0]));
checkError(error);
error = hipMemset((void*)P, 0, n*n*sizeof(P[0]));
checkError(error);
// allocate space for A2; no need to initialize memory
error = hipMalloc((void**)&A2, n*n*sizeof(A2[0]));
checkError(error);
bS = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &one, A, n, A, n, &zero, A2, n);
checkCublasStatus(bS);
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, Q, n, c[POL]);
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, P, n, c[POL - 1]);
int odd = 1;
for (i = POL - 2; i >= 0; i--) {
if (odd == 1) {
// Q = Q*A2
bS = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &one, Q, n, A2, n, &zero, Q, n);
checkCublasStatus(bS);
// Q = Q + c[k]*I
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, Q, n, c[i]);
}
else {
// P = P*A2
bS = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &one, P, n, A2, n, &zero, P, n);
checkCublasStatus(bS);
// P = P + c[k]*I
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, P, n, c[i]);
}
odd = 1-odd;
}
if (odd == 1) {
// Q = Q*A
bS = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &one, Q, n, A, n, &zero, Q, n);
checkCublasStatus(bS);
}
else {
// P = P*A
bS = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &one, P, n, A, n, &zero, P, n);
checkCublasStatus(bS);
}
// Q = Q - P
bS = hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, &one, Q, n, &minus, P, n, Q, n);
checkCublasStatus(bS);
// Find X s.t. QX = Pi
error = hipMemset((void*)piv, 0, n*sizeof(int));
checkError(error);
status = culaDeviceSgesv(n, n, Q, n, piv, P, n);
checkCulaStatus(status);
// now P = X
if (odd == 0) {
// E = 2*P
bS = hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, &two, P, n, &zero, NULL, n, E, n);
checkCublasStatus(bS);
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, E, n, 1);
}
else {
bS = hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, &m_two, P, n, &zero, NULL, n, E, n);
checkCublasStatus(bS);
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, E, n, -1);
}
for (i = 0; i < s; i++) {
bS = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &one, E, n, E, n, &zero, E, n);
checkCublasStatus(bS);
}
/*
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE);
bS = hipblasSscal(handle, n*n, p + 1, A, 1);
checkCublasStatus(bS);
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST);
*/
if (scaled == 1) {
s_factor = 1./s_factor;
bS = hipblasSscal(handle, n*n, &s_factor, A, 1);
checkCublasStatus(bS);
}
free(A_h);
error = hipFree(Q);
checkError(error);
error = hipFree(P);
checkError(error);
error = hipFree(A2);
checkError(error);
return;
}
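/* The coefficients built at the top of padeExp() via the recurrence
   c[i+1] = c[i]*(POL-i)/((i+1)*(2*POL-i)) are the diagonal (POL,POL) Pade
   coefficients c_k = POL!*(2*POL-k)! / ((2*POL)! * k! * (POL-k)!).
   The helper below is an illustrative host-side sketch (its name is not used
   anywhere else in this file) that recomputes them directly, which is handy
   for cross-checking the recurrence. */
static void padeCoeffsReference(float c[POL + 1]) {
int k, j;
for (k = 0; k <= POL; k++) {
double num = 1.0, den = 1.0;
for (j = 1; j <= k; j++) {
num *= (double)(POL - j + 1); /* POL*(POL-1)*...*(POL-k+1) */
den *= (double)j * (2*POL - j + 1); /* builds k! * (2*POL)!/(2*POL-k)! */
}
c[k] = (float)(num / den);
}
}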
void phi(hipblasHandle_t handle, float *A, float *E, int n) {
float one = 1, minus = -1;
int i, *piv;
hipblasStatus_t bS;
hipError_t error;
culaStatus status;
// we want AX = e^A - I
padeExp(handle, A, E, n);
int blockSize = 16;
dim3 threads(blockSize, blockSize);
dim3 grid((n + blockSize - 1)/blockSize, (n + blockSize - 1)/blockSize);
hipLaunchKernelGGL(( addDiag), dim3(grid), dim3(threads), 0, 0, E, n, -1);
// now E = e^A - I
// Find X s.t. AX = E
error = hipMalloc((void**)&piv, n*sizeof(int));
checkError(error);
error = hipMemset((void*)piv, 0, n*sizeof(int));
checkError(error);
status = culaDeviceSgesv(n, n, A, n, piv, E, n);
checkCulaStatus(status);
// now E = X
// cleanup
error = hipFree(piv);
checkError(error);
}
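/* phi() solves A*X = e^A - I, i.e. X = phi_1(A) = I + A/2! + A^2/3! + ...
   For a 1x1 input this reduces to (e^a - 1)/a, so the small host-side sketch
   below (an illustrative helper, not referenced elsewhere) gives a cheap
   sanity check for scalar test cases. */
static float phi1_scalar(float a) {
/* guard the removable singularity at a = 0 with the series 1 + a/2 */
return (fabsf(a) > 1e-6f) ? (expf(a) - 1.0f) / a : 1.0f + 0.5f * a;
}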
/*
int main(void) {
#define N 5
hipblasStatus_t status;
hipblasHandle_t handle;
hipError_t error;
float A_h[N*N] = { -0.16580, 0.22570, 0.00000, 0.00000, 0.00000,
0.25460, -0.73720, 0.29330, 0.00000, 0.00000,
-0.00220, 0.33500, -0.53390, 0.20670, 0.00000,
-0.002100, -0.0019000, 0.2369000, -0.3663000, 0.1378000,
-0.0019000, -0.0018000, -0.0025000, 0.1566000, -0.2340000
};
float E_h[N*N];
int i, j;
//float A[4] = {1, 0, 0, 1};
float *E, *A;
error = hipMalloc((void**)&E, N*N*sizeof(float));
checkError(error);
error = hipMalloc((void**)&A, N*N*sizeof(float));
checkError(error);
status = hipblasCreate(&handle);
checkCublasStatus(status);
culaInitialize();
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
printf("%f ", A_h[i + j*N]);
printf("\n");
}
printf("\n");
error = hipMemcpy((void*)A, (void*)A_h, N*N*sizeof(float), hipMemcpyHostToDevice);
checkError(error);
//padeExp(handle, A, E, N);
phi(handle, A, E, N);
error = hipMemcpy((void*)E_h, (void*)E, N*N*sizeof(float), hipMemcpyDeviceToHost);
checkError(error);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
printf("%f ", E_h[i + j*N]);
printf("\n");
}
status = hipblasDestroy(handle);
checkCublasStatus(status);
error = hipFree(E);
checkError(error);
error = hipFree(A);
checkError(error);
culaShutdown();
}*/
| 0842195b9296518d2ce9c172f39eca25e2eb056b.cu | // The caller of this code is responsible for calling culaInitialize and culaShutdown
#define POL 6
#define IDX(i, j, n) n*(j) + i
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include "common.h"
void pm(float *A, int n) {
int i, j;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++)
printf("%f ", A[i + j*n]);
printf("\n");
}
printf("\n");
}
void checkCublasStatus(cublasStatus_t status) {
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! CUBLAS error: Status code %d\n", status);
exit(1);
}
}
void checkError(cudaError_t error) {
if (error != cudaSuccess) {
fprintf(stderr, "!!!! CUDA error: Error code %d\n", error);
exit(1);
}
}
void checkCulaStatus(culaStatus status)
{
char buf[256];
if(!status)
return;
printf("nooooo %d\n", status);
culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
printf("%s\n", buf);
culaShutdown();
exit(EXIT_FAILURE);
}
float infinity_norm(float *A, int n) {
int i, j;
float sum, max = FLT_MIN, ent;
for (i = 0; i < n; i++) {
sum = 0;
for (j = 0; j < n; j++) {
ent = A[IDX(i, j, n)];
if (ent < 0) ent *= -1;
sum += ent;
}
if (sum > max) max = sum;
}
return max;
}
// a is the matrix you have, e is the one you'll fill in
// now I'm going to use cublas, assume A and E are device pointers
void padeExp(cublasHandle_t handle, float *A, float *E, int n) {
culaStatus status;
cublasStatus_t bS;
cudaError_t error;
float s, s_factor, one = 1, zero = 0, minus = -1, two = 2, m_two = -2;
float *Q, *A2, *P;
int i, j, f, *piv, scaled = 0;
float c[POL + 1];
float *A_h = (float*)malloc(n*n*sizeof(A_h[0]));
c[0] = 1;
for (i = 0; i < POL; i++) {
c[i + 1] = c[i]*((double)(POL - i)/((i + 1)*(2*POL - i)));
}
int blockSize = 16;
dim3 threads(blockSize, blockSize);
dim3 grid((n + blockSize - 1)/blockSize, (n + blockSize - 1)/blockSize);
// so I'll put this in for now to see if it's faster
int threadsPerBlock = blockSize*blockSize;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
extern float *p;
/* addRows<<<blocksPerGrid, threadsPerBlock>>>(A, p, n);
int idx;
bS = cublasIsamax(handle, n, p, 1, &idx);
checkCublasStatus(bS);
// this will put s_factor in p[0] and 1/s_factor in p[1]
findScale<<<1, 1>>>(p, idx);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
bS = cublasSscal(handle, n*n, p, A, 1);
checkCublasStatus(bS);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
*/
bS = cublasGetVector(n*n, sizeof(A[0]), A, 1, A_h, 1);
checkCublasStatus(bS);
// scale here
s = infinity_norm(A_h, n);
if (s > 0.5) {
scaled = 1;
f = (int) (log(s)/log(2));
s = MAX(0,f + 2);
s_factor = pow(2, -s);
bS = cublasSscal(handle, n*n, &s_factor, A, 1);
checkCublasStatus(bS);
}
error = cudaMalloc((void**)&piv, n*sizeof(int)); /* pivot indices for culaDeviceSgesv: one int per row */
checkError(error);
error = cudaMalloc((void**)&Q, n*n*sizeof(Q[0]));
checkError(error);
error = cudaMalloc((void**)&P, n*n*sizeof(P[0]));
checkError(error);
error = cudaMemset((void*)Q, 0, n*n*sizeof(Q[0]));
checkError(error);
error = cudaMemset((void*)P, 0, n*n*sizeof(P[0]));
checkError(error);
// allocate space for A2; no need to initialize memory
error = cudaMalloc((void**)&A2, n*n*sizeof(A2[0]));
checkError(error);
bS = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &one, A, n, A, n, &zero, A2, n);
checkCublasStatus(bS);
addDiag<<<grid, threads>>>(Q, n, c[POL]);
addDiag<<<grid, threads>>>(P, n, c[POL - 1]);
int odd = 1;
for (i = POL - 2; i >= 0; i--) {
if (odd == 1) {
// Q = Q*A2
bS = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &one, Q, n, A2, n, &zero, Q, n);
checkCublasStatus(bS);
// Q = Q + c[k]*I
addDiag<<<grid, threads>>>(Q, n, c[i]);
}
else {
// P = P*A2
bS = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &one, P, n, A2, n, &zero, P, n);
checkCublasStatus(bS);
// P = P + c[k]*I
addDiag<<<grid, threads>>>(P, n, c[i]);
}
odd = 1-odd;
}
if (odd == 1) {
// Q = Q*A
bS = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &one, Q, n, A, n, &zero, Q, n);
checkCublasStatus(bS);
}
else {
// P = P*A
bS = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &one, P, n, A, n, &zero, P, n);
checkCublasStatus(bS);
}
// Q = Q - P
bS = cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, &one, Q, n, &minus, P, n, Q, n);
checkCublasStatus(bS);
// Find X s.t. QX = Pi
error = cudaMemset((void*)piv, 0, n*sizeof(int));
checkError(error);
status = culaDeviceSgesv(n, n, Q, n, piv, P, n);
checkCulaStatus(status);
// now P = X
if (odd == 0) {
// E = 2*P
bS = cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, &two, P, n, &zero, NULL, n, E, n);
checkCublasStatus(bS);
addDiag<<<grid, threads>>>(E, n, 1);
}
else {
bS = cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, &m_two, P, n, &zero, NULL, n, E, n);
checkCublasStatus(bS);
addDiag<<<grid, threads>>>(E, n, -1);
}
for (i = 0; i < s; i++) {
bS = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &one, E, n, E, n, &zero, E, n);
checkCublasStatus(bS);
}
/*
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
bS = cublasSscal(handle, n*n, p + 1, A, 1);
checkCublasStatus(bS);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
*/
if (scaled == 1) {
s_factor = 1./s_factor;
bS = cublasSscal(handle, n*n, &s_factor, A, 1);
checkCublasStatus(bS);
}
free(A_h);
error = cudaFree(Q);
checkError(error);
error = cudaFree(P);
checkError(error);
error = cudaFree(A2);
checkError(error);
return;
}
void phi(cublasHandle_t handle, float *A, float *E, int n) {
float one = 1, minus = -1;
int i, *piv;
cublasStatus_t bS;
cudaError_t error;
culaStatus status;
// we want AX = e^A - I
padeExp(handle, A, E, n);
int blockSize = 16;
dim3 threads(blockSize, blockSize);
dim3 grid((n + blockSize - 1)/blockSize, (n + blockSize - 1)/blockSize);
addDiag<<<grid, threads>>>(E, n, -1);
// now E = e^A - I
// Find X s.t. AX = E
error = cudaMalloc((void**)&piv, n*sizeof(int));
checkError(error);
error = cudaMemset((void*)piv, 0, n*sizeof(int));
checkError(error);
status = culaDeviceSgesv(n, n, A, n, piv, E, n);
checkCulaStatus(status);
// now E = X
// cleanup
error = cudaFree(piv);
checkError(error);
}
/*
int main(void) {
#define N 5
cublasStatus_t status;
cublasHandle_t handle;
cudaError_t error;
float A_h[N*N] = { -0.16580, 0.22570, 0.00000, 0.00000, 0.00000,
0.25460, -0.73720, 0.29330, 0.00000, 0.00000,
-0.00220, 0.33500, -0.53390, 0.20670, 0.00000,
-0.002100, -0.0019000, 0.2369000, -0.3663000, 0.1378000,
-0.0019000, -0.0018000, -0.0025000, 0.1566000, -0.2340000
};
float E_h[N*N];
int i, j;
//float A[4] = {1, 0, 0, 1};
float *E, *A;
error = cudaMalloc((void**)&E, N*N*sizeof(float));
checkError(error);
error = cudaMalloc((void**)&A, N*N*sizeof(float));
checkError(error);
status = cublasCreate(&handle);
checkCublasStatus(status);
culaInitialize();
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
printf("%f ", A_h[i + j*N]);
printf("\n");
}
printf("\n");
error = cudaMemcpy((void*)A, (void*)A_h, N*N*sizeof(float), cudaMemcpyHostToDevice);
checkError(error);
//padeExp(handle, A, E, N);
phi(handle, A, E, N);
error = cudaMemcpy((void*)E_h, (void*)E, N*N*sizeof(float), cudaMemcpyDeviceToHost);
checkError(error);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
printf("%f ", E_h[i + j*N]);
printf("\n");
}
status = cublasDestroy(handle);
checkCublasStatus(status);
error = cudaFree(E);
checkError(error);
error = cudaFree(A);
checkError(error);
culaShutdown();
}*/
|
f4b92d705235ccb9d197ef062aec245cbdecdfe0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
#include <cstddef>
#include <cstdint>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/radixSort.h"
#include <algorithm>
using FLOAT = double;
// A templated unsigned integer type with N bytes
template <int N>
struct uintN;
template <>
struct uintN<8> {
using type = uint8_t;
};
template <>
struct uintN<16> {
using type = uint16_t;
};
template <>
struct uintN<32> {
using type = uint32_t;
};
template <>
struct uintN<64> {
using type = uint64_t;
};
template <int N>
using uintN_t = typename uintN<N>::type;
// A templated unsigned integer type with the same size as T
template <typename T>
using uintT_t = uintN_t<sizeof(T) * 8>;
// Keep only the `N` most significant bytes of `t`, and set the others to zero
template <int N, typename T, typename SFINAE = std::enable_if_t<N <= sizeof(T)>>
__device__ __host__ T truncate(T const& t) {
const int shift = 8 * (sizeof(T) - N);
union {
T t;
uintT_t<T> u;
} c;
c.t = t;
c.u = c.u >> shift << shift;
return c.t;
}
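// truncate<N>() is __device__ __host__, so the tolerance used by the kernel's
// asserts can be reproduced on the host: after truncate<2>() only the two most
// significant bytes of a float survive (sign, exponent and the top 7 mantissa
// bits), e.g. truncate<2>(1.2345f) == truncate<2>(1.2346f). A 2-byte radix
// pass cannot order keys that differ only in the discarded bytes, which is why
// the verification below compares truncated values rather than the raw floats.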
namespace {
__global__ void testKernel(FLOAT* gpu_input, int* gpu_product, int elements, bool doPrint) {
size_t firstElement = threadIdx.x + blockIdx.x * blockDim.x; // This is going to be the track index
size_t gridSize = blockDim.x * gridDim.x;
// radix sort works in a single block
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
assert(elements <= 2048);
__shared__ uint16_t order[2048];
__shared__ uint16_t sws[2048];
__shared__ float z[2048];
__shared__ int iz[2048];
for (unsigned int itrack = firstElement; itrack < elements; itrack += gridSize) {
z[itrack] = gpu_input[itrack];
iz[itrack] = 10000 * gpu_input[itrack];
// order[itrack] = itrack;
}
__syncthreads();
radixSort<float, 2>(z, order, sws, elements);
__syncthreads();
//verify
for (unsigned int itrack = firstElement; itrack < (elements - 1); itrack += gridSize) {
auto ntrack = order[itrack];
auto mtrack = order[itrack + 1];
assert(truncate<2>(z[ntrack]) <= truncate<2>(z[mtrack]));
}
__syncthreads();
if (doPrint)
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (unsigned int itrackO = 0; itrackO < elements; itrackO++) {
int itrack = order[itrackO];
printf(
"Radix sort with %i elements: At position %i, track position at input %i with z at input %f, z fed to "
"radixSort %f\n",
elements,
itrackO,
itrack,
gpu_input[itrack],
z[itrack]);
gpu_product[itrackO] = itrack;
}
}
__syncthreads();
radixSort<int, 4>(iz, order, sws, elements);
__syncthreads();
for (unsigned int itrack = firstElement; itrack < (elements - 1); itrack += gridSize) {
auto ntrack = order[itrack];
auto mtrack = order[itrack + 1];
assert(iz[ntrack] <= iz[mtrack]);
}
if (doPrint)
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (unsigned int itrackO = 0; itrackO < elements; itrackO++) {
int itrack = order[itrackO];
printf(
"Radix sort with %i elements: At position %i, track position at input %i with z at input %f, z fed to "
"radixSort %d\n",
elements,
itrackO,
itrack,
gpu_input[itrack],
iz[itrack]);
gpu_product[itrackO] = itrack;
}
}
}
void testWrapper(FLOAT* gpu_input, int* gpu_product, int elements, bool doPrint) {
auto blockSize = 512; // somewhat arbitrary
auto gridSize = 1; // radixSort works in a single block, so the grid is fixed to one block (testKernel asserts gridDim.x == 1)
hipLaunchKernelGGL(( testKernel), dim3(gridSize), dim3(blockSize), 0, 0, gpu_input, gpu_product, elements, doPrint);
cudaCheck(hipGetLastError());
}
} // namespace
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
int main() {
cms::cudatest::requireDevices();
FLOAT* gpu_input;
int* gpu_product;
int nmax = 4 * 260;
FLOAT input[nmax] = {
30.0, 30.0, -4.4, -7.1860761642, -6.6870317459, 1.8010582924, 2.2535820007, 2.2666890621,
2.2677690983, 2.2794606686, 2.2802586555, 2.2821085453, 2.2852313519, 2.2877883911, 2.2946476936, 2.2960267067,
2.3006286621, 2.3245604038, 2.6755006313, 2.7229132652, 2.783257246, 2.8440306187, 2.9017834663, 2.9252648354,
2.9254128933, 2.927520752, 2.9422419071, 2.9453969002, 2.9457902908, 2.9465973377, 2.9492356777, 2.9573802948,
2.9575133324, 2.9575304985, 2.9586606026, 2.9605507851, 2.9622797966, 2.9625515938, 2.9641008377, 2.9646151066,
2.9676523209, 2.9708273411, 2.974111557, 2.9742531776, 2.9772830009, 2.9877333641, 2.9960610867, 3.013969183,
3.0187871456, 3.0379793644, 3.0407221317, 3.0415751934, 3.0470511913, 3.0560519695, 3.0592908859, 3.0599737167,
3.0607066154, 3.0629007816, 3.0632448196, 3.0633215904, 3.0643932819, 3.0645000935, 3.0666446686, 3.068046093,
3.0697011948, 3.0717656612, 3.0718104839, 3.0718348026, 3.0733406544, 3.0738227367, 3.0738801956, 3.0738828182,
3.0744686127, 3.0753741264, 3.0758397579, 3.0767207146, 3.0773906708, 3.0778541565, 3.0780284405, 3.0780889988,
3.0782799721, 3.0789675713, 3.0792205334, 3.0793278217, 3.0795567036, 3.0797944069, 3.0806643963, 3.0809247494,
3.0815284252, 3.0817306042, 3.0819730759, 3.0820026398, 3.0838682652, 3.084009409, 3.0848178864, 3.0853257179,
3.0855510235, 3.0856611729, 3.0873703957, 3.0884618759, 3.0891149044, 3.0893011093, 3.0895674229, 3.0901503563,
3.0903317928, 3.0912668705, 3.0920717716, 3.0954346657, 3.096424818, 3.0995628834, 3.1001036167, 3.1173279285,
3.1185023785, 3.1195163727, 3.1568386555, 3.1675374508, 3.1676850319, 3.1886672974, 3.3769197464, 3.3821125031,
3.4780933857, 3.4822063446, 3.4989323616, 3.5076274872, 3.5225863457, 3.5271244049, 3.5298995972, 3.5417425632,
3.5444457531, 3.5465917587, 3.5473103523, 3.5480232239, 3.5526945591, 3.5531234741, 3.5538012981, 3.5544877052,
3.5547749996, 3.5549693108, 3.5550665855, 3.5558729172, 3.5560717583, 3.5560848713, 3.5584278107, 3.558681488,
3.5587313175, 3.5592217445, 3.559384346, 3.5604712963, 3.5634038448, 3.563803196, 3.564593792, 3.5660364628,
3.5683133602, 3.5696356297, 3.569729805, 3.5740811825, 3.5757565498, 3.5760207176, 3.5760478973, 3.5836098194,
3.5839796066, 3.5852358341, 3.5901627541, 3.6141786575, 3.6601481438, 3.7187042236, 3.9741659164, 4.4111995697,
4.5337572098, 4.6292567253, 4.6748633385, 4.6806583405, 4.6868157387, 4.6868577003, 4.6879930496, 4.6888813972,
4.6910686493, 4.6925001144, 4.6957530975, 4.698094368, 4.6997032166, 4.7017259598, 4.7020640373, 4.7024269104,
4.7036352158, 4.7038679123, 4.7042069435, 4.7044086456, 4.7044372559, 4.7050771713, 4.7055773735, 4.7060651779,
4.7062759399, 4.7065420151, 4.70657444, 4.7066287994, 4.7066788673, 4.7067341805, 4.7072944641, 4.7074551582,
4.7075614929, 4.7075891495, 4.7076044083, 4.7077374458, 4.7080879211, 4.70819664, 4.7086658478, 4.708937645,
4.7092385292, 4.709479332, 4.7095656395, 4.7100076675, 4.7102108002, 4.7104525566, 4.7105507851, 4.71118927,
4.7113513947, 4.7115578651, 4.7116270065, 4.7116751671, 4.7117190361, 4.7117333412, 4.7117910385, 4.7119007111,
4.7120013237, 4.712003231, 4.712044239, 4.7122926712, 4.7135767937, 4.7143669128, 4.7145690918, 4.7148418427,
4.7149815559, 4.7159647942, 4.7161884308, 4.7177276611, 4.717815876, 4.718059063, 4.7188801765, 4.7190728188,
4.7199850082, 4.7213058472, 4.7239775658, 4.7243933678, 4.7243990898, 4.7273659706, 4.7294125557, 4.7296204567,
4.7325615883, 4.7356877327, 4.740146637, 4.742254734, 4.7433848381, 4.7454957962, 4.7462964058, 4.7692604065,
4.7723139628, 4.774812736, 4.8577151299, 4.890037536};
for (int i = 0; i < 260; i++) {
input[i + 260] = -input[i];
input[i + 2 * 260] = input[i] + 10;
input[i + 3 * 260] = -input[i] - 10;
}
cudaCheck(hipMalloc(&gpu_input, sizeof(FLOAT) * nmax));
cudaCheck(hipMalloc(&gpu_product, sizeof(int) * nmax));
// copy the input data to the GPU
cudaCheck(hipMemcpy(gpu_input, input, sizeof(FLOAT) * nmax, hipMemcpyHostToDevice));
for (int k = 2; k <= nmax; k++) {
std::random_shuffle(input, input + k);
printf("Test with %d items\n", k);
// sort on the GPU
testWrapper(gpu_input, gpu_product, k, false);
cudaCheck(hipDeviceSynchronize());
}
// free the GPU memory
cudaCheck(hipFree(gpu_input));
cudaCheck(hipFree(gpu_product));
return 0;
}
| f4b92d705235ccb9d197ef062aec245cbdecdfe0.cu | // #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
#include <cstddef>
#include <cstdint>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/radixSort.h"
#include <algorithm>
using FLOAT = double;
// A templated unsigned integer type with N bytes
template <int N>
struct uintN;
template <>
struct uintN<8> {
using type = uint8_t;
};
template <>
struct uintN<16> {
using type = uint16_t;
};
template <>
struct uintN<32> {
using type = uint32_t;
};
template <>
struct uintN<64> {
using type = uint64_t;
};
template <int N>
using uintN_t = typename uintN<N>::type;
// A templated unsigned integer type with the same size as T
template <typename T>
using uintT_t = uintN_t<sizeof(T) * 8>;
// Keep only the `N` most significant bytes of `t`, and set the others to zero
template <int N, typename T, typename SFINAE = std::enable_if_t<N <= sizeof(T)>>
__device__ __host__ T truncate(T const& t) {
const int shift = 8 * (sizeof(T) - N);
union {
T t;
uintT_t<T> u;
} c;
c.t = t;
c.u = c.u >> shift << shift;
return c.t;
}
namespace {
__global__ void testKernel(FLOAT* gpu_input, int* gpu_product, int elements, bool doPrint) {
size_t firstElement = threadIdx.x + blockIdx.x * blockDim.x; // This is going to be the track index
size_t gridSize = blockDim.x * gridDim.x;
// radix sort works in a single block
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
assert(elements <= 2048);
__shared__ uint16_t order[2048];
__shared__ uint16_t sws[2048];
__shared__ float z[2048];
__shared__ int iz[2048];
for (unsigned int itrack = firstElement; itrack < elements; itrack += gridSize) {
z[itrack] = gpu_input[itrack];
iz[itrack] = 10000 * gpu_input[itrack];
// order[itrack] = itrack;
}
__syncthreads();
radixSort<float, 2>(z, order, sws, elements);
__syncthreads();
//verify
for (unsigned int itrack = firstElement; itrack < (elements - 1); itrack += gridSize) {
auto ntrack = order[itrack];
auto mtrack = order[itrack + 1];
assert(truncate<2>(z[ntrack]) <= truncate<2>(z[mtrack]));
}
__syncthreads();
if (doPrint)
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (unsigned int itrackO = 0; itrackO < elements; itrackO++) {
int itrack = order[itrackO];
printf(
"Radix sort with %i elements: At position %i, track position at input %i with z at input %f, z fed to "
"radixSort %f\n",
elements,
itrackO,
itrack,
gpu_input[itrack],
z[itrack]);
gpu_product[itrackO] = itrack;
}
}
__syncthreads();
radixSort<int, 4>(iz, order, sws, elements);
__syncthreads();
for (unsigned int itrack = firstElement; itrack < (elements - 1); itrack += gridSize) {
auto ntrack = order[itrack];
auto mtrack = order[itrack + 1];
assert(iz[ntrack] <= iz[mtrack]);
}
if (doPrint)
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (unsigned int itrackO = 0; itrackO < elements; itrackO++) {
int itrack = order[itrackO];
printf(
"Radix sort with %i elements: At position %i, track position at input %i with z at input %f, z fed to "
"radixSort %d\n",
elements,
itrackO,
itrack,
gpu_input[itrack],
iz[itrack]);
gpu_product[itrackO] = itrack;
}
}
}
void testWrapper(FLOAT* gpu_input, int* gpu_product, int elements, bool doPrint) {
auto blockSize = 512; // somewhat arbitrary
auto gridSize = 1; // radixSort works in a single block, so the grid is fixed to one block (testKernel asserts gridDim.x == 1)
testKernel<<<gridSize, blockSize>>>(gpu_input, gpu_product, elements, doPrint);
cudaCheck(cudaGetLastError());
}
} // namespace
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
int main() {
cms::cudatest::requireDevices();
FLOAT* gpu_input;
int* gpu_product;
int nmax = 4 * 260;
FLOAT input[nmax] = {
30.0, 30.0, -4.4, -7.1860761642, -6.6870317459, 1.8010582924, 2.2535820007, 2.2666890621,
2.2677690983, 2.2794606686, 2.2802586555, 2.2821085453, 2.2852313519, 2.2877883911, 2.2946476936, 2.2960267067,
2.3006286621, 2.3245604038, 2.6755006313, 2.7229132652, 2.783257246, 2.8440306187, 2.9017834663, 2.9252648354,
2.9254128933, 2.927520752, 2.9422419071, 2.9453969002, 2.9457902908, 2.9465973377, 2.9492356777, 2.9573802948,
2.9575133324, 2.9575304985, 2.9586606026, 2.9605507851, 2.9622797966, 2.9625515938, 2.9641008377, 2.9646151066,
2.9676523209, 2.9708273411, 2.974111557, 2.9742531776, 2.9772830009, 2.9877333641, 2.9960610867, 3.013969183,
3.0187871456, 3.0379793644, 3.0407221317, 3.0415751934, 3.0470511913, 3.0560519695, 3.0592908859, 3.0599737167,
3.0607066154, 3.0629007816, 3.0632448196, 3.0633215904, 3.0643932819, 3.0645000935, 3.0666446686, 3.068046093,
3.0697011948, 3.0717656612, 3.0718104839, 3.0718348026, 3.0733406544, 3.0738227367, 3.0738801956, 3.0738828182,
3.0744686127, 3.0753741264, 3.0758397579, 3.0767207146, 3.0773906708, 3.0778541565, 3.0780284405, 3.0780889988,
3.0782799721, 3.0789675713, 3.0792205334, 3.0793278217, 3.0795567036, 3.0797944069, 3.0806643963, 3.0809247494,
3.0815284252, 3.0817306042, 3.0819730759, 3.0820026398, 3.0838682652, 3.084009409, 3.0848178864, 3.0853257179,
3.0855510235, 3.0856611729, 3.0873703957, 3.0884618759, 3.0891149044, 3.0893011093, 3.0895674229, 3.0901503563,
3.0903317928, 3.0912668705, 3.0920717716, 3.0954346657, 3.096424818, 3.0995628834, 3.1001036167, 3.1173279285,
3.1185023785, 3.1195163727, 3.1568386555, 3.1675374508, 3.1676850319, 3.1886672974, 3.3769197464, 3.3821125031,
3.4780933857, 3.4822063446, 3.4989323616, 3.5076274872, 3.5225863457, 3.5271244049, 3.5298995972, 3.5417425632,
3.5444457531, 3.5465917587, 3.5473103523, 3.5480232239, 3.5526945591, 3.5531234741, 3.5538012981, 3.5544877052,
3.5547749996, 3.5549693108, 3.5550665855, 3.5558729172, 3.5560717583, 3.5560848713, 3.5584278107, 3.558681488,
3.5587313175, 3.5592217445, 3.559384346, 3.5604712963, 3.5634038448, 3.563803196, 3.564593792, 3.5660364628,
3.5683133602, 3.5696356297, 3.569729805, 3.5740811825, 3.5757565498, 3.5760207176, 3.5760478973, 3.5836098194,
3.5839796066, 3.5852358341, 3.5901627541, 3.6141786575, 3.6601481438, 3.7187042236, 3.9741659164, 4.4111995697,
4.5337572098, 4.6292567253, 4.6748633385, 4.6806583405, 4.6868157387, 4.6868577003, 4.6879930496, 4.6888813972,
4.6910686493, 4.6925001144, 4.6957530975, 4.698094368, 4.6997032166, 4.7017259598, 4.7020640373, 4.7024269104,
4.7036352158, 4.7038679123, 4.7042069435, 4.7044086456, 4.7044372559, 4.7050771713, 4.7055773735, 4.7060651779,
4.7062759399, 4.7065420151, 4.70657444, 4.7066287994, 4.7066788673, 4.7067341805, 4.7072944641, 4.7074551582,
4.7075614929, 4.7075891495, 4.7076044083, 4.7077374458, 4.7080879211, 4.70819664, 4.7086658478, 4.708937645,
4.7092385292, 4.709479332, 4.7095656395, 4.7100076675, 4.7102108002, 4.7104525566, 4.7105507851, 4.71118927,
4.7113513947, 4.7115578651, 4.7116270065, 4.7116751671, 4.7117190361, 4.7117333412, 4.7117910385, 4.7119007111,
4.7120013237, 4.712003231, 4.712044239, 4.7122926712, 4.7135767937, 4.7143669128, 4.7145690918, 4.7148418427,
4.7149815559, 4.7159647942, 4.7161884308, 4.7177276611, 4.717815876, 4.718059063, 4.7188801765, 4.7190728188,
4.7199850082, 4.7213058472, 4.7239775658, 4.7243933678, 4.7243990898, 4.7273659706, 4.7294125557, 4.7296204567,
4.7325615883, 4.7356877327, 4.740146637, 4.742254734, 4.7433848381, 4.7454957962, 4.7462964058, 4.7692604065,
4.7723139628, 4.774812736, 4.8577151299, 4.890037536};
for (int i = 0; i < 260; i++) {
input[i + 260] = -input[i];
input[i + 2 * 260] = input[i] + 10;
input[i + 3 * 260] = -input[i] - 10;
}
cudaCheck(cudaMalloc(&gpu_input, sizeof(FLOAT) * nmax));
cudaCheck(cudaMalloc(&gpu_product, sizeof(int) * nmax));
// copy the input data to the GPU
cudaCheck(cudaMemcpy(gpu_input, input, sizeof(FLOAT) * nmax, cudaMemcpyHostToDevice));
for (int k = 2; k <= nmax; k++) {
std::random_shuffle(input, input + k);
printf("Test with %d items\n", k);
// sort on the GPU
testWrapper(gpu_input, gpu_product, k, false);
cudaCheck(cudaDeviceSynchronize());
}
// free the GPU memory
cudaCheck(cudaFree(gpu_input));
cudaCheck(cudaFree(gpu_product));
return 0;
}
|
1d00558924a053bb8c7ba0ffd33cb5d40a042504.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/ztranspose_inplace.cu, normal z -> s, Tue Aug 30 09:38:34 2016
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 16
/******************************************************************************/
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See stranspose_inplace_even for description of threads.
__global__ void stranspose_inplace_odd(
int n,
float *matrix, int lda )
{
__shared__ float sA[ NB ][ NB+1 ];
__shared__ float sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
float *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
float *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/******************************************************************************/
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void stranspose_inplace_even(
int n,
float *matrix, int lda )
{
__shared__ float sA[ NB ][ NB+1 ];
__shared__ float sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
float *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
float *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/***************************************************************************//**
Purpose
-------
stranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as stranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA REAL array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_transpose
*******************************************************************************/
extern "C" void
magmablas_stranspose_inplace_q(
magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
hipLaunchKernelGGL(( stranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
hipLaunchKernelGGL(( stranspose_inplace_even), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda );
}
}
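/*
    Example usage sketch (assuming dA is an n-by-n single-precision matrix
    already resident on the GPU with ldda >= n; the queue helpers shown are
    the usual MAGMA ones and are not part of this file):
        magma_queue_t queue;
        magma_queue_create( device, &queue );
        magmablas_stranspose_inplace_q( n, dA, ldda, queue ); // dA(i,j) <-> dA(j,i)
        magma_queue_sync( queue );
        magma_queue_destroy( queue );
*/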
| 1d00558924a053bb8c7ba0ffd33cb5d40a042504.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/ztranspose_inplace.cu, normal z -> s, Tue Aug 30 09:38:34 2016
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 16
/******************************************************************************/
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See stranspose_inplace_even for description of threads.
__global__ void stranspose_inplace_odd(
int n,
float *matrix, int lda )
{
__shared__ float sA[ NB ][ NB+1 ];
__shared__ float sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
float *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
float *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/******************************************************************************/
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void stranspose_inplace_even(
int n,
float *matrix, int lda )
{
__shared__ float sA[ NB ][ NB+1 ];
__shared__ float sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
float *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
float *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/***************************************************************************//**
Purpose
-------
stranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as stranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
    @param[in,out]
dA REAL array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_transpose
*******************************************************************************/
extern "C" void
magmablas_stranspose_inplace_q(
magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
stranspose_inplace_odd<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
stranspose_inplace_even<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda );
}
}
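/******************************************************************************/
// Editorial usage sketch -- not part of the original MAGMA source.
// It shows one plausible way to drive magmablas_stranspose_inplace_q above,
// assuming the standard MAGMA helpers (magma_smalloc, magma_queue_create,
// magma_queue_sync, magma_queue_destroy, magma_free); check your MAGMA
// release for the exact signatures before relying on it.
void example_stranspose_inplace( magma_int_t n )
{
    magmaFloat_ptr dA = NULL;
    if ( magma_smalloc( &dA, (size_t)n * n ) != MAGMA_SUCCESS )
        return;  // allocation failed
    magma_queue_t queue;
    magma_queue_create( 0 /*device*/, &queue );
    // ... fill dA (leading dimension n), e.g. with magma_ssetmatrix ...
    magmablas_stranspose_inplace_q( n, dA, n, queue );  // dA becomes its own transpose
    magma_queue_sync( queue );                          // wait before reusing dA
    magma_queue_destroy( queue );
    magma_free( dA );
}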
|
c50aeb422100ad0ab4f8431a2865516637b58882.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/gather.h>
#include <thrust/copy.h>
#include <locale.h>
#include <stdexcept>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVCategory.h"
#include "NVStrings.h"
#include "custring_view.cuh"
#include "custring.cuh"
#include "ipc_transfer.h"
//
typedef custring_view** custring_view_array;
#define ALIGN_SIZE(v) (((v+7)/8)*8)
//static void printDeviceInts( const char* title, int* d_ints, int count )
//{
// thrust::host_vector<int> ints(count);
// int* h_ints = ints.data();
// hipMemcpy( h_ints, d_ints, count * sizeof(int), hipMemcpyDeviceToHost);
// if( title )
// printf("%s:\n",title);
// for( int i=0; i < count; ++i )
// printf(" %d",h_ints[i]);
// printf("\n");
//}
//
class NVCategoryImpl
{
public:
//
rmm::device_vector<custring_view*>* pList;
rmm::device_vector<int>* pMap;
void* memoryBuffer;
size_t bufferSize; // total memory size
hipStream_t stream_id;
bool bIpcHandle;
//
NVCategoryImpl()
: bufferSize(0), memoryBuffer(0), pList(0), pMap(0), stream_id(0), bIpcHandle(false)
{}
~NVCategoryImpl()
{
if( memoryBuffer )
{
if( bIpcHandle )
hipIpcCloseMemHandle(memoryBuffer);
else
RMM_FREE(memoryBuffer,0);
}
delete pList;
delete pMap;
memoryBuffer = 0;
bufferSize = 0;
}
inline custring_view_array getStringsPtr()
{
custring_view_array rtn = 0;
if( pList )
rtn = pList->data().get();
return rtn;
}
inline custring_view_array createStringsListFrom( custring_view_array strings, unsigned int keys )
{
pList = new rmm::device_vector<custring_view*>(keys);
hipMemcpy(pList->data().get(), strings, keys*sizeof(custring_view*), hipMemcpyDeviceToDevice);
return pList->data().get();
}
inline char* getMemoryPtr() { return (char*)memoryBuffer; }
inline int* getMapPtr()
{
int* rtn = 0;
if( pMap )
rtn = pMap->data().get();
return rtn;
}
inline int* createMapFrom( int* vals, unsigned int count )
{
pMap = new rmm::device_vector<int>(count);
hipMemcpy(pMap->data().get(), vals, count*sizeof(int), hipMemcpyDeviceToDevice);
return pMap->data().get();
}
inline void setMemoryBuffer( void* ptr, size_t memSize )
{
bufferSize = memSize;
memoryBuffer = ptr;
}
inline void setMemoryHandle( void* ptr, size_t memSize )
{
setMemoryBuffer(ptr,memSize);
bIpcHandle = true;
}
};
//
NVCategory::NVCategory()
{
pImpl = new NVCategoryImpl;
}
NVCategory::~NVCategory()
{
delete pImpl;
}
// utility to create keys from array of string pointers
// pImpl must exist but its pList should be null -- this method will create it
void NVCategoryImpl_keys_from_index( NVCategoryImpl* pImpl, thrust::pair<const char*,size_t>* d_pairs, unsigned int ucount )
{
auto execpol = rmm::exec_policy(0);
// add up the lengths
rmm::device_vector<size_t> lengths(ucount,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_pairs, d_lengths] __device__(size_t idx){
const char* str = d_pairs[idx].first;
int bytes = (int)d_pairs[idx].second;
if( str )
d_lengths[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,bytes));
});
// create output buffer to hold the string keys
size_t outsize = thrust::reduce(execpol->on(0), lengths.begin(), lengths.end());
char* d_buffer = 0;
RMM_ALLOC(&d_buffer,outsize,0);
pImpl->setMemoryBuffer(d_buffer,outsize);
rmm::device_vector<size_t> offsets(ucount,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// create the vector to hold the pointers
rmm::device_vector<custring_view*>* pList = new rmm::device_vector<custring_view*>(ucount,nullptr);
custring_view_array d_results = pList->data().get();
// copy keys strings to new memory buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_pairs, d_buffer, d_offsets, d_results] __device__ (size_t idx) {
const char* str = d_pairs[idx].first;
int bytes = (int)d_pairs[idx].second;
if( str )
d_results[idx] = custring_view::create_from(d_buffer+d_offsets[idx],(char*)str,bytes);
});
pImpl->pList = pList;
}
// utility to create keys from array of custrings
// pImpl must exist but its pList should be null -- this method will create it
void NVCategoryImpl_keys_from_custringarray( NVCategoryImpl* pImpl, custring_view_array d_keys, unsigned int ucount )
{
auto execpol = rmm::exec_policy(0);
// add up the lengths
rmm::device_vector<size_t> lengths(ucount,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_keys, d_lengths] __device__(size_t idx){
custring_view* dstr = d_keys[idx];
if( dstr )
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size());
});
// create output buffer to hold the string keys
size_t outsize = thrust::reduce(execpol->on(0), lengths.begin(), lengths.end());
char* d_buffer = 0;
RMM_ALLOC(&d_buffer,outsize,0);
pImpl->setMemoryBuffer(d_buffer,outsize);
rmm::device_vector<size_t> offsets(ucount,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// create the vector to hold the pointers
rmm::device_vector<custring_view*>* pList = new rmm::device_vector<custring_view*>(ucount,nullptr);
custring_view_array d_results = pList->data().get();
// copy keys strings to new memory buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_keys, d_buffer, d_offsets, d_results] __device__ (size_t idx) {
custring_view* dstr = d_keys[idx];
if( dstr )
d_results[idx] = custring_view::create_from(d_buffer+d_offsets[idx],*dstr);
});
pImpl->pList = pList;
}
// Utility to create category instance data from array of string pointers (in device memory).
// It does all operations using the given pointers (or copies) to build the map.
// This method can be given the index values from the NVStrings::create_index.
// So however an NVStrings can be created can also create an NVCategory.
//
// Should investigate converting this to use custring pointers instead of index pairs.
// It would likely save some processing since we can create custrings from custrings.
void NVCategoryImpl_init(NVCategoryImpl* pImpl, std::pair<const char*,size_t>* pairs, unsigned int count, bool bdevmem, bool bindexescopied=false )
{
hipError_t err = hipSuccess;
auto execpol = rmm::exec_policy(0);
// make a copy of the indexes so we can sort them, etc
thrust::pair<const char*,size_t>* d_pairs = 0;
if( bdevmem )
{
if( bindexescopied ) // means caller already made a temp copy
d_pairs = (thrust::pair<const char*,size_t>*)pairs; // and we can just use it here
else
{
RMM_ALLOC(&d_pairs,sizeof(thrust::pair<const char*,size_t>)*count,0);
hipMemcpy(d_pairs,pairs,sizeof(thrust::pair<const char*,size_t>)*count,hipMemcpyDeviceToDevice);
}
}
else
{
RMM_ALLOC(&d_pairs,sizeof(thrust::pair<const char*,size_t>)*count,0);
hipMemcpy(d_pairs,pairs,sizeof(thrust::pair<const char*,size_t>)*count,hipMemcpyHostToDevice);
}
//
// example strings used in comments e,a,d,b,c,c,c,e,a
//
rmm::device_vector<int> indexes(count);
thrust::sequence(execpol->on(0),indexes.begin(),indexes.end()); // 0,1,2,3,4,5,6,7,8
int* d_indexes = indexes.data().get();
// sort by key (string) a,a,b,c,c,c,d,e,e
// and indexes go along for the ride 1,8,3,4,5,6,2,0,7
thrust::sort_by_key(execpol->on(0), d_pairs, d_pairs+count, d_indexes,
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return rhs.first!=0; // null < non-null
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// build the map; this will let us lookup strings by index
rmm::device_vector<int>* pMap = new rmm::device_vector<int>(count,0);
int* d_map = pMap->data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count,
[d_pairs, d_map] __device__ (int idx) {
if( idx==0 )
return;
const char* ptr1 = d_pairs[idx-1].first;
const char* ptr2 = d_pairs[idx].first;
unsigned int len1 = (unsigned int)d_pairs[idx-1].second, len2 = (unsigned int)d_pairs[idx].second;
//d_map[idx] = (int)(custr::compare(ptr1,len1,ptr2,len2)!=0);
int cmp = 0; // vvvvv - probably faster than - ^^^^^
if( !ptr1 || !ptr2 )
cmp = (int)(ptr1!=ptr2);
else if( len1 != len2 )
cmp = 1;
else
for( int i=0; !cmp && (i < len1); ++i)
cmp = (int)(*ptr1++ != *ptr2++);
d_map[idx] = cmp;
});
//
// d_map now identifies just string changes 0,0,1,1,0,0,1,1,0
int ucount = thrust::reduce(execpol->on(0), pMap->begin(), pMap->end()) + 1;
// scan converts to index values 0,0,1,2,2,2,3,4,4
thrust::inclusive_scan(execpol->on(0), pMap->begin(), pMap->end(), pMap->begin());
// re-sort will complete the map 4,0,3,1,2,2,2,4,0
thrust::sort_by_key(execpol->on(0), indexes.begin(), indexes.end(), pMap->begin());
pImpl->pMap = pMap; // index -> str is now just a lookup in the map
// now remove duplicates from string list a,b,c,d,e
thrust::unique(execpol->on(0), d_pairs, d_pairs+count,
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
if( lhs.first==0 || rhs.first==0 )
return lhs.first==rhs.first;
if( lhs.second != rhs.second )
return false;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
// finally, create new string vector of just the keys
NVCategoryImpl_keys_from_index(pImpl,d_pairs,ucount);
err = hipDeviceSynchronize();
if( err!=hipSuccess )
fprintf(stderr,"category: error(%d) creating %'d strings\n",(int)err,ucount);
if( !bindexescopied )
RMM_FREE(d_pairs,0);
}
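// Editorial host-side sketch (not part of the original library): the same
// sort/mark/scan/scatter steps as NVCategoryImpl_init above, run with the STL
// on the example strings e,a,d,b,c,c,c,e,a from the comments. All names below
// are illustrative; the includes are harmless if already pulled in elsewhere.
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <string>
#include <vector>
void example_host_dictionary_encode()
{
    std::vector<std::string> strs = {"e","a","d","b","c","c","c","e","a"};
    size_t count = strs.size();
    // order[i] = original position of the i-th smallest string: 1,8,3,4,5,6,2,0,7
    std::vector<int> order(count);
    std::iota(order.begin(), order.end(), 0);
    std::stable_sort(order.begin(), order.end(),
        [&](int l, int r){ return strs[l] < strs[r]; });
    // mark changes in the sorted sequence and scan them into key ids: 0,0,1,2,2,2,3,4,4
    std::vector<int> ids(count, 0);
    for( size_t i=1; i < count; ++i )
        ids[i] = ids[i-1] + (strs[order[i]] != strs[order[i-1]]);
    // scatter the ids back to the original positions -> the category map: 4,0,3,1,2,2,2,4,0
    std::vector<int> map(count);
    for( size_t i=0; i < count; ++i )
        map[order[i]] = ids[i];
    // unique keys are a,b,c,d,e; map[i] indexes into that keyset
    for( int v : map ) printf(" %d", v);
    printf("\n");
}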
NVCategory* NVCategory::create_from_index(std::pair<const char*,size_t>* strs, unsigned int count, bool devmem )
{
NVCategory* rtn = new NVCategory;
if( count )
NVCategoryImpl_init(rtn->pImpl,strs,count,devmem);
return rtn;
}
NVCategory* NVCategory::create_from_array(const char** strs, unsigned int count)
{
NVCategory* rtn = new NVCategory;
if( count==0 )
return rtn;
NVStrings* dstrs = NVStrings::create_from_array(strs,count);
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
dstrs->create_index(indexes);
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
NVStrings::destroy(dstrs);
return rtn;
}
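// Editorial usage sketch (not part of the original library): building a
// category from host strings and reading the values back. The strings and the
// expected output follow the e,a,d,b,c,c,c,e,a example documented above.
void example_create_from_array()
{
    const char* hstrs[] = { "e","a","d","b","c","c","c","e","a" };
    NVCategory* cat = NVCategory::create_from_array(hstrs, 9);
    // keys are the sorted unique strings a,b,c,d,e
    int values[9];
    cat->get_values(values, false);  // false = copy values to host memory
    for( int idx=0; idx < 9; ++idx )
        printf(" %d", values[idx]);  // expected: 4 0 3 1 2 2 2 4 0
    printf("\n");
    NVCategory::destroy(cat);
}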
NVCategory* NVCategory::create_from_strings(NVStrings& strs)
{
NVCategory* rtn = new NVCategory;
unsigned int count = strs.size();
if( count==0 )
return rtn;
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
strs.create_index(indexes);
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
return rtn;
}
NVCategory* NVCategory::create_from_strings(std::vector<NVStrings*>& strs)
{
NVCategory* rtn = new NVCategory;
unsigned int count = 0;
for( unsigned int idx=0; idx < (unsigned int)strs.size(); idx++ )
count += strs[idx]->size();
if( count==0 )
return rtn;
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
std::pair<const char*,size_t>* ptr = indexes;
for( unsigned int idx=0; idx < (unsigned int)strs.size(); idx++ )
{
strs[idx]->create_index(ptr);
ptr += strs[idx]->size();
}
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
return rtn;
}
// bitmask is in arrow format
NVCategory* NVCategory::create_from_offsets(const char* strs, unsigned int count, const int* offsets, const unsigned char* nullbitmask, int nulls)
{
NVCategory* rtn = new NVCategory;
if( count==0 )
return rtn;
NVStrings* dstrs = NVStrings::create_from_offsets(strs,count,offsets,nullbitmask,nulls);
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
dstrs->create_index(indexes); // try using the custring one; may be more efficient
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
NVStrings::destroy(dstrs);
return rtn;
}
// create instance from ipc handle(s)
NVCategory* NVCategory::create_from_ipc( nvcategory_ipc_transfer& ipc )
{
NVCategory* rtn = new NVCategory;
unsigned int keys = ipc.keys;
if( keys==0 )
return rtn;
rtn->pImpl->setMemoryHandle(ipc.getMemoryPtr(),ipc.size);
custring_view_array d_strings = rtn->pImpl->createStringsListFrom((custring_view_array)ipc.getStringsPtr(),ipc.keys);
// fix up the pointers for this context
auto execpol = rmm::exec_policy(0);
char* baseaddr = (char*)ipc.base_address;
char* buffer = rtn->pImpl->getMemoryPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), keys,
[buffer, baseaddr, d_strings] __device__(unsigned int idx){
char* dstr = (char*)d_strings[idx];
if( !dstr )
return;
size_t diff = dstr - baseaddr;
char* newaddr = buffer + diff;
d_strings[idx] = (custring_view*)newaddr;
});
hipDeviceSynchronize();
// set the map values
rtn->pImpl->createMapFrom( (int*)ipc.getMapPtr(), ipc.count );
// done
return rtn;
}
//
// Example merging two categories and remapping the values:
//
// category1:--------- category2:---------
// | strs1 key1 | | strs2 key2 |
// | abbfcf -> abcf | | aadcce -> acde |
// | 012345 0123 | | 012345 0123 |
// | 011323 <-' | | 002113 <-' |
// ------------------ ------------------
//
// merge-remap should result in new category like:
// strs key
// abbfcfaadcce -> abcdef
// 012345
// 011525003224 <-'
//
// abcfacde -> w = aabccdef
// 01234567 x = 04125673
// y = 00110111
// y'= 00122345 = scan(y)
// y"= 01250234 = sort(x,y')
// v = 0125:0234 = this is y"
// m = 011323:002113 = orig values from each category
// m'= r1[v1]:r2[v2] -> 011525:003224
// w'= unique(w) -> abcdef
//
// This logic works for any number of categories.
// Loop is required at the beginning to combine all the keys.
// And loop is required at the end to combine and remap the values.
//
NVCategory* NVCategory::create_from_categories(std::vector<NVCategory*>& cats)
{
NVCategory* rtn = new NVCategory();
if( cats.empty() )
return rtn;
unsigned int count = 0;
unsigned int mcount = 0;
for( unsigned int idx=0; idx < cats.size(); ++idx )
{
NVCategory* cat = cats[idx];
count += cat->keys_size();
mcount += cat->size();
}
if( count==0 )
return rtn;
auto execpol = rmm::exec_policy(0);
// first combine the keys into one array
rmm::device_vector<custring_view*> wstrs(count);
custring_view_array d_w = wstrs.data().get();
for( unsigned int idx=0; idx < cats.size(); ++idx )
{
NVCategory* cat = cats[idx];
custring_view_array d_keys = cat->pImpl->getStringsPtr();
unsigned int ksize = cat->keys_size();
if( ksize )
hipMemcpy(d_w, d_keys, ksize*sizeof(custring_view*),hipMemcpyDeviceToDevice);
d_w += ksize;
}
d_w = wstrs.data().get(); // reset pointer
rmm::device_vector<int> x(count);
int* d_x = x.data().get(); // [0:count)
thrust::sequence( execpol->on(0), d_x, d_x+count );
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w+count, d_x,
[] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
// x-vector is sorted sequence we'll use to remap values
rmm::device_vector<int> y(count,0); // y-vector will identify unique keys
int* d_y = y.data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(1), (count-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx-1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)!=0);
else
d_y[idx] = (int)(lhs!=rhs);
});
unsigned int kcount = (unsigned int)thrust::reduce( execpol->on(0), d_y, d_y+count )+1;
// use gather to get unique keys
// theory is that copy_if + gather on ints is faster than unique on strings
//rmm::device_vector<int> nidxs(kcount);
//thrust::counting_iterator<int> citr(0);
//thrust::copy_if( execpol->on(0), citr, citr + count, nidxs.data().get(), [d_y] __device__ (const int& idx) { return (idx==0 || d_y[idx]); });
//rmm::device_vector<custring_view*>* pNewList = new rmm::device_vector<custring_view*>(kcount,nullptr);
//custring_view_array d_keys = pNewList->data().get(); // this will hold the merged keyset
//thrust::gather( execpol->on(0), nidxs.begin(), nidxs.end(), d_w, d_keys );
thrust::unique( execpol->on(0), d_w, d_w+count, [] __device__ (custring_view* lhs, custring_view* rhs) { return (lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs); });
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,kcount);
// now create map to remap the values
thrust::inclusive_scan(execpol->on(0), d_y, d_y+count, d_y );
thrust::sort_by_key(execpol->on(0), d_x, d_x+count, d_y );
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount);
int* d_map = pNewMap->data().get();
int* d_v = d_y;
for( int idx=0; idx < (int)cats.size(); ++idx )
{
NVCategory* cat = cats[idx];
unsigned int msize = cat->size();
if( msize )
{
int* d_catmap = cat->pImpl->getMapPtr();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), msize,
[d_catmap, d_v, d_map] __device__ (int idx) {
int v = d_catmap[idx];
d_map[idx] = ( v<0 ? v : d_v[v] );
});
}
d_v += cat->keys_size();
d_map += msize;
}
// done
rtn->pImpl->pMap = pNewMap;
return rtn;
}
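// Editorial host-side sketch (not part of the original library): the w/x/y
// bookkeeping described in the comment before create_from_categories, checked
// with the STL on the documented example (key1=abcf map1=011323,
// key2=acde map2=002113). It reuses the standard headers included with the
// sketch after NVCategoryImpl_init above; all names are illustrative.
void example_merge_remap_host()
{
    std::string w = std::string("abcf") + "acde";  // concatenated keysets
    std::vector<int> map1 = {0,1,1,3,2,3}, map2 = {0,0,2,1,1,3};
    std::vector<int> x(w.size());
    std::iota(x.begin(), x.end(), 0);
    std::stable_sort(x.begin(), x.end(), [&](int l, int r){ return w[l] < w[r]; });  // x = 0 4 1 2 5 6 7 3
    std::sort(w.begin(), w.end());                                                   // w = aabccdef
    std::vector<int> y(w.size(), 0);  // inclusive scan of key changes: 0 0 1 2 2 3 4 5
    for( size_t i=1; i < w.size(); ++i )
        y[i] = y[i-1] + (w[i] != w[i-1]);
    std::vector<int> v(w.size());     // de-sort y back to original key positions: 0 1 2 5 | 0 2 3 4
    for( size_t i=0; i < w.size(); ++i )
        v[x[i]] = y[i];
    for( int m : map1 ) printf("%d", v[m]);      // prints 011525
    for( int m : map2 ) printf("%d", v[4 + m]);  // prints 003224 (offset 4 = size of key1)
    printf("\n");
}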
void NVCategory::destroy(NVCategory* inst)
{
delete inst;
}
// dest should already be empty
void NVCategoryImpl_copy( NVCategoryImpl& dest, NVCategoryImpl& src )
{
if( src.pList==0 )
return;
auto execpol = rmm::exec_policy(0);
if( src.pMap )
{
unsigned int mcount = (unsigned int)src.pMap->size();
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
// copy map values from non-empty category instance
hipMemcpy( pNewMap->data().get(), src.pMap->data().get(), mcount*sizeof(int), hipMemcpyDeviceToDevice );
dest.pMap = pNewMap;
}
// copy key strings buffer
unsigned int ucount = (unsigned int)src.pList->size();
rmm::device_vector<custring_view*>* pNewList = new rmm::device_vector<custring_view*>(ucount,nullptr);
char* d_buffer = (char*)src.memoryBuffer;
size_t bufsize = src.bufferSize;
char* d_newbuffer = 0;
RMM_ALLOC(&d_newbuffer,bufsize,0);
hipMemcpy(d_newbuffer,d_buffer,bufsize,hipMemcpyDeviceToDevice);
// need to set custring_view ptrs
custring_view_array d_strings = src.getStringsPtr();
custring_view_array d_results = pNewList->data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_strings, d_buffer, d_newbuffer, d_results] __device__ (size_t idx) {
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_newbuffer + ((char*)dstr - d_buffer);
d_results[idx] = (custring_view*)buffer;
}
});
dest.pList = pNewList;
dest.setMemoryBuffer( d_newbuffer, bufsize );
}
NVCategory::NVCategory(const NVCategory& cat)
{
pImpl = new NVCategoryImpl;
NVCategoryImpl_copy(*pImpl,*(cat.pImpl));
}
NVCategory& NVCategory::operator=(const NVCategory& cat)
{
delete pImpl;
pImpl = new NVCategoryImpl;
NVCategoryImpl_copy(*pImpl,*(cat.pImpl));
return *this;
}
NVCategory* NVCategory::copy()
{
NVCategory* rtn = new NVCategory;
NVCategoryImpl_copy(*(rtn->pImpl),*pImpl);
return rtn;
}
// return number of items
unsigned int NVCategory::size()
{
unsigned int size = 0;
if( pImpl->pMap )
size = pImpl->pMap->size();
return size;
}
// return number of keys
unsigned int NVCategory::keys_size()
{
unsigned int size = 0;
if( pImpl->pList )
size = pImpl->pList->size();
return size;
}
// true if any null values exist
bool NVCategory::has_nulls()
{
unsigned int count = keys_size();
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
int n = thrust::count_if(execpol->on(0), d_strings, d_strings+count,
[]__device__(custring_view* dstr) { return dstr==0; } );
return n > 0;
}
// bitarray is for the values; bits are set in arrow format
// return the number of null values found
int NVCategory::set_null_bitarray( unsigned char* bitarray, bool devmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned int size = (count + 7)/8;
unsigned char* d_bitarray = bitarray;
if( !devmem )
RMM_ALLOC(&d_bitarray,size,0);
int nidx = -1;
{
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> nulls(1,-1);
thrust::copy_if( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), thrust::make_counting_iterator<unsigned int>(keys_size()), nulls.begin(),
[d_strings] __device__ (unsigned int idx) { return d_strings[idx]==0; } );
nidx = nulls[0]; // should be the index of the null entry (or -1)
}
if( nidx < 0 )
{ // no nulls, set everything to 1s
hipMemset(d_bitarray,255,size); // actually sets more bits than we need to
if( !devmem )
{
hipMemcpy(bitarray,d_bitarray,size,hipMemcpyDeviceToHost);
RMM_FREE(d_bitarray,0);
}
return 0; // no nulls;
}
// count nulls in range for return value
int* d_map = pImpl->getMapPtr();
unsigned int ncount = thrust::count_if(execpol->on(0), d_map, d_map + count,
[nidx] __device__ (int index) { return (index==nidx); });
// fill in the bitarray
// the bitmask is in arrow format which means for each byte
// the null indicator is in bit position right-to-left: 76543210
// logic sets the high-bit and shifts to the right
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size,
[d_map, nidx, count, d_bitarray] __device__(unsigned int byteIdx){
unsigned char byte = 0; // init all bits to zero
for( unsigned int i=0; i < 8; ++i )
{
unsigned int idx = i + (byteIdx*8);
byte = byte >> 1;
if( idx < count )
{
int index = d_map[idx];
byte |= (unsigned char)((index!=nidx) << 7);
}
}
d_bitarray[byteIdx] = byte;
});
hipDeviceSynchronize();
if( !devmem )
{
hipMemcpy(bitarray,d_bitarray,size,hipMemcpyDeviceToHost);
RMM_FREE(d_bitarray,0);
}
return ncount; // number of nulls
}
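// Editorial helper sketch (not part of the original library): reading the
// Arrow-style bitmask produced by set_null_bitarray on the host. Bit (idx % 8)
// of byte (idx / 8) is 1 for a valid entry and 0 for a null.
static inline bool example_is_valid( const unsigned char* bitarray, unsigned int idx )
{
    return ((bitarray[idx/8] >> (idx%8)) & 1) != 0;
}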
// build a string-index from this instance's strings
int NVCategory::create_index(std::pair<const char*,size_t>* strs, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
int* d_map = pImpl->getMapPtr();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_map, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[d_map[idx]];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
hipDeviceSynchronize();
//
if( bdevmem )
hipMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), hipMemcpyDeviceToDevice );
else
hipMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), hipMemcpyDeviceToHost );
return 0;
}
int NVCategory::create_ipc_transfer( nvcategory_ipc_transfer& ipc )
{
ipc.setStrsHandle(pImpl->getStringsPtr(),pImpl->getMemoryPtr(),keys_size());
ipc.setMemHandle(pImpl->getMemoryPtr(),pImpl->bufferSize);
ipc.setMapHandle(pImpl->getMapPtr(),size());
return 0;
}
// return strings keys for this instance
NVStrings* NVCategory::get_keys()
{
int count = keys_size();
if( count==0 )
return NVStrings::create_from_index(0,0);
auto execpol = rmm::exec_policy(0);
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_indexes] __device__(size_t idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
hipDeviceSynchronize();
// create strings from index
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
//
int NVCategory::get_value(unsigned int index)
{
if( index >= size() )
return -1;
int* d_map = pImpl->getMapPtr();
int rtn = -1;
if( d_map )
hipMemcpy(&rtn,d_map+index,sizeof(int),hipMemcpyDeviceToHost);
return rtn;
}
//
int NVCategory::get_value(const char* str)
{
char* d_str = 0;
unsigned int bytes = 0;
auto execpol = rmm::exec_policy(0);
if( str )
{
bytes = (unsigned int)strlen(str);
RMM_ALLOC(&d_str,bytes+1,0);
hipMemcpy(d_str,str,bytes,hipMemcpyHostToDevice);
}
int count = keys_size();
custring_view_array d_strings = pImpl->getStringsPtr();
// find string in this instance
rmm::device_vector<int> keys(1,-1);
thrust::copy_if( execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count), keys.begin(),
[d_strings, d_str, bytes] __device__ (int idx) {
custring_view* dstr = d_strings[idx];
if( (char*)dstr==d_str ) // only true if both are null
return true;
return ( dstr && dstr->compare(d_str,bytes)==0 );
} );
hipDeviceSynchronize();
if( d_str )
RMM_FREE(d_str,0);
return keys[0];
}
std::pair<int,int> NVCategory::get_value_bounds(const char* str)
{
std::pair<int,int> rtn(-1,-1);
    // first check if the key exists (saves a lot of work below)
int value = get_value(str);
if( value>=0 )
{
rtn.first = value;
rtn.second = value;
return rtn;
}
// not found in existing keyset
auto execpol = rmm::exec_policy(0);
unsigned int count = keys_size();
custring_view** d_strings = pImpl->getStringsPtr();
// create index of the keys
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count+1);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_indexes] __device__(size_t idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
//
hipDeviceSynchronize();
// and add the passed in string to the indexes
size_t len = 0;
char* d_str = 0;
if( str )
{
len = strlen(str);
RMM_ALLOC(&d_str,len+1,0);
hipMemcpy(d_str,str,len+1,hipMemcpyHostToDevice);
}
thrust::pair<const char*,size_t> newstr(d_str,len); // add to the end
hipMemcpy(d_indexes+count,&newstr,sizeof(thrust::pair<const char*,size_t>),hipMemcpyHostToDevice);
// sort the keys with attached sequence numbers
rmm::device_vector<int> seqdata(count+1);
thrust::sequence(execpol->on(0),seqdata.begin(),seqdata.end()); // [0:count]
int* d_seqdata = seqdata.data().get();
thrust::sort_by_key(execpol->on(0), d_indexes, d_indexes+(count+1), d_seqdata,
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return rhs.first!=0;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// now find the new position of the argument
// this will be where the sequence number equals the count
rmm::device_vector<int> keys(1,-1);
thrust::copy_if( execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count+1), keys.begin(),
[d_seqdata, count, d_indexes] __device__ (int idx) { return d_seqdata[idx]==count; });
hipDeviceSynchronize();
int first = 0; // get the position back into host memory
hipMemcpy(&first,keys.data().get(),sizeof(int),hipMemcpyDeviceToHost);
    rtn.first = first-1;  // lower bound is the position just before the insert point
    rtn.second = first;   // upper bound is the insert position itself
if( d_str )
RMM_FREE(d_str,0);
return rtn;
}
// return category values for all indexes
int NVCategory::get_values( int* results, bool bdevmem )
{
int count = (int)size();
int* d_map = pImpl->getMapPtr();
if( count && d_map )
{
if( bdevmem )
hipMemcpy(results,d_map,count*sizeof(int),hipMemcpyDeviceToDevice);
else
hipMemcpy(results,d_map,count*sizeof(int),hipMemcpyDeviceToHost);
}
return count;
}
const int* NVCategory::values_cptr()
{
return pImpl->getMapPtr();
}
int NVCategory::get_indexes_for( unsigned int index, int* results, bool bdevmem )
{
unsigned int count = size();
if( index >= count )
return -1;
auto execpol = rmm::exec_policy(0);
int* d_map = pImpl->getMapPtr();
if( !d_map )
return 0;
int matches = thrust::count_if( execpol->on(0), d_map, d_map+count, [index] __device__(int idx) { return idx==(int)index; });
if( matches <= 0 )
return 0; // done, found nothing, not likely
if( results==0 )
return matches; // caller just wants the count
int* d_results = results;
if( !bdevmem )
RMM_ALLOC(&d_results,matches*sizeof(int),0);
thrust::counting_iterator<unsigned int> itr(0);
thrust::copy_if( execpol->on(0), itr, itr+count, d_results,
[index, d_map] __device__(unsigned int idx) { return d_map[idx]==(int)index; });
hipDeviceSynchronize();
if( !bdevmem )
{
hipMemcpy(results,d_results,matches*sizeof(int),hipMemcpyDeviceToHost);
RMM_FREE(d_results,0);
}
return matches;
}
int NVCategory::get_indexes_for( const char* str, int* results, bool bdevmem )
{
int id = get_value(str);
if( id < 0 )
return id;
return get_indexes_for((unsigned int)id, results, bdevmem);
}
// creates a new instance incorporating the new strings
NVCategory* NVCategory::add_strings(NVStrings& strs)
{
// create one large index of both datasets
unsigned int count1 = size();
unsigned int count2 = strs.size();
unsigned int count = count1 + count2;
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
create_index((std::pair<const char*,size_t>*)d_indexes,count1);
strs.create_index((std::pair<const char*,size_t>*)d_indexes+count1,count2);
// build the category from this new set
return create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// creates a new instance without the specified strings
// deprecated by remove_keys?
NVCategory* NVCategory::remove_strings(NVStrings& strs)
{
auto execpol = rmm::exec_policy(0);
unsigned int count = size();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
create_index((std::pair<const char*,size_t>*)d_indexes,count);
unsigned int delete_count = strs.size();
rmm::device_vector< thrust::pair<const char*,size_t> > deletes(delete_count);
thrust::pair<const char*,size_t>* d_deletes = deletes.data().get();
strs.create_index((std::pair<const char*,size_t>*)d_deletes,delete_count);
// this would be inefficient if strs is very large
thrust::pair<const char*,size_t>* newend = thrust::remove_if(execpol->on(0), d_indexes, d_indexes + count,
[d_deletes,delete_count] __device__ (thrust::pair<const char*,size_t> lhs) {
for( unsigned int idx=0; idx < delete_count; ++idx )
{
thrust::pair<const char*,size_t> rhs = d_deletes[idx];
if( lhs.first == rhs.first )
return true;
if( lhs.second != rhs.second )
continue;
if( custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0 )
return true;
}
return false;
});
// return value ensures a dev-sync has already been performed by thrust
count = (unsigned int)(newend - d_indexes); // new count of strings
// build the category from this new set
return create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// basically recreates the original string list
NVStrings* NVCategory::to_strings()
{
int count = (int)size();
int* d_map = pImpl->getMapPtr();
if( count==0 || d_map==0 )
return 0;
custring_view** d_strings = pImpl->getStringsPtr();
// use the map to build the indexes array
auto execpol = rmm::exec_policy(0);
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_map, d_indexes] __device__(size_t idx){
int stridx = d_map[idx];
custring_view* dstr = 0;
if( stridx >=0 )
dstr = d_strings[stridx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
//
hipDeviceSynchronize();
// create strings from index
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// creates a new NVStrings instance using the specified index values
NVStrings* NVCategory::gather_strings( const int* pos, unsigned int count, bool bdevmem )
{
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if( !bdevmem )
{
RMM_ALLOC((void**)&d_pos,count*sizeof(int),0);
hipMemcpy((void*)d_pos,pos,count*sizeof(int),hipMemcpyHostToDevice);
}
custring_view** d_strings = pImpl->getStringsPtr();
// need to check for invalid values
unsigned int size = keys_size();
rmm::device_vector<int> check(count,0);
int* d_check = check.data().get();
// use the map to build the indexes array
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_pos, size, d_check, d_indexes] __device__(size_t idx){
int stridx = d_pos[idx];
if( (stridx < 0) || (stridx >= size) )
{
d_check[idx] = 1;
return;
}
custring_view* dstr = 0;
if( stridx >=0 )
dstr = d_strings[stridx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
//
hipDeviceSynchronize();
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
int invalidcount = thrust::reduce( execpol->on(0), d_check, d_check+count );
if( invalidcount )
throw std::out_of_range("");
// create strings from index
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
//
// Create a new category by gathering strings from this category.
// If specific keys are not referenced, the values are remapped.
// This is a shortcut method to calling gather_strings() and then
// just converting the resulting NVStrings instance into an new NVCategory.
//
// Example category
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
//
// Specify strings in pos parameter:
// v = 1 3 2 3 1 2 bfcfbc
// x = 0 1 1 1 x[v[idx]] = 1 (set 1 for values in v)
// y = 0 0 1 2 excl-scan(x)
//
// Remap values using:
// v[idx] = y[v[idx]] -> 021201
// New key list is copy_if of keys where x==1 -> bcf
//
NVCategory* NVCategory::gather_and_remap( const int* pos, unsigned int count, bool bdevmem )
{
auto execpol = rmm::exec_policy(0);
const int* d_v = pos;
if( !bdevmem )
{
RMM_ALLOC((void**)&d_v,count*sizeof(int),0);
hipMemcpy((void*)d_v,pos,count*sizeof(int),hipMemcpyHostToDevice);
}
unsigned int kcount = keys_size();
// first, do bounds check on input values
int invalidcount = thrust::count_if(execpol->on(0), d_v, d_v+count,
[kcount] __device__ (int v) { return ((v < 0) || (v >= kcount)); } );
if( invalidcount )
{
if( !bdevmem )
RMM_FREE((void*)d_v,0);
throw std::out_of_range("");
}
// build x vector which has 1s for each value in v
rmm::device_vector<int> x(kcount,0);
int* d_x = x.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_v, d_x] __device__ (unsigned int idx) { d_x[d_v[idx]] = 1; });
// y vector is scan of x values
rmm::device_vector<int> y(kcount,0);
int* d_y = y.data().get();
thrust::exclusive_scan(execpol->on(0),d_x,d_x+kcount,d_y,0);
// use y to map input to new values
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(count,0);
int* d_map = pNewMap->data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_v, d_y, d_map] __device__ (unsigned int idx) { d_map[idx] = d_y[d_v[idx]]; });
// done creating the map
NVCategory* rtn = new NVCategory;
rtn->pImpl->pMap = pNewMap;
// copy/gather the keys
custring_view_array d_keys = pImpl->getStringsPtr();
unsigned int ucount = kcount;
{ // reuse the y vector for gather positions
thrust::counting_iterator<int> citr(0);
auto nend = thrust::copy_if( execpol->on(0), citr, citr + kcount, d_y, [d_x] __device__ (const int& idx) { return d_x[idx]==1; });
ucount = (unsigned int)(nend - d_y); // how many were copied
}
// gather keys into new vector
rmm::device_vector<custring_view*> newkeys(ucount,nullptr);
thrust::gather( execpol->on(0), d_y, d_y + ucount, d_keys, newkeys.data().get() );
hipDeviceSynchronize();
// build keylist for new category
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,newkeys.data().get(),ucount);
//
if( !bdevmem )
RMM_FREE((void*)d_v,0);
return rtn;
}
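// Editorial usage sketch (not part of the original library) for the worked
// example in the comment above gather_and_remap: category keys abcf, gather
// positions 1,3,2,3,1,2. Names are illustrative.
void example_gather_and_remap()
{
    const char* base[] = { "a","b","b","f","c","f" };
    NVCategory* cat = NVCategory::create_from_array(base, 6);  // keys abcf
    const int pos[] = { 1, 3, 2, 3, 1, 2 };                    // selects b,f,c,f,b,c
    NVCategory* sub = cat->gather_and_remap(pos, 6, false);    // positions in host memory
    int values[6];
    sub->get_values(values, false);  // keys are now bcf; expected values 0 2 1 2 0 1
    for( int idx=0; idx < 6; ++idx )
        printf(" %d", values[idx]);
    printf("\n");
    NVCategory::destroy(sub);
    NVCategory::destroy(cat);
}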
// this method simply copies the keys and the passed in values to create a new category instance
NVCategory* NVCategory::gather( const int* pos, unsigned int count, bool bdevmem )
{
unsigned int kcount = keys_size();
NVCategory* rtn = new NVCategory;
auto execpol = rmm::exec_policy(0);
if( count )
{
auto pMap = new rmm::device_vector<int>(count,0);
auto d_pos = pMap->data().get();
if( bdevmem )
hipMemcpy(d_pos,pos,count*sizeof(int),hipMemcpyDeviceToDevice);
else
hipMemcpy(d_pos,pos,count*sizeof(int),hipMemcpyHostToDevice);
// first, do bounds check on input values; also -1 is allowed
// need to re-evaluate if this check is really necessary here
int invalidcount = thrust::count_if(execpol->on(0), d_pos, d_pos+count,
            [kcount] __device__ (int v) { return ((v < -1) || (v >= (int)kcount)); } );  // cast kcount so the allowed -1 is not promoted to a large unsigned value
if( invalidcount )
{
delete pMap;
delete rtn;
throw std::out_of_range("");
}
rtn->pImpl->pMap = pMap;
}
custring_view_array d_keys = pImpl->getStringsPtr();
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_keys,kcount);
//
return rtn;
}
//
// Merge two categories and maintain the values and key positions of the this category.
// Very complicated to avoid looping over values or keys from either category.
//
// Example shows logic used:
//
// category1:--------- category2:---------
// | strs1 key1 | | strs2 key2 |
// | abbfcf -> abcf | | aadcce -> acde |
// | 012345 0123 | | 012345 0123 |
// | 011323 <-' | | 002113 <-' |
// ------------------ ------------------
//
// merge/append should result in new category:
// strs key
// abbfcfaadcce -> abcfde
// 012345
// 011323004225 <-'
//
// 1. build vector of all the keys and seq vector (x) and diff vector (y)
// concat keys key2,key1 (w); stable-sort-by-key(w,x) and
// create neg,pos sequence (x) build diff vector (y)
// a c d e a b c f -> w = a a b c c d e f
// 0 1 2 3 -1 -2 -3 -4 x = 0 -1 -2 1 -3 2 3 -4
// y = 1 0 0 1 0 0 0 0
//
// 2. compute keys diff using w,x,y:
// copy-if/gather(w) (x>=0) && (y==0) --> d e
// reduce(y) = 2 -> how many keys matched
// new key: abcf + de = abcfde
//
// a b c d e f :unique-by-key(w,x)
// ubl = 0 -2 1 2 3 -4 :x = unique val--^
// sws = 0 1 2 4 5 3 :sort new key (abcfde) with seq (012345)
//
// 3. gather new indexes for map2
// copy-if/gather(sws) where ubl>=0 --> 0 2 4 5 (remap)
// remove-if(ubl) ubl<0 --> 0 1 2 3
// sort(ubl,remap) --> 0 2 4 5
// new map2 values gathered using original map to remap values:
// 002113 -> 004225
//
// result:
// new key: abcfde
// new map: 011323004225
// abbfcfaadcce
//
// The end result is not guaranteed to be a sorted keyset.
//
NVCategory* NVCategory::merge_category(NVCategory& cat2)
{
unsigned int count1 = keys_size();
unsigned int mcount1 = size();
unsigned int count2 = cat2.keys_size();
unsigned int mcount2 = cat2.size();
NVCategory* rtn = new NVCategory();
if( (count1==0) && (count2==0) )
return rtn;
unsigned int count12 = count1 + count2;
unsigned int mcount = mcount1 + mcount2;
// if either category is empty, just copy the non-empty one
if( (count1==0) || (count2==0) )
{
NVCategory* dcat = ((count1==0) ? &cat2 : this);
return dcat->copy();
}
auto execpol = rmm::exec_policy(0);
// both this cat and cat2 are non-empty
// init working vars
custring_view_array d_keys1 = pImpl->getStringsPtr();
int* d_map1 = pImpl->getMapPtr();
custring_view_array d_keys2 = cat2.pImpl->getStringsPtr();
int* d_map2 = cat2.pImpl->getMapPtr();
// create some vectors we can sort
rmm::device_vector<custring_view*> wstrs(count12); // w = keys2 + keys1
custring_view_array d_w = wstrs.data().get();
hipMemcpy(d_w, d_keys2, count2*sizeof(custring_view*),hipMemcpyDeviceToDevice);
hipMemcpy(d_w+count2, d_keys1, count1*sizeof(custring_view*),hipMemcpyDeviceToDevice);
rmm::device_vector<int> x(count12); // 0,1,....count2,-1,...,-count1
int* d_x = x.data().get();
// sequence and for-each-n could be combined into for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x+count2 ); // first half is 0...count2
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count1,
[d_x, count2] __device__ (int idx) { d_x[idx+count2]= -idx-1; }); // 2nd half is -1...-count1
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + count12, d_x, // preserves order for
[] __device__ (custring_view*& lhs, custring_view*& rhs) { // strings that match
return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0));
});
rmm::device_vector<int> y(count12,0); // y-vector will identify overlapped keys
int* d_y = y.data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (count12-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
int matched = thrust::reduce( execpol->on(0), d_y, d_y + count12 ); // how many keys matched
unsigned int ncount = count2 - (unsigned int)matched; // new keys count
unsigned int ucount = count1 + ncount; // total unique keys count
rmm::device_vector<custring_view*> keys(ucount,nullptr);
custring_view_array d_keys = keys.data().get(); // this will hold the merged keyset
rmm::device_vector<int> nidxs(ucount); // needed for various gather methods below
int* d_nidxs = nidxs.data().get(); // indexes of 'new' keys from key2 not in key1
{
thrust::counting_iterator<int> citr(0);
thrust::copy_if( execpol->on(0), citr, citr + (count12), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]>=0) && (d_y[idx]==0); });
hipDeviceSynchronize();
}
// first half of merged keyset is direct copy of key1
hipMemcpy( d_keys, d_keys1, count1*sizeof(custring_view*), hipMemcpyDeviceToDevice);
// append the 'new' keys from key2: extract them from w as identified by nidxs
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + ncount, d_w, d_keys + count1 );
int* d_ubl = d_x; // reuse d_x for unique-bias-left values
thrust::unique_by_key( execpol->on(0), d_w, d_w + count12, d_ubl,
[] __device__ (custring_view* lhs, custring_view* rhs) {
return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs));
}); // ubl now contains new index values for key2
int* d_sws = d_y; // reuse d_y for sort-with-seq values
thrust::sequence( execpol->on(0), d_sws, d_sws + ucount); // need to assign new index values
rmm::device_vector<custring_view*> keySort(ucount); // for all the original key2 values
hipMemcpy( keySort.data().get(), d_keys, ucount * sizeof(custring_view*), hipMemcpyDeviceToDevice);
thrust::sort_by_key( execpol->on(0), keySort.begin(), keySort.end(), d_sws,
[] __device__ (custring_view*& lhs, custring_view*& rhs ) {
return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0));
}); // sws is now key index values for the new keyset
//printDeviceInts("d_sws",d_sws,ucount);
{
thrust::counting_iterator<int> citr(0); // generate subset of just the key2 values
thrust::copy_if( execpol->on(0), citr, citr + ucount, d_nidxs, [d_ubl] __device__ (const int& idx) { return d_ubl[idx]>=0; });
hipDeviceSynchronize();
}
    // nidxs now holds the indexes of the key2 values within the new keyset, but in sorted order, while key2 itself may not have been sorted
rmm::device_vector<int> remap2(count2); // need to remap the indexes to the original positions
int* d_remap2 = remap2.data().get(); // do this by de-sorting the key2 values from the full keyset
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + count2, d_sws, d_remap2 ); // here grab new positions for key2
// first, remove the key1 indexes from the sorted sequence values; ubl will then have only key2 orig. pos values
thrust::remove_if( execpol->on(0), d_ubl, d_ubl + ucount, [] __device__ (int v) { return v<0; });
thrust::sort_by_key( execpol->on(0), d_ubl, d_ubl+count2, d_remap2 ); // does a de-sort of key2 only
// build new map
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_map = pNewMap->data().get(); // first half is identical to map1
hipMemcpy( d_map, d_map1, mcount1 * sizeof(int), hipMemcpyDeviceToDevice);
hipMemcpy( d_map+mcount1, d_map2, mcount2 * sizeof(int), hipMemcpyDeviceToDevice);
// remap map2 values to their new positions in the full keyset
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(mcount1), mcount2,
[d_map, d_remap2] __device__ (int idx) {
int v = d_map[idx];
if( v >= 0 )
d_map[idx] = d_remap2[v];
});
hipDeviceSynchronize();
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_keys,ucount);
rtn->pImpl->pMap = pNewMap;
return rtn;
}
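// Editorial usage sketch (not part of the original library) for the example
// worked through in the comment above merge_category: cat1 from abbfcf and
// cat2 from aadcce give keys abcfde and values 011323004225.
void example_merge_category()
{
    const char* s1[] = { "a","b","b","f","c","f" };
    const char* s2[] = { "a","a","d","c","c","e" };
    NVCategory* cat1 = NVCategory::create_from_array(s1, 6);  // keys abcf, values 011323
    NVCategory* cat2 = NVCategory::create_from_array(s2, 6);  // keys acde, values 002113
    NVCategory* merged = cat1->merge_category(*cat2);         // keys abcfde (not necessarily sorted)
    int values[12];
    merged->get_values(values, false);
    for( int idx=0; idx < 12; ++idx )
        printf("%d", values[idx]);  // expected: 011323004225
    printf("\n");
    NVCategory::destroy(merged);
    NVCategory::destroy(cat2);
    NVCategory::destroy(cat1);
}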
// see create_from_categories method above for logic details
NVCategory* NVCategory::merge_and_remap(NVCategory& cat2)
{
std::vector<NVCategory*> cats;
cats.push_back(this);
cats.push_back(&cat2);
return create_from_categories(cats);
}
//
// Creates a new instance adding the specified strings as keys and remapping the values.
// Pandas maintains the original position values. This function does a remap.
//
// Example:
//
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
//
// new keys: abcd
// duplicate keys can be ignored; new keyset may shift the indexes
//
// a b c f : a b c d -> w = a a b b c c d f
// 0 1 2 3 -1 -2 -3 -4 x = 0 -1 1 -2 2 -3 -4 3
//
// u = a b c d f
// ux = 0 1 2 -4 3
//
// values map: a b c f
// 0 1 2 4
//
// new values: a b b f c f
// 0 1 1 4 2 4
//
NVCategory* NVCategory::add_keys_and_remap(NVStrings& strs)
{
unsigned int kcount = keys_size();
unsigned int mcount = size();
unsigned int count = strs.size();
if( (kcount==0) && (count==0) )
return new NVCategory;
if( count==0 )
return copy();
auto execpol = rmm::exec_policy(0);
// get the keys from the argument
rmm::device_vector<custring_view*> addKeys(count,nullptr);
custring_view_array d_addKeys = addKeys.data().get();
strs.create_custring_index(d_addKeys);
NVCategory* rtn = new NVCategory;
if( kcount==0 )
{
        // just take the keys; values are not affected
// need to sort and unique them
thrust::sort(execpol->on(0), d_addKeys, d_addKeys + count, [] __device__( custring_view*& lhs, custring_view*& rhs ) { return ( (lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0) ); });
// now remove duplicates from string list
auto nend = thrust::unique(execpol->on(0), d_addKeys, d_addKeys + count, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
unsigned int ucount = nend - d_addKeys;
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_addKeys,ucount);
// copy the values
if( mcount )
{
rtn->pImpl->pMap = new rmm::device_vector<int>(mcount,0);
hipMemcpy(rtn->pImpl->getMapPtr(),pImpl->getMapPtr(),mcount*sizeof(int),hipMemcpyDeviceToDevice);
}
return rtn;
}
// both kcount and count are non-zero
custring_view_array d_keys = pImpl->getStringsPtr();
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
hipMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),hipMemcpyDeviceToDevice);
hipMemcpy(d_w+kcount, d_addKeys, count*sizeof(custring_view*),hipMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // values arranged like 0,...,(kcount-1),-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // first half is [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
auto nend = thrust::unique_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
int ucount = nend.second - d_x;
// d_w,ucount are now the keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount);
// remapping the values
rmm::device_vector<int> y(kcount,-1);
int* d_y = y.data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) {
int u = d_x[idx];
if( u >= 0 )
d_y[u] = idx;
});
// allocate and fill new map
int* d_map = pImpl->getMapPtr();
if( mcount && d_map )
{
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,-1);
int* d_newmap = pNewMap->data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx];
d_newmap[idx] = (v < 0 ? v : d_y[v]);
});
hipDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
}
return rtn;
}
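// Editorial usage sketch (not part of the original library) for the example in
// the comment above add_keys_and_remap: category from abbfcf plus keys a,b,c,d.
void example_add_keys_and_remap()
{
    const char* base[] = { "a","b","b","f","c","f" };
    NVCategory* cat = NVCategory::create_from_array(base, 6);  // keys abcf, values 011323
    const char* extra[] = { "a","b","c","d" };
    NVStrings* keys = NVStrings::create_from_array(extra, 4);
    NVCategory* cat2 = cat->add_keys_and_remap(*keys);         // keys abcdf
    int values[6];
    cat2->get_values(values, false);
    for( int idx=0; idx < 6; ++idx )
        printf(" %d", values[idx]);  // expected: 0 1 1 4 2 4
    printf("\n");
    NVStrings::destroy(keys);
    NVCategory::destroy(cat2);
    NVCategory::destroy(cat);
}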
// Creates a new instance removing the keys matching the specified strings and remapping the values.
// Pandas maintains the original position values. This function does a remap.
//
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
//
// remove keys: bd
// unknown keys can be ignored; new keyset may shift the indexes
//
// a b c f : b d -> w = a b b c d f
// 0 1 2 3 -1 -2 x = 0 1 -1 2 -2 3
// y = 0 1 0 0 0 0
//
// remove keys: x<0 || y==1 : b d
// u = a c f
// ux = 0 2 3
//
// values map: a b c f
// 0 -1 1 2
//
// new values: a b b f c f
// 0 -1 -1 2 1 2
//
//
NVCategory* NVCategory::remove_keys_and_remap(NVStrings& strs)
{
unsigned int kcount = keys_size();
unsigned int count = strs.size();
if( kcount==0 || count==0 )
return copy();
// both kcount and count are non-zero
auto execpol = rmm::exec_policy(0);
// get the keys from the parameter
rmm::device_vector<custring_view*> removeKeys(count,nullptr);
custring_view_array d_removeKeys = removeKeys.data().get();
strs.create_custring_index(d_removeKeys);
// keys for this instance
custring_view_array d_keys = pImpl->getStringsPtr();
// combine the keys into one set to be evaluated
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
hipMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),hipMemcpyDeviceToDevice);
hipMemcpy(d_w+kcount, d_removeKeys, count*sizeof(custring_view*),hipMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // 0,1,...,kcount,-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
rmm::device_vector<int> y(akcount,0); // matches resulting from
int* d_y = y.data().get(); // sort are marked with '1'
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (akcount-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
//
int cpcount = akcount; // how many keys copied
{ // scoping to get rid of temporary memory sooner
rmm::device_vector<int> nidxs(akcount); // needed for gather
int* d_nidxs = nidxs.data().get(); // indexes of keys from key1 not in key2
thrust::counting_iterator<int> citr(0);
int* nend = thrust::copy_if( execpol->on(0), citr, citr + (akcount), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]>=0) && (d_y[idx]==0); });
cpcount = nend - d_nidxs;
// the gather()s here will select the remaining keys
rmm::device_vector<custring_view*> wstrs2(cpcount);
rmm::device_vector<int> x2(cpcount);
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, wstrs.begin(), wstrs2.begin() );
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, x.begin(), x2.begin() );
wstrs.swap(wstrs2);
d_w = wstrs.data().get();
x.swap(x2);
d_x = x.data().get();
hipDeviceSynchronize();
}
NVCategory* rtn = new NVCategory;
int ucount = cpcount; // final number of unique keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount); // and d_w are those keys
// now remap the values: positive values in d_x are [0:ucount)
thrust::fill( execpol->on(0), d_y, d_y + kcount, -1);
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) { d_y[d_x[idx]] = idx; });
unsigned int mcount = size();
int* d_map = pImpl->getMapPtr();
if( mcount && d_map )
{
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_newmap = pNewMap->data().get(); // new map will go here
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx]; // get old index
d_newmap[idx] = ( v < 0 ? v : d_y[v]); // set new index (may be negative)
});
hipDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
}
return rtn;
}
// keys that are not represented in the list of values are removed
// this may cause the values to be remapped if the keys positions are moved
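// Usage sketch (illustrative only; 'cat' is hypothetical):
//   // e.g. after gather(), the map may no longer reference every key
//   NVCategory* compact = cat->remove_unused_keys_and_remap();
//   // compact->keys_size() <= cat->keys_size(); values are remapped to the smaller keyset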
NVCategory* NVCategory::remove_unused_keys_and_remap()
{
unsigned int kcount = keys_size();
unsigned int mcount = size();
if( kcount==0 )
return copy();
// kcount is non-zero here; the count of unused keys is computed below
auto execpol = rmm::exec_policy(0);
// keys for this instance
custring_view_array d_keys = pImpl->getStringsPtr();
int* d_map = pImpl->getMapPtr();
rmm::device_vector<unsigned int> usedkeys(kcount,0);
unsigned int* d_usedkeys = usedkeys.data().get();
// find the keys that are not being used
unsigned int count = 0;
if( d_map )
{
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_usedkeys] __device__ (int idx) {
int pos = d_map[idx];
if( pos >= 0 )
d_usedkeys[pos] = 1; // race condition not important
});
// compute how many are not used
count = kcount - thrust::reduce(execpol->on(0),d_usedkeys,d_usedkeys+kcount,(unsigned int)0);
}
if( count==0 )
return copy();
// gather the unused keys
rmm::device_vector<custring_view*> removeKeys(count,nullptr);
custring_view_array d_removeKeys = removeKeys.data().get();
{
rmm::device_vector<int> nidxs(count);
int* d_nidxs = nidxs.data().get();
thrust::counting_iterator<int> citr(0);
thrust::copy_if( execpol->on(0), citr, citr + kcount, d_nidxs,
[d_usedkeys] __device__ (const int& idx) { return (d_usedkeys[idx]==0); });
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + count, d_keys, d_removeKeys );
hipDeviceSynchronize();
}
// the remainder is common with remove_keys_and_remap
// --------------------------------------------------
// combine the keys into one set to be evaluated
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
hipMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),hipMemcpyDeviceToDevice);
hipMemcpy(d_w+kcount, d_removeKeys, count*sizeof(custring_view*),hipMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // 0,1,...,kcount,-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
rmm::device_vector<int> y(akcount,0); // matches resulting from
int* d_y = y.data().get(); // sort are marked with '1'
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (akcount-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
int cpcount = akcount; // how many keys copied
{ // scoping to get rid of temporary memory sooner
rmm::device_vector<int> nidxs(akcount); // needed for gather
int* d_nidxs = nidxs.data().get(); // indexes of keys from key1 not in key2
thrust::counting_iterator<int> citr(0);
int* nend = thrust::copy_if( execpol->on(0), citr, citr + (akcount), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]>=0) && (d_y[idx]==0); });
cpcount = nend - d_nidxs;
// the gather()s here will select the remaining keys
rmm::device_vector<custring_view*> wstrs2(cpcount);
rmm::device_vector<int> x2(cpcount);
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, wstrs.begin(), wstrs2.begin() );
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, x.begin(), x2.begin() );
wstrs.swap(wstrs2);
d_w = wstrs.data().get();
x.swap(x2);
d_x = x.data().get();
hipDeviceSynchronize();
}
NVCategory* rtn = new NVCategory;
int ucount = cpcount; // final number of unique keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount); // and d_w are those keys
// now remap the values: positive values in d_x are [0:ucount)
thrust::fill( execpol->on(0), d_y, d_y + kcount, -1);
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) { d_y[d_x[idx]] = idx; });
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_newmap = pNewMap->data().get(); // new map will go here
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx]; // get old index
d_newmap[idx] = ( v < 0 ? v : d_y[v]); // set new index (may be negative)
});
hipDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
return rtn;
}
// Creates a new instance using the specified strings as keys causing add/remove as appropriate.
// Values are also remapped.
//
// Example:
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
// new keyset: bcde
// 0123
// new values: a b b f c f
// -1 0 0 -1 1 -1
//
// Logic:
// a b c f : b c e d -> w = a b b c c d e f
// 0 1 2 3 -1 -2 -3 -4 x = 0 1 -1 2 -2 -4 -3 3
// y = 0 1 0 1 0 0 0 0
//
// remove keys: x>=0 && y==0 : a f -> [0,3] -> [-1,-1]
// w' = b b c c d e
// x' = 1 -1 2 -2 -4 -3
// u = b c d e
// 1 2 -4 -3
//
// need map to set values like: 0 1 2 3
// -1 0 1 -1
//
// so create map using:
// m[] = -1 -- init all to -1; we don't need to worry about removed keys
// if(u[idx]>=0): m[u[idx]]=idx
//
// and create new values using:
// v[idx] = m[v[idx]] -- make sure v[idx]>=0
//
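// Usage sketch (illustrative only; variable names are hypothetical), following the worked example above:
//   // cat: keys a,b,c,f  values 0,1,1,3,2,3 (strings abbfcf)
//   const char* keyset[] = { "b", "c", "d", "e" };
//   NVStrings* kstrs = NVStrings::create_from_array(keyset, 4);
//   NVCategory* newcat = cat->set_keys_and_remap(*kstrs);
//   // newcat: keys b,c,d,e   values -1,0,0,-1,1,-1
//   NVStrings::destroy(kstrs);
//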
NVCategory* NVCategory::set_keys_and_remap(NVStrings& strs)
{
unsigned int kcount = keys_size();
unsigned int mcount = size();
unsigned int count = strs.size();
NVCategory* rtn = new NVCategory;
if( (kcount==0) && (count==0) )
return rtn;
if( count==0 )
{
rtn->pImpl->pMap = new rmm::device_vector<int>(mcount,-1);
return rtn;
}
auto execpol = rmm::exec_policy(0);
// get the keys
rmm::device_vector<custring_view*> newKeys(count,nullptr);
custring_view_array d_newKeys = newKeys.data().get();
strs.create_custring_index(d_newKeys);
if( kcount==0 )
{
// just take the new keys
thrust::sort(execpol->on(0), d_newKeys, d_newKeys + count, [] __device__( custring_view*& lhs, custring_view*& rhs ) { return ( (lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0) ); });
// now remove duplicates from string list
auto nend = thrust::unique(execpol->on(0), d_newKeys, d_newKeys + count, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
unsigned int ucount = nend - d_newKeys;
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_newKeys,ucount);
// copy the values
if( mcount )
{
rtn->pImpl->pMap = new rmm::device_vector<int>(mcount,0);
hipMemcpy(rtn->pImpl->getMapPtr(),pImpl->getMapPtr(),mcount*sizeof(int),hipMemcpyDeviceToDevice);
}
return rtn;
}
// both kcount and count are non-zero
custring_view_array d_keys = pImpl->getStringsPtr();
// combine the keys into single array
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
hipMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),hipMemcpyDeviceToDevice);
hipMemcpy(d_w+kcount, d_newKeys, count*sizeof(custring_view*),hipMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // 0,1,...,(kcount-1),-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // first half is [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
rmm::device_vector<int> y(akcount,0); // holds matches resulting from
int* d_y = y.data().get(); // sort are marked with '1'
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (akcount-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
//
int matched = thrust::reduce( execpol->on(0), d_y, d_y + akcount ); // how many keys matched
rmm::device_vector<int> nidxs(akcount); // needed for gather methods
int* d_nidxs = nidxs.data().get(); // indexes of keys from key1 not in key2
int cpcount = akcount; // how many keys copied
{
thrust::counting_iterator<int> citr(0);
int* nend = thrust::copy_if( execpol->on(0), citr, citr + (akcount), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]<0) || d_y[idx]; });
cpcount = nend - d_nidxs;
}
if( cpcount < akcount )
{ // if keys are removed, we need to make a copy;
// the gather()s here will select the remaining keys
rmm::device_vector<custring_view*> wstrs2(cpcount);
rmm::device_vector<int> x2(cpcount);
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, wstrs.begin(), wstrs2.begin() );
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, x.begin(), x2.begin() );
wstrs.swap(wstrs2);
d_w = wstrs.data().get();
x.swap(x2);
d_x = x.data().get();
akcount = cpcount;
}
thrust::unique_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
int ucount = akcount - matched;
// d_w,ucount are now the keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount);
// now remap the values: positive values in d_x are [0:ucount)
thrust::fill( execpol->on(0), d_y, d_y + kcount, -1);
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) {
int u = d_x[idx];
if( u >= 0 )
d_y[u] = idx;
});
// allocate new map
int* d_map = pImpl->getMapPtr();
if( mcount && d_map )
{
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_newmap = pNewMap->data().get(); // new map goes in here
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx];
d_newmap[idx] = (v < 0 ? v : d_y[v]);
});
hipDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
}
return rtn;
}
| c50aeb422100ad0ab4f8431a2865516637b58882.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/gather.h>
#include <thrust/copy.h>
#include <locale.h>
#include <stdexcept>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVCategory.h"
#include "NVStrings.h"
#include "custring_view.cuh"
#include "custring.cuh"
#include "ipc_transfer.h"
//
typedef custring_view** custring_view_array;
#define ALIGN_SIZE(v) (((v+7)/8)*8)
//static void printDeviceInts( const char* title, int* d_ints, int count )
//{
// thrust::host_vector<int> ints(count);
// int* h_ints = ints.data();
// cudaMemcpy( h_ints, d_ints, count * sizeof(int), cudaMemcpyDeviceToHost);
// if( title )
// printf("%s:\n",title);
// for( int i=0; i < count; ++i )
// printf(" %d",h_ints[i]);
// printf("\n");
//}
//
class NVCategoryImpl
{
public:
//
rmm::device_vector<custring_view*>* pList;
rmm::device_vector<int>* pMap;
void* memoryBuffer;
size_t bufferSize; // total memory size
cudaStream_t stream_id;
bool bIpcHandle;
//
NVCategoryImpl()
: bufferSize(0), memoryBuffer(0), pList(0), pMap(0), stream_id(0), bIpcHandle(false)
{}
~NVCategoryImpl()
{
if( memoryBuffer )
{
if( bIpcHandle )
cudaIpcCloseMemHandle(memoryBuffer);
else
RMM_FREE(memoryBuffer,0);
}
delete pList;
delete pMap;
memoryBuffer = 0;
bufferSize = 0;
}
inline custring_view_array getStringsPtr()
{
custring_view_array rtn = 0;
if( pList )
rtn = pList->data().get();
return rtn;
}
inline custring_view_array createStringsListFrom( custring_view_array strings, unsigned int keys )
{
pList = new rmm::device_vector<custring_view*>(keys);
cudaMemcpy(pList->data().get(), strings, keys*sizeof(custring_view*), cudaMemcpyDeviceToDevice);
return pList->data().get();
}
inline char* getMemoryPtr() { return (char*)memoryBuffer; }
inline int* getMapPtr()
{
int* rtn = 0;
if( pMap )
rtn = pMap->data().get();
return rtn;
}
inline int* createMapFrom( int* vals, unsigned int count )
{
pMap = new rmm::device_vector<int>(count);
cudaMemcpy(pMap->data().get(), vals, count*sizeof(int), cudaMemcpyDeviceToDevice);
return pMap->data().get();
}
inline void setMemoryBuffer( void* ptr, size_t memSize )
{
bufferSize = memSize;
memoryBuffer = ptr;
}
inline void setMemoryHandle( void* ptr, size_t memSize )
{
setMemoryBuffer(ptr,memSize);
bIpcHandle = true;
}
};
//
NVCategory::NVCategory()
{
pImpl = new NVCategoryImpl;
}
NVCategory::~NVCategory()
{
delete pImpl;
}
// utility to create keys from array of string pointers
// pImpl must exist but its pList should be null -- this method will create it
void NVCategoryImpl_keys_from_index( NVCategoryImpl* pImpl, thrust::pair<const char*,size_t>* d_pairs, unsigned int ucount )
{
auto execpol = rmm::exec_policy(0);
// add up the lengths
rmm::device_vector<size_t> lengths(ucount,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_pairs, d_lengths] __device__(size_t idx){
const char* str = d_pairs[idx].first;
int bytes = (int)d_pairs[idx].second;
if( str )
d_lengths[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,bytes));
});
// create output buffer to hold the string keys
size_t outsize = thrust::reduce(execpol->on(0), lengths.begin(), lengths.end());
char* d_buffer = 0;
RMM_ALLOC(&d_buffer,outsize,0);
pImpl->setMemoryBuffer(d_buffer,outsize);
rmm::device_vector<size_t> offsets(ucount,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// create the vector to hold the pointers
rmm::device_vector<custring_view*>* pList = new rmm::device_vector<custring_view*>(ucount,nullptr);
custring_view_array d_results = pList->data().get();
// copy keys strings to new memory buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_pairs, d_buffer, d_offsets, d_results] __device__ (size_t idx) {
const char* str = d_pairs[idx].first;
int bytes = (int)d_pairs[idx].second;
if( str )
d_results[idx] = custring_view::create_from(d_buffer+d_offsets[idx],(char*)str,bytes);
});
pImpl->pList = pList;
}
// utility to create keys from array of custrings
// pImpl must exist but its pList should be null -- this method will create it
void NVCategoryImpl_keys_from_custringarray( NVCategoryImpl* pImpl, custring_view_array d_keys, unsigned int ucount )
{
auto execpol = rmm::exec_policy(0);
// add up the lengths
rmm::device_vector<size_t> lengths(ucount,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_keys, d_lengths] __device__(size_t idx){
custring_view* dstr = d_keys[idx];
if( dstr )
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size());
});
// create output buffer to hold the string keys
size_t outsize = thrust::reduce(execpol->on(0), lengths.begin(), lengths.end());
char* d_buffer = 0;
RMM_ALLOC(&d_buffer,outsize,0);
pImpl->setMemoryBuffer(d_buffer,outsize);
rmm::device_vector<size_t> offsets(ucount,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// create the vector to hold the pointers
rmm::device_vector<custring_view*>* pList = new rmm::device_vector<custring_view*>(ucount,nullptr);
custring_view_array d_results = pList->data().get();
// copy keys strings to new memory buffer
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_keys, d_buffer, d_offsets, d_results] __device__ (size_t idx) {
custring_view* dstr = d_keys[idx];
if( dstr )
d_results[idx] = custring_view::create_from(d_buffer+d_offsets[idx],*dstr);
});
pImpl->pList = pList;
}
// Utility to create category instance data from array of string pointers (in device memory).
// It does all operations using the given pointers (or copies) to build the map.
// This method can be given the index values from the NVStrings::create_index.
// So however an NVStrings can be created can also create an NVCategory.
//
// Should investigate converting this to use custring pointers instead of index pairs.
// It would likely save some processing since we can create custrings from custrings.
void NVCategoryImpl_init(NVCategoryImpl* pImpl, std::pair<const char*,size_t>* pairs, unsigned int count, bool bdevmem, bool bindexescopied=false )
{
cudaError_t err = cudaSuccess;
auto execpol = rmm::exec_policy(0);
// make a copy of the indexes so we can sort them, etc
thrust::pair<const char*,size_t>* d_pairs = 0;
if( bdevmem )
{
if( bindexescopied ) // means caller already made a temp copy
d_pairs = (thrust::pair<const char*,size_t>*)pairs; // and we can just use it here
else
{
RMM_ALLOC(&d_pairs,sizeof(thrust::pair<const char*,size_t>)*count,0);
cudaMemcpy(d_pairs,pairs,sizeof(thrust::pair<const char*,size_t>)*count,cudaMemcpyDeviceToDevice);
}
}
else
{
RMM_ALLOC(&d_pairs,sizeof(thrust::pair<const char*,size_t>)*count,0);
cudaMemcpy(d_pairs,pairs,sizeof(thrust::pair<const char*,size_t>)*count,cudaMemcpyHostToDevice);
}
//
// example strings used in comments e,a,d,b,c,c,c,e,a
//
rmm::device_vector<int> indexes(count);
thrust::sequence(execpol->on(0),indexes.begin(),indexes.end()); // 0,1,2,3,4,5,6,7,8
int* d_indexes = indexes.data().get();
// sort by key (string) a,a,b,c,c,c,d,e,e
// and indexes go along for the ride 1,8,3,4,5,6,2,0,7
thrust::sort_by_key(execpol->on(0), d_pairs, d_pairs+count, d_indexes,
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return rhs.first!=0; // null < non-null
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// build the map; this will let us lookup strings by index
rmm::device_vector<int>* pMap = new rmm::device_vector<int>(count,0);
int* d_map = pMap->data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count,
[d_pairs, d_map] __device__ (int idx) {
if( idx==0 )
return;
const char* ptr1 = d_pairs[idx-1].first;
const char* ptr2 = d_pairs[idx].first;
unsigned int len1 = (unsigned int)d_pairs[idx-1].second, len2 = (unsigned int)d_pairs[idx].second;
//d_map[idx] = (int)(custr::compare(ptr1,len1,ptr2,len2)!=0);
int cmp = 0; // vvvvv - probably faster than - ^^^^^
if( !ptr1 || !ptr2 )
cmp = (int)(ptr1!=ptr2);
else if( len1 != len2 )
cmp = 1;
else
for( int i=0; !cmp && (i < len1); ++i)
cmp = (int)(*ptr1++ != *ptr2++);
d_map[idx] = cmp;
});
//
// d_map now identifies just string changes 0,0,1,1,0,0,1,1,0
int ucount = thrust::reduce(execpol->on(0), pMap->begin(), pMap->end()) + 1;
// scan converts to index values 0,0,1,2,2,2,3,4,4
thrust::inclusive_scan(execpol->on(0), pMap->begin(), pMap->end(), pMap->begin());
// re-sort will complete the map 4,0,3,1,2,2,2,4,0
thrust::sort_by_key(execpol->on(0), indexes.begin(), indexes.end(), pMap->begin());
pImpl->pMap = pMap; // index -> str is now just a lookup in the map
// now remove duplicates from string list a,b,c,d,e
thrust::unique(execpol->on(0), d_pairs, d_pairs+count,
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
if( lhs.first==0 || rhs.first==0 )
return lhs.first==rhs.first;
if( lhs.second != rhs.second )
return false;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
// finally, create new string vector of just the keys
NVCategoryImpl_keys_from_index(pImpl,d_pairs,ucount);
err = cudaDeviceSynchronize();
if( err!=cudaSuccess )
fprintf(stderr,"category: error(%d) creating %'d strings\n",(int)err,ucount);
if( !bindexescopied )
RMM_FREE(d_pairs,0);
}
NVCategory* NVCategory::create_from_index(std::pair<const char*,size_t>* strs, unsigned int count, bool devmem )
{
NVCategory* rtn = new NVCategory;
if( count )
NVCategoryImpl_init(rtn->pImpl,strs,count,devmem);
return rtn;
}
NVCategory* NVCategory::create_from_array(const char** strs, unsigned int count)
{
NVCategory* rtn = new NVCategory;
if( count==0 )
return rtn;
NVStrings* dstrs = NVStrings::create_from_array(strs,count);
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
dstrs->create_index(indexes);
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
NVStrings::destroy(dstrs);
return rtn;
}
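// Usage sketch (illustrative only; names are hypothetical), matching the example traced in NVCategoryImpl_init above:
//   const char* data[] = { "e","a","d","b","c","c","c","e","a" };
//   NVCategory* cat = NVCategory::create_from_array(data, 9);
//   // keys: a,b,c,d,e   values: 4,0,3,1,2,2,2,4,0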
NVCategory* NVCategory::create_from_strings(NVStrings& strs)
{
NVCategory* rtn = new NVCategory;
unsigned int count = strs.size();
if( count==0 )
return rtn;
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
strs.create_index(indexes);
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
return rtn;
}
NVCategory* NVCategory::create_from_strings(std::vector<NVStrings*>& strs)
{
NVCategory* rtn = new NVCategory;
unsigned int count = 0;
for( unsigned int idx=0; idx < (unsigned int)strs.size(); idx++ )
count += strs[idx]->size();
if( count==0 )
return rtn;
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
std::pair<const char*,size_t>* ptr = indexes;
for( unsigned int idx=0; idx < (unsigned int)strs.size(); idx++ )
{
strs[idx]->create_index(ptr);
ptr += strs[idx]->size();
}
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
return rtn;
}
// bitmask is in arrow format
NVCategory* NVCategory::create_from_offsets(const char* strs, unsigned int count, const int* offsets, const unsigned char* nullbitmask, int nulls)
{
NVCategory* rtn = new NVCategory;
if( count==0 )
return rtn;
NVStrings* dstrs = NVStrings::create_from_offsets(strs,count,offsets,nullbitmask,nulls);
std::pair<const char*,size_t>* indexes = 0;
RMM_ALLOC(&indexes, count * sizeof(std::pair<const char*,size_t>),0);
dstrs->create_index(indexes); // try using the custring one; may be more efficient
NVCategoryImpl_init(rtn->pImpl,indexes,count,true,true);
RMM_FREE(indexes,0);
NVStrings::destroy(dstrs);
return rtn;
}
// create instance from ipc handle(s)
NVCategory* NVCategory::create_from_ipc( nvcategory_ipc_transfer& ipc )
{
NVCategory* rtn = new NVCategory;
unsigned int keys = ipc.keys;
if( keys==0 )
return rtn;
rtn->pImpl->setMemoryHandle(ipc.getMemoryPtr(),ipc.size);
custring_view_array d_strings = rtn->pImpl->createStringsListFrom((custring_view_array)ipc.getStringsPtr(),ipc.keys);
// fix up the pointers for this context
auto execpol = rmm::exec_policy(0);
char* baseaddr = (char*)ipc.base_address;
char* buffer = rtn->pImpl->getMemoryPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), keys,
[buffer, baseaddr, d_strings] __device__(unsigned int idx){
char* dstr = (char*)d_strings[idx];
if( !dstr )
return;
size_t diff = dstr - baseaddr;
char* newaddr = buffer + diff;
d_strings[idx] = (custring_view*)newaddr;
});
cudaDeviceSynchronize();
// set the map values
rtn->pImpl->createMapFrom( (int*)ipc.getMapPtr(), ipc.count );
// done
return rtn;
}
//
// Example merging two categories and remapping the values:
//
// category1:--------- category2:---------
// | strs1 key1 | | strs2 key2 |
// | abbfcf -> abcf | | aadcce -> acde |
// | 012345 0123 | | 012345 0123 |
// | 011323 <-' | | 002113 <-' |
// ------------------ ------------------
//
// merge-remap should result in new category like:
// strs key
// abbfcfaadcce -> abcdef
// 012345
// 011525003224 <-'
//
// abcfacde -> w = aabccdef
// 01234567 x = 04125673
// y = 00110111
// y'= 00122345 = scan(y)
// y"= 01250234 = sort(x,y')
// v = 0125:0234 = this is y"
// m = 011323:002113 = orig values from each category
// m'= r1[v1]:r2[v2] -> 011525:003224
// w'= unique(w) -> abcdef
//
// This logic works for any number of categories.
// A loop is required at the beginning to combine all the keys,
// and another loop at the end to combine and remap the values.
//
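// Usage sketch (illustrative only; cat1/cat2 are hypothetical), following the example above:
//   std::vector<NVCategory*> cats;
//   cats.push_back(cat1);   // keys a,b,c,f  values 0,1,1,3,2,3
//   cats.push_back(cat2);   // keys a,c,d,e  values 0,0,2,1,1,3
//   NVCategory* merged = NVCategory::create_from_categories(cats);
//   // merged: keys a,b,c,d,e,f   values 0,1,1,5,2,5,0,0,3,2,2,4
//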
NVCategory* NVCategory::create_from_categories(std::vector<NVCategory*>& cats)
{
NVCategory* rtn = new NVCategory();
if( cats.empty() )
return rtn;
unsigned int count = 0;
unsigned int mcount = 0;
for( unsigned int idx=0; idx < cats.size(); ++idx )
{
NVCategory* cat = cats[idx];
count += cat->keys_size();
mcount += cat->size();
}
if( count==0 )
return rtn;
auto execpol = rmm::exec_policy(0);
// first combine the keys into one array
rmm::device_vector<custring_view*> wstrs(count);
custring_view_array d_w = wstrs.data().get();
for( unsigned int idx=0; idx < cats.size(); ++idx )
{
NVCategory* cat = cats[idx];
custring_view_array d_keys = cat->pImpl->getStringsPtr();
unsigned int ksize = cat->keys_size();
if( ksize )
cudaMemcpy(d_w, d_keys, ksize*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
d_w += ksize;
}
d_w = wstrs.data().get(); // reset pointer
rmm::device_vector<int> x(count);
int* d_x = x.data().get(); // [0:count)
thrust::sequence( execpol->on(0), d_x, d_x+count );
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w+count, d_x,
[] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
// x-vector is sorted sequence we'll use to remap values
rmm::device_vector<int> y(count,0); // y-vector will identify unique keys
int* d_y = y.data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(1), (count-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx-1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)!=0);
else
d_y[idx] = (int)(lhs!=rhs);
});
unsigned int kcount = (unsigned int)thrust::reduce( execpol->on(0), d_y, d_y+count )+1;
// use gather to get unique keys
// theory is that copy_if + gather on ints is faster than unique on strings
//rmm::device_vector<int> nidxs(kcount);
//thrust::counting_iterator<int> citr(0);
//thrust::copy_if( execpol->on(0), citr, citr + count, nidxs.data().get(), [d_y] __device__ (const int& idx) { return (idx==0 || d_y[idx]); });
//rmm::device_vector<custring_view*>* pNewList = new rmm::device_vector<custring_view*>(kcount,nullptr);
//custring_view_array d_keys = pNewList->data().get(); // this will hold the merged keyset
//thrust::gather( execpol->on(0), nidxs.begin(), nidxs.end(), d_w, d_keys );
thrust::unique( execpol->on(0), d_w, d_w+count, [] __device__ (custring_view* lhs, custring_view* rhs) { return (lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs); });
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,kcount);
// now create map to remap the values
thrust::inclusive_scan(execpol->on(0), d_y, d_y+count, d_y );
thrust::sort_by_key(execpol->on(0), d_x, d_x+count, d_y );
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount);
int* d_map = pNewMap->data().get();
int* d_v = d_y;
for( int idx=0; idx < (int)cats.size(); ++idx )
{
NVCategory* cat = cats[idx];
unsigned int msize = cat->size();
if( msize )
{
int* d_catmap = cat->pImpl->getMapPtr();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), msize,
[d_catmap, d_v, d_map] __device__ (int idx) {
int v = d_catmap[idx];
d_map[idx] = ( v<0 ? v : d_v[v] );
});
}
d_v += cat->keys_size();
d_map += msize;
}
// done
rtn->pImpl->pMap = pNewMap;
return rtn;
}
void NVCategory::destroy(NVCategory* inst)
{
delete inst;
}
// dest should already be empty
void NVCategoryImpl_copy( NVCategoryImpl& dest, NVCategoryImpl& src )
{
if( src.pList==0 )
return;
auto execpol = rmm::exec_policy(0);
if( src.pMap )
{
unsigned int mcount = (unsigned int)src.pMap->size();
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
// copy map values from non-empty category instance
cudaMemcpy( pNewMap->data().get(), src.pMap->data().get(), mcount*sizeof(int), cudaMemcpyDeviceToDevice );
dest.pMap = pNewMap;
}
// copy key strings buffer
unsigned int ucount = (unsigned int)src.pList->size();
rmm::device_vector<custring_view*>* pNewList = new rmm::device_vector<custring_view*>(ucount,nullptr);
char* d_buffer = (char*)src.memoryBuffer;
size_t bufsize = src.bufferSize;
char* d_newbuffer = 0;
RMM_ALLOC(&d_newbuffer,bufsize,0);
cudaMemcpy(d_newbuffer,d_buffer,bufsize,cudaMemcpyDeviceToDevice);
// need to set custring_view ptrs
custring_view_array d_strings = src.getStringsPtr();
custring_view_array d_results = pNewList->data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), ucount,
[d_strings, d_buffer, d_newbuffer, d_results] __device__ (size_t idx) {
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_newbuffer + ((char*)dstr - d_buffer);
d_results[idx] = (custring_view*)buffer;
}
});
dest.pList = pNewList;
dest.setMemoryBuffer( d_newbuffer, bufsize );
}
NVCategory::NVCategory(const NVCategory& cat)
{
pImpl = new NVCategoryImpl;
NVCategoryImpl_copy(*pImpl,*(cat.pImpl));
}
NVCategory& NVCategory::operator=(const NVCategory& cat)
{
delete pImpl;
pImpl = new NVCategoryImpl;
NVCategoryImpl_copy(*pImpl,*(cat.pImpl));
return *this;
}
NVCategory* NVCategory::copy()
{
NVCategory* rtn = new NVCategory;
NVCategoryImpl_copy(*(rtn->pImpl),*pImpl);
return rtn;
}
// return number of items
unsigned int NVCategory::size()
{
unsigned int size = 0;
if( pImpl->pMap )
size = pImpl->pMap->size();
return size;
}
// return number of keys
unsigned int NVCategory::keys_size()
{
unsigned int size = 0;
if( pImpl->pList )
size = pImpl->pList->size();
return size;
}
// true if any null values exist
bool NVCategory::has_nulls()
{
unsigned int count = keys_size();
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
int n = thrust::count_if(execpol->on(0), d_strings, d_strings+count,
[]__device__(custring_view* dstr) { return dstr==0; } );
return n > 0;
}
// bitarray is for the values; bits are set in arrow format
// return the number of null values found
int NVCategory::set_null_bitarray( unsigned char* bitarray, bool devmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned int size = (count + 7)/8;
unsigned char* d_bitarray = bitarray;
if( !devmem )
RMM_ALLOC(&d_bitarray,size,0);
int nidx = -1;
{
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> nulls(1,-1);
thrust::copy_if( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), thrust::make_counting_iterator<unsigned int>(keys_size()), nulls.begin(),
[d_strings] __device__ (unsigned int idx) { return d_strings[idx]==0; } );
nidx = nulls[0]; // should be the index of the null entry (or -1)
}
if( nidx < 0 )
{ // no nulls, set everything to 1s
cudaMemset(d_bitarray,255,size); // actually sets more bits than we need to
if( !devmem )
{
cudaMemcpy(bitarray,d_bitarray,size,cudaMemcpyDeviceToHost);
RMM_FREE(d_bitarray,0);
}
return 0; // no nulls;
}
// count nulls in range for return value
int* d_map = pImpl->getMapPtr();
unsigned int ncount = thrust::count_if(execpol->on(0), d_map, d_map + count,
[nidx] __device__ (int index) { return (index==nidx); });
// fill in the bitarray
// the bitmask is in arrow format which means for each byte
// the null indicator is in bit position right-to-left: 76543210
// logic sets the high-bit and shifts to the right
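// e.g. count=10 with nulls at value positions 2 and 9 produces
// d_bitarray[0]=0xFB (11111011) and d_bitarray[1]=0x01 (00000001)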
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size,
[d_map, nidx, count, d_bitarray] __device__(unsigned int byteIdx){
unsigned char byte = 0; // init all bits to zero
for( unsigned int i=0; i < 8; ++i )
{
unsigned int idx = i + (byteIdx*8);
byte = byte >> 1;
if( idx < count )
{
int index = d_map[idx];
byte |= (unsigned char)((index!=nidx) << 7);
}
}
d_bitarray[byteIdx] = byte;
});
cudaDeviceSynchronize();
if( !devmem )
{
cudaMemcpy(bitarray,d_bitarray,size,cudaMemcpyDeviceToHost);
RMM_FREE(d_bitarray,0);
}
return ncount; // number of nulls
}
// build a string-index from this instances strings
int NVCategory::create_index(std::pair<const char*,size_t>* strs, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
int* d_map = pImpl->getMapPtr();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_map, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[d_map[idx]];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
cudaDeviceSynchronize();
//
if( bdevmem )
cudaMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), cudaMemcpyDeviceToDevice );
else
cudaMemcpy( strs, indexes.data().get(), count * sizeof(std::pair<const char*,size_t>), cudaMemcpyDeviceToHost );
return 0;
}
int NVCategory::create_ipc_transfer( nvcategory_ipc_transfer& ipc )
{
ipc.setStrsHandle(pImpl->getStringsPtr(),pImpl->getMemoryPtr(),keys_size());
ipc.setMemHandle(pImpl->getMemoryPtr(),pImpl->bufferSize);
ipc.setMapHandle(pImpl->getMapPtr(),size());
return 0;
}
// return strings keys for this instance
NVStrings* NVCategory::get_keys()
{
int count = keys_size();
if( count==0 )
return NVStrings::create_from_index(0,0);
auto execpol = rmm::exec_policy(0);
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_indexes] __device__(size_t idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
cudaDeviceSynchronize();
// create strings from index
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
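// Usage sketch (illustrative only; 'cat' is hypothetical): the returned NVStrings holds a copy of the
// current keyset (one entry per key), e.g. a,b,c,f for the abbfcf example used in the comments above.
//   NVStrings* keys = cat->get_keys();
//   ...
//   NVStrings::destroy(keys);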
//
int NVCategory::get_value(unsigned int index)
{
if( index >= size() )
return -1;
int* d_map = pImpl->getMapPtr();
int rtn = -1;
if( d_map )
cudaMemcpy(&rtn,d_map+index,sizeof(int),cudaMemcpyDeviceToHost);
return rtn;
}
//
int NVCategory::get_value(const char* str)
{
char* d_str = 0;
unsigned int bytes = 0;
auto execpol = rmm::exec_policy(0);
if( str )
{
bytes = (unsigned int)strlen(str);
RMM_ALLOC(&d_str,bytes+1,0);
cudaMemcpy(d_str,str,bytes,cudaMemcpyHostToDevice);
}
int count = keys_size();
custring_view_array d_strings = pImpl->getStringsPtr();
// find string in this instance
rmm::device_vector<int> keys(1,-1);
thrust::copy_if( execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count), keys.begin(),
[d_strings, d_str, bytes] __device__ (int idx) {
custring_view* dstr = d_strings[idx];
if( (char*)dstr==d_str ) // only true if both are null
return true;
return ( dstr && dstr->compare(d_str,bytes)==0 );
} );
cudaDeviceSynchronize();
if( d_str )
RMM_FREE(d_str,0);
return keys[0];
}
std::pair<int,int> NVCategory::get_value_bounds(const char* str)
{
std::pair<int,int> rtn(-1,-1);
// first check if key exists (saves a lot of work below)
int value = get_value(str);
if( value>=0 )
{
rtn.first = value;
rtn.second = value;
return rtn;
}
// not found in existing keyset
auto execpol = rmm::exec_policy(0);
unsigned int count = keys_size();
custring_view** d_strings = pImpl->getStringsPtr();
// create index of the keys
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count+1);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_indexes] __device__(size_t idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
//
cudaDeviceSynchronize();
// and add the passed in string to the indexes
size_t len = 0;
char* d_str = 0;
if( str )
{
len = strlen(str);
RMM_ALLOC(&d_str,len+1,0);
cudaMemcpy(d_str,str,len+1,cudaMemcpyHostToDevice);
}
thrust::pair<const char*,size_t> newstr(d_str,len); // add to the end
cudaMemcpy(d_indexes+count,&newstr,sizeof(thrust::pair<const char*,size_t>),cudaMemcpyHostToDevice);
// sort the keys with attached sequence numbers
rmm::device_vector<int> seqdata(count+1);
thrust::sequence(execpol->on(0),seqdata.begin(),seqdata.end()); // [0:count]
int* d_seqdata = seqdata.data().get();
thrust::sort_by_key(execpol->on(0), d_indexes, d_indexes+(count+1), d_seqdata,
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return rhs.first!=0;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// now find the new position of the argument
// this will be where the sequence number equals the count
rmm::device_vector<int> keys(1,-1);
thrust::copy_if( execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count+1), keys.begin(),
[d_seqdata, count, d_indexes] __device__ (int idx) { return d_seqdata[idx]==count; });
cudaDeviceSynchronize();
int first = 0; // get the position back into host memory
cudaMemcpy(&first,keys.data().get(),sizeof(int),cudaMemcpyDeviceToHost);
rtn.first = first-1; // range is always
rtn.second = first; // position and previous one
if( d_str )
RMM_FREE(d_str,0);
return rtn;
}
// return category values for all indexes
int NVCategory::get_values( int* results, bool bdevmem )
{
int count = (int)size();
int* d_map = pImpl->getMapPtr();
if( count && d_map )
{
if( bdevmem )
cudaMemcpy(results,d_map,count*sizeof(int),cudaMemcpyDeviceToDevice);
else
cudaMemcpy(results,d_map,count*sizeof(int),cudaMemcpyDeviceToHost);
}
return count;
}
const int* NVCategory::values_cptr()
{
return pImpl->getMapPtr();
}
int NVCategory::get_indexes_for( unsigned int index, int* results, bool bdevmem )
{
unsigned int count = size();
if( index >= count )
return -1;
auto execpol = rmm::exec_policy(0);
int* d_map = pImpl->getMapPtr();
if( !d_map )
return 0;
int matches = thrust::count_if( execpol->on(0), d_map, d_map+count, [index] __device__(int idx) { return idx==(int)index; });
if( matches <= 0 )
return 0; // done, found nothing, not likely
if( results==0 )
return matches; // caller just wants the count
int* d_results = results;
if( !bdevmem )
RMM_ALLOC(&d_results,matches*sizeof(int),0);
thrust::counting_iterator<unsigned int> itr(0);
thrust::copy_if( execpol->on(0), itr, itr+count, d_results,
[index, d_map] __device__(unsigned int idx) { return d_map[idx]==(int)index; });
cudaDeviceSynchronize();
if( !bdevmem )
{
cudaMemcpy(results,d_results,matches*sizeof(int),cudaMemcpyDeviceToHost);
RMM_FREE(d_results,0);
}
return matches;
}
int NVCategory::get_indexes_for( const char* str, int* results, bool bdevmem )
{
int id = get_value(str);
if( id < 0 )
return id;
return get_indexes_for((unsigned int)id, results, bdevmem);
}
// creates a new instance incorporating the new strings
NVCategory* NVCategory::add_strings(NVStrings& strs)
{
// create one large index of both datasets
unsigned int count1 = size();
unsigned int count2 = strs.size();
unsigned int count = count1 + count2;
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
create_index((std::pair<const char*,size_t>*)d_indexes,count1);
strs.create_index((std::pair<const char*,size_t>*)d_indexes+count1,count2);
// build the category from this new set
return create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// creates a new instance without the specified strings
// deprecated by remove_keys?
NVCategory* NVCategory::remove_strings(NVStrings& strs)
{
auto execpol = rmm::exec_policy(0);
unsigned int count = size();
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
create_index((std::pair<const char*,size_t>*)d_indexes,count);
unsigned int delete_count = strs.size();
rmm::device_vector< thrust::pair<const char*,size_t> > deletes(delete_count);
thrust::pair<const char*,size_t>* d_deletes = deletes.data().get();
strs.create_index((std::pair<const char*,size_t>*)d_deletes,delete_count);
// this would be inefficient if strs is very large
thrust::pair<const char*,size_t>* newend = thrust::remove_if(execpol->on(0), d_indexes, d_indexes + count,
[d_deletes,delete_count] __device__ (thrust::pair<const char*,size_t> lhs) {
for( unsigned int idx=0; idx < delete_count; ++idx )
{
thrust::pair<const char*,size_t> rhs = d_deletes[idx];
if( lhs.first == rhs.first )
return true;
if( lhs.second != rhs.second )
continue;
if( custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0 )
return true;
}
return false;
});
// return value ensures a dev-sync has already been performed by thrust
count = (unsigned int)(newend - d_indexes); // new count of strings
// build the category from this new set
return create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// basically recreates the original string list
NVStrings* NVCategory::to_strings()
{
int count = (int)size();
int* d_map = pImpl->getMapPtr();
if( count==0 || d_map==0 )
return 0;
custring_view** d_strings = pImpl->getStringsPtr();
// use the map to build the indexes array
auto execpol = rmm::exec_policy(0);
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_map, d_indexes] __device__(size_t idx){
int stridx = d_map[idx];
custring_view* dstr = 0;
if( stridx >=0 )
dstr = d_strings[stridx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
//
cudaDeviceSynchronize();
// create strings from index
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
// creates a new NVStrings instance using the specified index values
NVStrings* NVCategory::gather_strings( const int* pos, unsigned int count, bool bdevmem )
{
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if( !bdevmem )
{
RMM_ALLOC((void**)&d_pos,count*sizeof(int),0);
cudaMemcpy((void*)d_pos,pos,count*sizeof(int),cudaMemcpyHostToDevice);
}
custring_view** d_strings = pImpl->getStringsPtr();
// need to check for invalid values
unsigned int size = keys_size();
rmm::device_vector<int> check(count,0);
int* d_check = check.data().get();
// use the map to build the indexes array
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_strings, d_pos, size, d_check, d_indexes] __device__(size_t idx){
int stridx = d_pos[idx];
if( (stridx < 0) || (stridx >= size) )
{
d_check[idx] = 1;
return;
}
custring_view* dstr = 0;
if( stridx >=0 )
dstr = d_strings[stridx];
if( dstr )
{
d_indexes[idx].first = (const char*)dstr->data();
d_indexes[idx].second = (size_t)dstr->size();
}
else
{
d_indexes[idx].first = 0;
d_indexes[idx].second = 0;
}
});
//
cudaDeviceSynchronize();
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
int invalidcount = thrust::reduce( execpol->on(0), d_check, d_check+count );
if( invalidcount )
throw std::out_of_range("");
// create strings from index
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
}
//
// Create a new category by gathering strings from this category.
// If specific keys are not referenced, the values are remapped.
// This is a shortcut method to calling gather_strings() and then
// just converting the resulting NVStrings instance into an new NVCategory.
//
// Example category
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
//
// Specify strings in pos parameter:
// v = 1 3 2 3 1 2 bfcfbc
// x = 0 1 1 1 x[v[idx]] = 1 (set 1 for values in v)
// y = 0 0 1 2 excl-scan(x)
//
// Remap values using:
// v[idx] = y[v[idx]] -> 021201
// New key list is copy_if of keys where x==1 -> bcf
//
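// Usage sketch (illustrative only; 'cat' is hypothetical), following the example above:
//   int pos[] = { 1,3,2,3,1,2 };                            // selects keys b,f,c,f,b,c
//   NVCategory* sub = cat->gather_and_remap(pos, 6, false); // false: pos is in host memory
//   // sub: keys b,c,f   values 0,2,1,2,0,1
//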
NVCategory* NVCategory::gather_and_remap( const int* pos, unsigned int count, bool bdevmem )
{
auto execpol = rmm::exec_policy(0);
const int* d_v = pos;
if( !bdevmem )
{
RMM_ALLOC((void**)&d_v,count*sizeof(int),0);
cudaMemcpy((void*)d_v,pos,count*sizeof(int),cudaMemcpyHostToDevice);
}
unsigned int kcount = keys_size();
// first, do bounds check on input values
int invalidcount = thrust::count_if(execpol->on(0), d_v, d_v+count,
[kcount] __device__ (int v) { return ((v < 0) || (v >= kcount)); } );
if( invalidcount )
{
if( !bdevmem )
RMM_FREE((void*)d_v,0);
throw std::out_of_range("");
}
// build x vector which has 1s for each value in v
rmm::device_vector<int> x(kcount,0);
int* d_x = x.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_v, d_x] __device__ (unsigned int idx) { d_x[d_v[idx]] = 1; });
// y vector is scan of x values
rmm::device_vector<int> y(kcount,0);
int* d_y = y.data().get();
thrust::exclusive_scan(execpol->on(0),d_x,d_x+kcount,d_y,0);
// use y to map input to new values
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(count,0);
int* d_map = pNewMap->data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<size_t>(0), count,
[d_v, d_y, d_map] __device__ (unsigned int idx) { d_map[idx] = d_y[d_v[idx]]; });
// done creating the map
NVCategory* rtn = new NVCategory;
rtn->pImpl->pMap = pNewMap;
// copy/gather the keys
custring_view_array d_keys = pImpl->getStringsPtr();
unsigned int ucount = kcount;
{ // reuse the y vector for gather positions
thrust::counting_iterator<int> citr(0);
auto nend = thrust::copy_if( execpol->on(0), citr, citr + kcount, d_y, [d_x] __device__ (const int& idx) { return d_x[idx]==1; });
ucount = (unsigned int)(nend - d_y); // how many were copied
}
// gather keys into new vector
rmm::device_vector<custring_view*> newkeys(ucount,nullptr);
thrust::gather( execpol->on(0), d_y, d_y + ucount, d_keys, newkeys.data().get() );
cudaDeviceSynchronize();
// build keylist for new category
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,newkeys.data().get(),ucount);
//
if( !bdevmem )
RMM_FREE((void*)d_v,0);
return rtn;
}
// this method simply copies the keys and the passed in values to create a new category instance
NVCategory* NVCategory::gather( const int* pos, unsigned int count, bool bdevmem )
{
unsigned int kcount = keys_size();
NVCategory* rtn = new NVCategory;
auto execpol = rmm::exec_policy(0);
if( count )
{
auto pMap = new rmm::device_vector<int>(count,0);
auto d_pos = pMap->data().get();
if( bdevmem )
cudaMemcpy(d_pos,pos,count*sizeof(int),cudaMemcpyDeviceToDevice);
else
cudaMemcpy(d_pos,pos,count*sizeof(int),cudaMemcpyHostToDevice);
// first, do bounds check on input values; also -1 is allowed
// need to re-evaluate if this check is really necessary here
int invalidcount = thrust::count_if(execpol->on(0), d_pos, d_pos+count,
[kcount] __device__ (int v) { return ((v < -1) || (v >= kcount)); } );
if( invalidcount )
{
delete pMap;
delete rtn;
throw std::out_of_range("");
}
rtn->pImpl->pMap = pMap;
}
custring_view_array d_keys = pImpl->getStringsPtr();
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_keys,kcount);
//
return rtn;
}
//
// Merge two categories and maintain the values and key positions of the this category.
// Very complicated to avoid looping over values or keys from either category.
//
// Example shows logic used:
//
// category1:--------- category2:---------
// | strs1 key1 | | strs2 key2 |
// | abbfcf -> abcf | | aadcce -> acde |
// | 012345 0123 | | 012345 0123 |
// | 011323 <-' | | 002113 <-' |
// ------------------ ------------------
//
// merge/append should result in new category:
// strs key
// abbfcfaadcce -> abcfde
// 012345
// 011323004225 <-'
//
// 1. build vector of all the keys and seq vector (x) and diff vector (y)
// concat keys key2,key1 (w); stable-sort-by-key(w,x) and
// create neg,pos sequence (x) build diff vector (y)
// a c d e a b c f -> w = a a b c c d e f
// 0 1 2 3 -1 -2 -3 -4 x = 0 -1 -2 1 -3 2 3 -4
// y = 1 0 0 1 0 0 0 0
//
// 2. compute keys diff using w,x,y:
// copy-if/gather(w) (x>=0) && (y==0) --> d e
// reduce(y) = 2 -> how many keys matched
// new key: abcf + de = abcfde
//
// a b c d e f :unique-by-key(w,x)
// ubl = 0 -2 1 2 3 -4 :x = unique val--^
// sws = 0 1 2 4 5 3 :sort new key (abcfde) with seq (012345)
//
// 3. gather new indexes for map2
// copy-if/gather(sws) where ubl>=0 --> 0 2 4 5 (remap)
// remove-if(ubl) ubl<0 --> 0 1 2 3
// sort(ubl,remap) --> 0 2 4 5
// new map2 values gathered using original map to remap values:
// 002113 -> 004225
//
// result:
// new key: abcfde
// new map: 011323004225
// abbfcfaadcce
//
// The end result is not guaranteed to be a sorted keyset.
//
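// Usage sketch (illustrative only; cat1/cat2 are hypothetical), following the example above:
//   NVCategory* merged = cat1->merge_category(*cat2);
//   // merged: keys a,b,c,f,d,e   values 0,1,1,3,2,3,0,0,4,2,2,5
//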
NVCategory* NVCategory::merge_category(NVCategory& cat2)
{
unsigned int count1 = keys_size();
unsigned int mcount1 = size();
unsigned int count2 = cat2.keys_size();
unsigned int mcount2 = cat2.size();
NVCategory* rtn = new NVCategory();
if( (count1==0) && (count2==0) )
return rtn;
unsigned int count12 = count1 + count2;
unsigned int mcount = mcount1 + mcount2;
// if either category is empty, just copy the non-empty one
if( (count1==0) || (count2==0) )
{
NVCategory* dcat = ((count1==0) ? &cat2 : this);
return dcat->copy();
}
auto execpol = rmm::exec_policy(0);
// both this cat and cat2 are non-empty
// init working vars
custring_view_array d_keys1 = pImpl->getStringsPtr();
int* d_map1 = pImpl->getMapPtr();
custring_view_array d_keys2 = cat2.pImpl->getStringsPtr();
int* d_map2 = cat2.pImpl->getMapPtr();
// create some vectors we can sort
rmm::device_vector<custring_view*> wstrs(count12); // w = keys2 + keys1
custring_view_array d_w = wstrs.data().get();
cudaMemcpy(d_w, d_keys2, count2*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
cudaMemcpy(d_w+count2, d_keys1, count1*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
rmm::device_vector<int> x(count12); // 0,1,....count2,-1,...,-count1
int* d_x = x.data().get();
// sequence and for-each-n could be combined into for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x+count2 ); // first half is 0...count2
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count1,
[d_x, count2] __device__ (int idx) { d_x[idx+count2]= -idx-1; }); // 2nd half is -1...-count1
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + count12, d_x, // preserves order for
[] __device__ (custring_view*& lhs, custring_view*& rhs) { // strings that match
return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0));
});
rmm::device_vector<int> y(count12,0); // y-vector will identify overlapped keys
int* d_y = y.data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (count12-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
int matched = thrust::reduce( execpol->on(0), d_y, d_y + count12 ); // how many keys matched
unsigned int ncount = count2 - (unsigned int)matched; // new keys count
unsigned int ucount = count1 + ncount; // total unique keys count
rmm::device_vector<custring_view*> keys(ucount,nullptr);
custring_view_array d_keys = keys.data().get(); // this will hold the merged keyset
rmm::device_vector<int> nidxs(ucount); // needed for various gather methods below
int* d_nidxs = nidxs.data().get(); // indexes of 'new' keys from key2 not in key1
{
thrust::counting_iterator<int> citr(0);
thrust::copy_if( execpol->on(0), citr, citr + (count12), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]>=0) && (d_y[idx]==0); });
cudaDeviceSynchronize();
}
// first half of merged keyset is direct copy of key1
cudaMemcpy( d_keys, d_keys1, count1*sizeof(custring_view*), cudaMemcpyDeviceToDevice);
// append the 'new' keys from key2: extract them from w as identified by nidxs
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + ncount, d_w, d_keys + count1 );
int* d_ubl = d_x; // reuse d_x for unique-bias-left values
thrust::unique_by_key( execpol->on(0), d_w, d_w + count12, d_ubl,
[] __device__ (custring_view* lhs, custring_view* rhs) {
return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs));
}); // ubl now contains new index values for key2
int* d_sws = d_y; // reuse d_y for sort-with-seq values
thrust::sequence( execpol->on(0), d_sws, d_sws + ucount); // need to assign new index values
rmm::device_vector<custring_view*> keySort(ucount); // for all the original key2 values
cudaMemcpy( keySort.data().get(), d_keys, ucount * sizeof(custring_view*), cudaMemcpyDeviceToDevice);
thrust::sort_by_key( execpol->on(0), keySort.begin(), keySort.end(), d_sws,
[] __device__ (custring_view*& lhs, custring_view*& rhs ) {
return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0));
}); // sws is now key index values for the new keyset
//printDeviceInts("d_sws",d_sws,ucount);
{
thrust::counting_iterator<int> citr(0); // generate subset of just the key2 values
thrust::copy_if( execpol->on(0), citr, citr + ucount, d_nidxs, [d_ubl] __device__ (const int& idx) { return d_ubl[idx]>=0; });
cudaDeviceSynchronize();
}
// nidxs has the indexes to the key2 values in the new keyset but they are sorted when key2 may not have been
rmm::device_vector<int> remap2(count2); // need to remap the indexes to the original positions
int* d_remap2 = remap2.data().get(); // do this by de-sorting the key2 values from the full keyset
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + count2, d_sws, d_remap2 ); // here grab new positions for key2
// first, remove the key1 indexes from the sorted sequence values; ubl will then have only key2 orig. pos values
thrust::remove_if( execpol->on(0), d_ubl, d_ubl + ucount, [] __device__ (int v) { return v<0; });
thrust::sort_by_key( execpol->on(0), d_ubl, d_ubl+count2, d_remap2 ); // does a de-sort of key2 only
// build new map
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_map = pNewMap->data().get(); // first half is identical to map1
cudaMemcpy( d_map, d_map1, mcount1 * sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy( d_map+mcount1, d_map2, mcount2 * sizeof(int), cudaMemcpyDeviceToDevice);
// remap map2 values to their new positions in the full keyset
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(mcount1), mcount2,
[d_map, d_remap2] __device__ (int idx) {
int v = d_map[idx];
if( v >= 0 )
d_map[idx] = d_remap2[v];
});
cudaDeviceSynchronize();
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_keys,ucount);
rtn->pImpl->pMap = pNewMap;
return rtn;
}
// see create_from_categories method above for logic details
NVCategory* NVCategory::merge_and_remap(NVCategory& cat2)
{
std::vector<NVCategory*> cats;
cats.push_back(this);
cats.push_back(&cat2);
return create_from_categories(cats);
}
//
// Creates a new instance adding the specified strings as keys and remapping the values.
// Pandas maintains the original position values. This function does a remap.
//
// Example:
//
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
//
// new keys: abcd
// duplicate keys can be ignored; new keyset may shift the indexes
//
// a b c f : a b c d -> w = a a b b c c d f
// 0 1 2 3 -1 -2 -3 -4 x = 0 -1 1 -2 2 -3 -4 3
//
// u = a b c d f
// ux = 0 1 2 -4 3
//
// values map: a b c f
// 0 1 2 4
//
// new values: a b b f c f
// 0 1 1 4 2 4
//
NVCategory* NVCategory::add_keys_and_remap(NVStrings& strs)
{
unsigned int kcount = keys_size();
unsigned int mcount = size();
unsigned int count = strs.size();
if( (kcount==0) && (count==0) )
return new NVCategory;
if( count==0 )
return copy();
auto execpol = rmm::exec_policy(0);
// get the keys from the argument
rmm::device_vector<custring_view*> addKeys(count,nullptr);
custring_view_array d_addKeys = addKeys.data().get();
strs.create_custring_index(d_addKeys);
NVCategory* rtn = new NVCategory;
if( kcount==0 )
{
// just take the keys; values are not affected
// need to sort and unique them
thrust::sort(execpol->on(0), d_addKeys, d_addKeys + count, [] __device__( custring_view*& lhs, custring_view*& rhs ) { return ( (lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0) ); });
// now remove duplicates from string list
auto nend = thrust::unique(execpol->on(0), d_addKeys, d_addKeys + count, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
unsigned int ucount = nend - d_addKeys;
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_addKeys,ucount);
// copy the values
if( mcount )
{
rtn->pImpl->pMap = new rmm::device_vector<int>(mcount,0);
cudaMemcpy(rtn->pImpl->getMapPtr(),pImpl->getMapPtr(),mcount*sizeof(int),cudaMemcpyDeviceToDevice);
}
return rtn;
}
// both kcount and count are non-zero
custring_view_array d_keys = pImpl->getStringsPtr();
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
cudaMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
cudaMemcpy(d_w+kcount, d_addKeys, count*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // values arranged like 0,...,(kcount-1),-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // first half is [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
auto nend = thrust::unique_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
int ucount = nend.second - d_x;
// d_w,ucount are now the keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount);
// remapping the values
rmm::device_vector<int> y(kcount,-1);
int* d_y = y.data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) {
int u = d_x[idx];
if( u >= 0 )
d_y[u] = idx;
});
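// d_y now maps each original key index [0:kcount) to its position in the merged
// keyset; the stable sort keeps an original key ahead of a matching added key,
// so every original index receives a new value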
// allocate and fill new map
int* d_map = pImpl->getMapPtr();
if( mcount && d_map )
{
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,-1);
int* d_newmap = pNewMap->data().get();
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx];
d_newmap[idx] = (v < 0 ? v : d_y[v]);
});
cudaDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
}
return rtn;
}
// Creates a new instance removing the keys matching the specified strings and remapping the values.
// Pandas maintains the original position values. Below does a remap.
//
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
//
// remove keys: bd
// unknown keys can be ignored; new keyset may shift the indexes
//
// a b c f : b d -> w = a b b c d f
// 0 1 2 3 -1 -2 x = 0 1 -1 2 -2 3
// y = 0 1 0 0 0 0
//
// remove keys: x<0 || y==1 : b d
// u = a c f
// ux = 0 2 3
//
// values map: a b c f
// 0 -1 1 2
//
// new values: a b b f c f
// 0 -1 -1 2 1 2
//
//
NVCategory* NVCategory::remove_keys_and_remap(NVStrings& strs)
{
unsigned int kcount = keys_size();
unsigned int count = strs.size();
if( kcount==0 || count==0 )
return copy();
// both kcount and count are non-zero
auto execpol = rmm::exec_policy(0);
// get the keys from the parameter
rmm::device_vector<custring_view*> removeKeys(count,nullptr);
custring_view_array d_removeKeys = removeKeys.data().get();
strs.create_custring_index(d_removeKeys);
// keys for this instance
custring_view_array d_keys = pImpl->getStringsPtr();
// combine the keys into one set to be evaluated
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
cudaMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
cudaMemcpy(d_w+kcount, d_removeKeys, count*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // 0,1,...,kcount,-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
rmm::device_vector<int> y(akcount,0); // matches resulting from
int* d_y = y.data().get(); // sort are marked with '1'
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (akcount-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
//
int cpcount = akcount; // how many keys copied
{ // scoping to get rid of temporary memory sooner
rmm::device_vector<int> nidxs(akcount); // needed for gather
int* d_nidxs = nidxs.data().get(); // indexes of keys from key1 not in key2
thrust::counting_iterator<int> citr(0);
int* nend = thrust::copy_if( execpol->on(0), citr, citr + (akcount), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]>=0) && (d_y[idx]==0); });
cpcount = nend - d_nidxs;
// the gather()s here will select the remaining keys
rmm::device_vector<custring_view*> wstrs2(cpcount);
rmm::device_vector<int> x2(cpcount);
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, wstrs.begin(), wstrs2.begin() );
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, x.begin(), x2.begin() );
wstrs.swap(wstrs2);
d_w = wstrs.data().get();
x.swap(x2);
d_x = x.data().get();
cudaDeviceSynchronize();
}
NVCategory* rtn = new NVCategory;
int ucount = cpcount; // final number of unique keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount); // and d_w are those keys
// now remap the values: positive values in d_x are [0:ucount)
thrust::fill( execpol->on(0), d_y, d_y + kcount, -1);
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) { d_y[d_x[idx]] = idx; });
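// d_y now maps old key positions to new key positions; removed keys were never
// visited above, so they keep the -1 fill value and any values referencing them
// become -1 in the remapped map below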
unsigned int mcount = size();
int* d_map = pImpl->getMapPtr();
if( mcount && d_map )
{
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_newmap = pNewMap->data().get(); // new map will go here
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx]; // get old index
d_newmap[idx] = ( v < 0 ? v : d_y[v]); // set new index (may be negative)
});
cudaDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
}
return rtn;
}
// keys that are not represented in the list of values are removed
// this may cause the values to be remapped if the keys positions are moved
NVCategory* NVCategory::remove_unused_keys_and_remap()
{
unsigned int kcount = keys_size();
unsigned int mcount = size();
if( kcount==0 )
return copy();
// both kcount and count are non-zero
auto execpol = rmm::exec_policy(0);
// keys for this instance
custring_view_array d_keys = pImpl->getStringsPtr();
int* d_map = pImpl->getMapPtr();
rmm::device_vector<unsigned int> usedkeys(kcount,0);
unsigned int* d_usedkeys = usedkeys.data().get();
// find the keys that not being used
unsigned int count = 0;
if( d_map )
{
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_usedkeys] __device__ (int idx) {
int pos = d_map[idx];
if( pos >= 0 )
d_usedkeys[pos] = 1; // race condition not important
});
// compute how many are not used
count = kcount - thrust::reduce(execpol->on(0),d_usedkeys,d_usedkeys+kcount,(unsigned int)0);
}
if( count==0 )
return copy();
// gather the unused keys
rmm::device_vector<custring_view*> removeKeys(count,nullptr);
custring_view_array d_removeKeys = removeKeys.data().get();
{
rmm::device_vector<int> nidxs(count);
int* d_nidxs = nidxs.data().get();
thrust::counting_iterator<int> citr(0);
thrust::copy_if( execpol->on(0), citr, citr + kcount, d_nidxs,
[d_usedkeys] __device__ (const int& idx) { return (d_usedkeys[idx]==0); });
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + count, d_keys, d_removeKeys );
cudaDeviceSynchronize();
}
// the remainder is common with remove_keys_and_remap
// --------------------------------------------------
// combine the keys into one set to be evaluated
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
cudaMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
cudaMemcpy(d_w+kcount, d_removeKeys, count*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // 0,1,...,kcount,-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
rmm::device_vector<int> y(akcount,0); // matches resulting from
int* d_y = y.data().get(); // sort are marked with '1'
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (akcount-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
int cpcount = akcount; // how many keys copied
{ // scoping to get rid of temporary memory sooner
rmm::device_vector<int> nidxs(akcount); // needed for gather
int* d_nidxs = nidxs.data().get(); // indexes of keys from key1 not in key2
thrust::counting_iterator<int> citr(0);
int* nend = thrust::copy_if( execpol->on(0), citr, citr + (akcount), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]>=0) && (d_y[idx]==0); });
cpcount = nend - d_nidxs;
// the gather()s here will select the remaining keys
rmm::device_vector<custring_view*> wstrs2(cpcount);
rmm::device_vector<int> x2(cpcount);
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, wstrs.begin(), wstrs2.begin() );
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, x.begin(), x2.begin() );
wstrs.swap(wstrs2);
d_w = wstrs.data().get();
x.swap(x2);
d_x = x.data().get();
cudaDeviceSynchronize();
}
NVCategory* rtn = new NVCategory;
int ucount = cpcount; // final number of unique keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount); // and d_w are those keys
// now remap the values: positive values in d_x are [0:ucount)
thrust::fill( execpol->on(0), d_y, d_y + kcount, -1);
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) { d_y[d_x[idx]] = idx; });
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_newmap = pNewMap->data().get(); // new map will go here
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx]; // get old index
d_newmap[idx] = ( v < 0 ? v : d_y[v]); // set new index (may be negative)
});
cudaDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
return rtn;
}
// Creates a new instance using the specified strings as keys causing add/remove as appropriate.
// Values are also remapped.
//
// Example:
// category :---------
// | strs key |
// | abbfcf -> abcf |
// | 012345 0123 |
// | 011323 <-' |
// ------------------
// new keyset: bcde
// 0123
// new values: a b b f c f
// -1 0 0 -1 1 -1
//
// Logic:
// a b c f : b c e d -> w = a b b c c d e f
// 0 1 2 3 -1 -2 -3 -4 x = 0 1 -1 2 -2 -4 -3 3
// y = 0 1 0 1 0 0 0 0
//
// remove keys: x>=0 && y==0 : a f -> [0,3] -> [-1,-1]
// w' = b b c c d e
// x' = 1 -1 2 -2 -4 -3
// u = b c d e
// 1 2 -4 -3
//
// need map to set values like: 0 1 2 3
// -1 0 1 -1
//
// so create map using:
// m[]=-1 -init all to -1; we don't need to worry about removed keys
// if(u[idx]>=0): m[u[idx]]=idx
//
// and create new values using:
// v[idx] = m[v[idx]] -- make sure v[idx]>=0
//
NVCategory* NVCategory::set_keys_and_remap(NVStrings& strs)
{
unsigned int kcount = keys_size();
unsigned int mcount = size();
unsigned int count = strs.size();
NVCategory* rtn = new NVCategory;
if( (kcount==0) && (count==0) )
return rtn;
if( count==0 )
{
rtn->pImpl->pMap = new rmm::device_vector<int>(mcount,-1);
return rtn;
}
auto execpol = rmm::exec_policy(0);
// get the keys
rmm::device_vector<custring_view*> newKeys(count,nullptr);
custring_view_array d_newKeys = newKeys.data().get();
strs.create_custring_index(d_newKeys);
if( kcount==0 )
{
// just take the new keys
thrust::sort(execpol->on(0), d_newKeys, d_newKeys + count, [] __device__( custring_view*& lhs, custring_view*& rhs ) { return ( (lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0) ); });
// now remove duplicates from string list
auto nend = thrust::unique(execpol->on(0), d_newKeys, d_newKeys + count, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
unsigned int ucount = nend - d_newKeys;
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_newKeys,ucount);
// copy the values
if( mcount )
{
rtn->pImpl->pMap = new rmm::device_vector<int>(mcount,0);
cudaMemcpy(rtn->pImpl->getMapPtr(),pImpl->getMapPtr(),mcount*sizeof(int),cudaMemcpyDeviceToDevice);
}
return rtn;
}
// both kcount and count are non-zero
custring_view_array d_keys = pImpl->getStringsPtr();
// combine the keys into single array
int akcount = kcount + count;
rmm::device_vector<custring_view*> wstrs(akcount);
custring_view_array d_w = wstrs.data().get();
cudaMemcpy(d_w, d_keys, kcount*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
cudaMemcpy(d_w+kcount, d_newKeys, count*sizeof(custring_view*),cudaMemcpyDeviceToDevice);
rmm::device_vector<int> x(akcount); // 0,...,(kcount-1),-1,...,-count
int* d_x = x.data().get();
// sequence and for-each-n could be combined into single for-each-n logic
thrust::sequence( execpol->on(0), d_x, d_x + kcount ); // first half is [0:kcount)
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int>(0), count, [d_x, kcount] __device__ (int idx) { d_x[idx+kcount]= -idx-1; }); // 2nd half is [-1:-count]
// stable-sort preserves order for strings that match
thrust::stable_sort_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view*& lhs, custring_view*& rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)<0) : (rhs!=0)); });
rmm::device_vector<int> y(akcount,0); // holds matches resulting from
int* d_y = y.data().get(); // sort are marked with '1'
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), (akcount-1),
[d_y, d_w] __device__ (int idx) {
custring_view* lhs = d_w[idx];
custring_view* rhs = d_w[idx+1];
if( lhs && rhs )
d_y[idx] = (int)(lhs->compare(*rhs)==0);
else
d_y[idx] = (int)(lhs==rhs);
});
//
int matched = thrust::reduce( execpol->on(0), d_y, d_y + akcount ); // how many keys matched
rmm::device_vector<int> nidxs(akcount); // needed for gather methods
int* d_nidxs = nidxs.data().get(); // indexes of keys from key1 not in key2
int cpcount = akcount; // how many keys copied
{
thrust::counting_iterator<int> citr(0);
int* nend = thrust::copy_if( execpol->on(0), citr, citr + (akcount), d_nidxs,
[d_x, d_y] __device__ (const int& idx) { return (d_x[idx]<0) || d_y[idx]; });
cpcount = nend - d_nidxs;
}
if( cpcount < akcount )
{ // if keys are removed, we need to make a copy;
// the gather()s here will select the remaining keys
rmm::device_vector<custring_view*> wstrs2(cpcount);
rmm::device_vector<int> x2(cpcount);
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, wstrs.begin(), wstrs2.begin() );
thrust::gather( execpol->on(0), d_nidxs, d_nidxs + cpcount, x.begin(), x2.begin() );
wstrs.swap(wstrs2);
d_w = wstrs.data().get();
x.swap(x2);
d_x = x.data().get();
akcount = cpcount;
}
thrust::unique_by_key( execpol->on(0), d_w, d_w + akcount, d_x, [] __device__ (custring_view* lhs, custring_view* rhs) { return ((lhs && rhs) ? (lhs->compare(*rhs)==0) : (lhs==rhs)); });
int ucount = akcount - matched;
// d_w,ucount are now the keys
NVCategoryImpl_keys_from_custringarray(rtn->pImpl,d_w,ucount);
// now remap the values: positive values in d_x are [0:ucount)
thrust::fill( execpol->on(0), d_y, d_y + kcount, -1);
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), ucount,
[d_y, d_x] __device__ (int idx) {
int u = d_x[idx];
if( u >= 0 )
d_y[u] = idx;
});
// allocate new map
int* d_map = pImpl->getMapPtr();
if( mcount && d_map )
{
rmm::device_vector<int>* pNewMap = new rmm::device_vector<int>(mcount,0);
int* d_newmap = pNewMap->data().get(); // new map goes in here
thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<int>(0), mcount,
[d_map, d_y, d_newmap] __device__ (int idx) {
int v = d_map[idx];
d_newmap[idx] = (v < 0 ? v : d_y[v]);
});
cudaDeviceSynchronize();
rtn->pImpl->pMap = pNewMap;
}
return rtn;
}
|
cfa2f4f592da9f7c02f37b43c65d5eab21751d73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "filter.cuh"
namespace NKernel {
struct TZeroWeightFilter {
__device__ ui32 operator()(float w) {
return w != 0;
}
};
template <class Filter = TZeroWeightFilter>
__global__ void FilterImpl(const float* weights,
int size,
ui32* result) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
Filter filter;
if (i < size) {
result[i] = filter(weights[i]);
}
}
void Filter(const float* weights, const ui32 size, ui32* result, TCudaStream stream) {
if (size > 0) {
const uint blockSize = 512;
const uint numBlocks = (size + blockSize - 1) / (blockSize);
FilterImpl << <numBlocks, blockSize, 0, stream>>>(weights, size, result);
}
}
} | cfa2f4f592da9f7c02f37b43c65d5eab21751d73.cu | #include "filter.cuh"
namespace NKernel {
struct TZeroWeightFilter {
__device__ ui32 operator()(float w) {
return w != 0;
}
};
template <class Filter = TZeroWeightFilter>
__global__ void FilterImpl(const float* weights,
int size,
ui32* result) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
Filter filter;
if (i < size) {
result[i] = filter(weights[i]);
}
}
void Filter(const float* weights, const ui32 size, ui32* result, TCudaStream stream) {
if (size > 0) {
const uint blockSize = 512;
const uint numBlocks = (size + blockSize - 1) / (blockSize);
FilterImpl << <numBlocks, blockSize, 0, stream>>>(weights, size, result);
}
}
} |
f654f9fe192f3400d82dab22c6120adadf9beb3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#define TPB 256
#define ARRAY_SIZE 10000
#define N (ARRAY_SIZE/TPB + 1)
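// N = number of thread blocks, i.e. ceil(ARRAY_SIZE / TPB); the "+ 1" can over-provision
// one extra block when ARRAY_SIZE is an exact multiple of TPB, which is harmless because
// the kernel bounds-checks i < ARRAY_SIZE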
using namespace std;
__global__ void saxpy(float *x, float *y, const int a)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < ARRAY_SIZE) {
y[i] = a * x[i] + y[i];
}
}
int main()
{
float *x = NULL; // pointer to array of floats on host
float *y = NULL; // pointer to array of floats on host
float *result = NULL; // pointer to array that stores the results of SAXPY on the CPU
float *d_x = NULL; // pointer to array of floats on device
float *d_y = NULL; // pointer to array of floats on device
float *d_result = NULL; // pointer to array that stores the results of SAXPY on the GPU
int i = 0;
const int a = 3; // value of a in a*x + y
// Allocate memory for arrays on CPU
x = (float*)malloc(ARRAY_SIZE * sizeof(float));
y = (float*)malloc(ARRAY_SIZE * sizeof(float));
result = (float*)malloc(ARRAY_SIZE * sizeof(float));
d_result = (float*)malloc(ARRAY_SIZE * sizeof(float));
// Allocate memory for arrays on device
hipMalloc(&d_x, ARRAY_SIZE * sizeof(float));
hipMalloc(&d_y, ARRAY_SIZE * sizeof(float));
// Initialize with random values on host
for (int i = 0; i < ARRAY_SIZE; i++) {
x[i] = rand() % 1000;
y[i] = rand() % 1000;
}
// Copy random values to device
hipMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
printf("\nComputing SAXPY on the CPU...");
for (i = 0; i < ARRAY_SIZE; i++) {
result[i] = a * x[i] + y[i];
}
printf("Done!");
printf("\n\nComputing SAXPY on the GPU...");
hipLaunchKernelGGL(( saxpy), dim3(N), dim3(TPB), 0, 0, d_x, d_y, a);
printf("Done!");
// comparing the results of the two versions
printf("\n\nComparing the output for each implementation...");
hipMemcpy(d_result, d_y, ARRAY_SIZE * sizeof(float), hipMemcpyDeviceToHost);
int flag_comparison = 0;
for (i = 0; i < ARRAY_SIZE; i++) {
if(abs(result[i] - d_result[i]) > 1)
{
flag_comparison = 1;
break;
}
}
if(flag_comparison == 0)
{
printf("Correct!");
}
else
{
printf("Incorrect!");
}
free(x);
free(y);
hipFree(d_x);
hipFree(d_y);
return 0;
}
| f654f9fe192f3400d82dab22c6120adadf9beb3d.cu | #include <stdio.h>
#include <iostream>
#include <math.h>
#define TPB 256
#define ARRAY_SIZE 10000
#define N (ARRAY_SIZE/TPB + 1)
using namespace std;
__global__ void saxpy(float *x, float *y, const int a)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < ARRAY_SIZE) {
y[i] = a * x[i] + y[i];
}
}
int main()
{
float *x = NULL; // pointer to array of floats on host
float *y = NULL; // pointer to array of floats on host
float *result = NULL; // pointer to array that stores the results of SAXPY on the CPU
float *d_x = NULL; // pointer to array of floats on device
float *d_y = NULL; // pointer to array of floats on device
float *d_result = NULL; // pointer to array that stores the results of SAXPY on the GPU
int i = 0;
const int a = 3; // value of a in a*x + y
// Allocate memory for arrays on CPU
x = (float*)malloc(ARRAY_SIZE * sizeof(float));
y = (float*)malloc(ARRAY_SIZE * sizeof(float));
result = (float*)malloc(ARRAY_SIZE * sizeof(float));
d_result = (float*)malloc(ARRAY_SIZE * sizeof(float));
// Allocate memory for arrays on device
cudaMalloc(&d_x, ARRAY_SIZE * sizeof(float));
cudaMalloc(&d_y, ARRAY_SIZE * sizeof(float));
// Initialize with random values on host
for (int i = 0; i < ARRAY_SIZE; i++) {
x[i] = rand() % 1000;
y[i] = rand() % 1000;
}
// Copy random values to device
cudaMemcpy(d_x, x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
printf("\nComputing SAXPY on the CPU...");
for (i = 0; i < ARRAY_SIZE; i++) {
result[i] = a * x[i] + y[i];
}
printf("Done!");
printf("\n\nComputing SAXPY on the GPU...");
saxpy<<<N, TPB>>>(d_x, d_y, a);
printf("Done!");
// comparing the results of the two versions
printf("\n\nComparing the output for each implementation...");
cudaMemcpy(d_result, d_y, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
int flag_comparison = 0;
for (i = 0; i < ARRAY_SIZE; i++) {
if(abs(result[i] - d_result[i]) > 1)
{
flag_comparison = 1;
break;
}
}
if(flag_comparison == 0)
{
printf("Correct!");
}
else
{
printf("Incorrect!");
}
free(x);
free(y);
cudaFree(d_x);
cudaFree(d_y);
return 0;
}
|
c151e91ee0ecf49e09f82d6ecf1ff4451c7724dc.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Example of integrating CUDA functions into an existing
* application / framework.
* Host part of the device code.
* Compiled with Cuda compiler.
*/
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C" void computeGold(char *reference, char *idata, const unsigned int len);
extern "C" void computeGold2(int2 *reference, int2 *idata, const unsigned int len);
///////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_odata memory to process (in and out)
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(int *g_data)
{
// write data to global memory
const unsigned int tid = threadIdx.x;
int data = g_data[tid];
// use integer arithmetic to process all four bytes with one thread
// this serializes the execution, but is the simplest solutions to avoid
// bank conflicts for this very low number of threads
// in general it is more efficient to process each byte by a separate thread,
// to avoid bank conflicts the access pattern should be
// g_data[4 * wtid + wid], where wtid is the thread id within the half warp
// and wid is the warp id
// see also the programming guide for a more in depth discussion.
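// Concretely: each ((data << k) >> 24) arithmetically shifts one byte of the packed
// word into the low 8 bits (sign-extended), 10 is subtracted from every byte, and the
// four results are re-packed into a single int; i.e. each input char is decremented by 10.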
g_data[tid] = ((((data << 0) >> 24) - 10) << 24)
| ((((data << 8) >> 24) - 10) << 16)
| ((((data << 16) >> 24) - 10) << 8)
| ((((data << 24) >> 24) - 10) << 0);
}
///////////////////////////////////////////////////////////////////////////////
//! Demonstration that int2 data can be used in the cpp code
//! @param g_odata memory to process (in and out)
///////////////////////////////////////////////////////////////////////////////
__global__ void
kernel2(int2 *g_data)
{
// write data to global memory
const unsigned int tid = threadIdx.x;
int2 data = g_data[tid];
// use integer arithmetic to process all four bytes with one thread
// this serializes the execution, but is the simplest solutions to avoid
// bank conflicts for this very low number of threads
// in general it is more efficient to process each byte by a separate thread,
// to avoid bank conflicts the access pattern should be
// g_data[4 * wtid + wid], where wtid is the thread id within the half warp
// and wid is the warp id
// see also the programming guide for a more in depth discussion.
g_data[tid].x = data.x - data.y;
}
////////////////////////////////////////////////////////////////////////////////
//! Entry point for Cuda functionality on host side
//! @param argc command line argument count
//! @param argv command line arguments
//! @param data data to process on the device
//! @param len len of \a data
////////////////////////////////////////////////////////////////////////////////
extern "C" bool
runTest(const int argc, const char **argv, char *data, int2 *data_int2, unsigned int len)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
const unsigned int num_threads = len / 4;
assert(0 == (len % 4));
const unsigned int mem_size = sizeof(char) * len;
const unsigned int mem_size_int2 = sizeof(int2) * len;
// allocate device memory
char *d_data;
checkCudaErrors(hipMalloc((void **) &d_data, mem_size));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_data, data, mem_size,
hipMemcpyHostToDevice));
// allocate device memory for int2 version
int2 *d_data_int2;
checkCudaErrors(hipMalloc((void **) &d_data_int2, mem_size_int2));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_data_int2, data_int2, mem_size_int2,
hipMemcpyHostToDevice));
// setup execution parameters
dim3 grid(1, 1, 1);
dim3 threads(num_threads, 1, 1);
dim3 threads2(len, 1, 1); // more threads needed for the separate int2 version
// execute the kernel
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads) , 0, 0, (int *) d_data);
hipLaunchKernelGGL(( kernel2), dim3(grid), dim3(threads2) , 0, 0, d_data_int2);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
// compute reference solutions
char *reference = (char *) malloc(mem_size);
computeGold(reference, data, len);
int2 *reference2 = (int2 *) malloc(mem_size_int2);
computeGold2(reference2, data_int2, len);
// copy results from device to host
checkCudaErrors(hipMemcpy(data, d_data, mem_size,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(data_int2, d_data_int2, mem_size_int2,
hipMemcpyDeviceToHost));
// check result
bool success = true;
for (unsigned int i = 0; i < len; i++)
{
if (reference[i] != data[i] ||
reference2[i].x != data_int2[i].x ||
reference2[i].y != data_int2[i].y)
{
success = false;
}
}
// cleanup memory
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipFree(d_data_int2));
free(reference);
free(reference2);
return success;
}
| c151e91ee0ecf49e09f82d6ecf1ff4451c7724dc.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Example of integrating CUDA functions into an existing
* application / framework.
* Host part of the device code.
* Compiled with Cuda compiler.
*/
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C" void computeGold(char *reference, char *idata, const unsigned int len);
extern "C" void computeGold2(int2 *reference, int2 *idata, const unsigned int len);
///////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_odata memory to process (in and out)
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(int *g_data)
{
// write data to global memory
const unsigned int tid = threadIdx.x;
int data = g_data[tid];
// use integer arithmetic to process all four bytes with one thread
// this serializes the execution, but is the simplest solutions to avoid
// bank conflicts for this very low number of threads
// in general it is more efficient to process each byte by a separate thread,
// to avoid bank conflicts the access pattern should be
// g_data[4 * wtid + wid], where wtid is the thread id within the half warp
// and wid is the warp id
// see also the programming guide for a more in depth discussion.
g_data[tid] = ((((data << 0) >> 24) - 10) << 24)
| ((((data << 8) >> 24) - 10) << 16)
| ((((data << 16) >> 24) - 10) << 8)
| ((((data << 24) >> 24) - 10) << 0);
}
///////////////////////////////////////////////////////////////////////////////
//! Demonstration that int2 data can be used in the cpp code
//! @param g_odata memory to process (in and out)
///////////////////////////////////////////////////////////////////////////////
__global__ void
kernel2(int2 *g_data)
{
// write data to global memory
const unsigned int tid = threadIdx.x;
int2 data = g_data[tid];
// use integer arithmetic to process all four bytes with one thread
// this serializes the execution, but is the simplest solutions to avoid
// bank conflicts for this very low number of threads
// in general it is more efficient to process each byte by a separate thread,
// to avoid bank conflicts the access pattern should be
// g_data[4 * wtid + wid], where wtid is the thread id within the half warp
// and wid is the warp id
// see also the programming guide for a more in depth discussion.
g_data[tid].x = data.x - data.y;
}
////////////////////////////////////////////////////////////////////////////////
//! Entry point for Cuda functionality on host side
//! @param argc command line argument count
//! @param argv command line arguments
//! @param data data to process on the device
//! @param len len of \a data
////////////////////////////////////////////////////////////////////////////////
extern "C" bool
runTest(const int argc, const char **argv, char *data, int2 *data_int2, unsigned int len)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
const unsigned int num_threads = len / 4;
assert(0 == (len % 4));
const unsigned int mem_size = sizeof(char) * len;
const unsigned int mem_size_int2 = sizeof(int2) * len;
// allocate device memory
char *d_data;
checkCudaErrors(cudaMalloc((void **) &d_data, mem_size));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_data, data, mem_size,
cudaMemcpyHostToDevice));
// allocate device memory for int2 version
int2 *d_data_int2;
checkCudaErrors(cudaMalloc((void **) &d_data_int2, mem_size_int2));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_data_int2, data_int2, mem_size_int2,
cudaMemcpyHostToDevice));
// setup execution parameters
dim3 grid(1, 1, 1);
dim3 threads(num_threads, 1, 1);
dim3 threads2(len, 1, 1); // more threads needed for the separate int2 version
// execute the kernel
kernel<<< grid, threads >>>((int *) d_data);
kernel2<<< grid, threads2 >>>(d_data_int2);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
// compute reference solutions
char *reference = (char *) malloc(mem_size);
computeGold(reference, data, len);
int2 *reference2 = (int2 *) malloc(mem_size_int2);
computeGold2(reference2, data_int2, len);
// copy results from device to host
checkCudaErrors(cudaMemcpy(data, d_data, mem_size,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(data_int2, d_data_int2, mem_size_int2,
cudaMemcpyDeviceToHost));
// check result
bool success = true;
for (unsigned int i = 0; i < len; i++)
{
if (reference[i] != data[i] ||
reference2[i].x != data_int2[i].x ||
reference2[i].y != data_int2[i].y)
{
success = false;
}
}
// cleanup memory
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaFree(d_data_int2));
free(reference);
free(reference2);
return success;
}
|
d81ac5cff82dd65914cd0a9ac1c1c557755570df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gather_hip.cuh"
#define TB_SIZE 1024 // 2^10 (has to be a power of 2)
using std::cout;
using std::endl;
using std::vector;
void print(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ") ";
}
void println(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ")" << endl;
}
void print(int a) {
cout << a << " ";
}
__device__ __host__ float3 operator+(const float3 &a, const float3 &b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ void operator+=(float3 &a, float3 b)
{
a.x += b.x; a.y += b.y; a.z += b.z;
}
inline __host__ __device__ float3 operator/(const float3 &a, const int b) {
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ void operator/=(float3 &a, const int b) {
if (b != 0) {
a.x /= b; a.y /= b; a.z /= b;
}
else {
printf("Zero division!\n");
}
}
__device__ void atomicAdd(float3 *d_val, float3 val) {
atomicAdd(&((*d_val).x), val.x);
atomicAdd(&((*d_val).y), val.y);
atomicAdd(&((*d_val).z), val.z);
}
__global__ void calculate_mean_per_key_kernel(int n, int k, int *d_keys, float3 *d_values, float3 *d_means, int *d_counts) {
const int block = blockIdx.x; // responsible for a mean of the corresponding key
const int thread = threadIdx.x;
__shared__ float3 sh_partial_sum[TB_SIZE];
__shared__ int sh_partial_count[TB_SIZE];
// Iterating over whole array in chunks of block size (all threads are active)
for (int i = 0; i * blockDim.x < n; ++i) { // blockDim.x == TB_SIZE
const int pos = i * blockDim.x + thread;
// Guard the global loads: threads past the end of the array must not read
// d_keys/d_values, but they still write zeros to their shared-memory slot and
// stay in the loop so every thread reaches the __syncthreads() below.
const bool in_range = pos < n;
const int key = in_range ? d_keys[pos] : -1;
sh_partial_sum[thread] = (in_range && key == block) ? d_values[pos] : make_float3(0.0f, 0.0f, 0.0f);
sh_partial_count[thread] = (in_range && key == block) ? 1 : 0;
__syncthreads();
// Reduce using shared memory (TB_SIZE is a power of 2),
// better thread-index assignment, no modulo operator, less idle threads
for (int s = 1; s < blockDim.x; s <<= 1) { // s *= 2
int index = 2 * s * thread;
if (index < blockDim.x) {
sh_partial_sum[index] += sh_partial_sum[index + s];
sh_partial_count[index] += sh_partial_count[index + s];
}
__syncthreads();
}
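// Example with blockDim.x = 8: s=1 adds slot pairs (0,1)(2,3)(4,5)(6,7) into 0,2,4,6;
// s=2 adds (0,2)(4,6) into 0 and 4; s=4 adds (0,4) into 0, leaving the block total in
// sh_partial_sum[0] / sh_partial_count[0].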
// Write to global memory (no conflicts, because only one block is responsible for writing to given key position)
// No need for another kernel runs of final reductions.
if (thread == 0) {
if (i == 0) {
d_means[block] = sh_partial_sum[0];
d_counts[block] = sh_partial_count[0];
}
else {
d_means[block] += sh_partial_sum[0];
d_counts[block] += sh_partial_count[0];
}
}
__syncthreads();
}
// Finally, divide sum by count
if (thread == 0) {
d_means[block] /= d_counts[block];
}
}
void calculate_mean_per_key_gather(int n, int k, int *d_keys, float3 *d_values, float3 *d_means) {
int *d_counts;
hipMalloc(&d_counts, k*sizeof(int));
hipMemset(d_counts, 0, k*sizeof(int));
hipLaunchKernelGGL(( calculate_mean_per_key_kernel), dim3(k), dim3(TB_SIZE), 0, 0, n, k, d_keys, d_values, d_means, d_counts);
hipDeviceSynchronize();
} | d81ac5cff82dd65914cd0a9ac1c1c557755570df.cu | #include "gather.cuh"
#define TB_SIZE 1024 // 2^9 (has to be power of 2)
using std::cout;
using std::endl;
using std::vector;
void print(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ") ";
}
void println(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ")" << endl;
}
void print(int a) {
cout << a << " ";
}
__device__ __host__ float3 operator+(const float3 &a, const float3 &b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ void operator+=(float3 &a, float3 b)
{
a.x += b.x; a.y += b.y; a.z += b.z;
}
inline __host__ __device__ float3 operator/(const float3 &a, const int b) {
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ void operator/=(float3 &a, const int b) {
if (b != 0) {
a.x /= b; a.y /= b; a.z /= b;
}
else {
printf("Zero division!\n");
}
}
__device__ void atomicAdd(float3 *d_val, float3 val) {
atomicAdd(&((*d_val).x), val.x);
atomicAdd(&((*d_val).y), val.y);
atomicAdd(&((*d_val).z), val.z);
}
__global__ void calculate_mean_per_key_kernel(int n, int k, int *d_keys, float3 *d_values, float3 *d_means, int *d_counts) {
const int block = blockIdx.x; // responsible for a mean of the corresponding key
const int thread = threadIdx.x;
__shared__ float3 sh_partial_sum[TB_SIZE];
__shared__ int sh_partial_count[TB_SIZE];
// Iterating over whole array in chunks of block size (all threads are active)
for (int i = 0; i * blockDim.x < n; ++i) { // blockDim.x == TB_SIZE
const int pos = i * blockDim.x + thread;
// Guard the global loads: threads past the end of the array must not read
// d_keys/d_values, but they still write zeros to their shared-memory slot and
// stay in the loop so every thread reaches the __syncthreads() below.
const bool in_range = pos < n;
const int key = in_range ? d_keys[pos] : -1;
sh_partial_sum[thread] = (in_range && key == block) ? d_values[pos] : make_float3(0.0f, 0.0f, 0.0f);
sh_partial_count[thread] = (in_range && key == block) ? 1 : 0;
__syncthreads();
// Reduce using shared memory (TB_SIZE is a power of 2),
// better thread-index assignment, no modulo operator, less idle threads
for (int s = 1; s < blockDim.x; s <<= 1) { // s *= 2
int index = 2 * s * thread;
if (index < blockDim.x) {
sh_partial_sum[index] += sh_partial_sum[index + s];
sh_partial_count[index] += sh_partial_count[index + s];
}
__syncthreads();
}
// Write to global memory (no conflicts, because only one block is responsible for writing to given key position)
// No need for another kernel runs of final reductions.
if (thread == 0) {
if (i == 0) {
d_means[block] = sh_partial_sum[0];
d_counts[block] = sh_partial_count[0];
}
else {
d_means[block] += sh_partial_sum[0];
d_counts[block] += sh_partial_count[0];
}
}
__syncthreads();
}
// Finally, divide sum by count
if (thread == 0) {
d_means[block] /= d_counts[block];
}
}
void calculate_mean_per_key_gather(int n, int k, int *d_keys, float3 *d_values, float3 *d_means) {
int *d_counts;
cudaMalloc(&d_counts, k*sizeof(int));
cudaMemset(d_counts, 0, k*sizeof(int));
calculate_mean_per_key_kernel<<<k, TB_SIZE>>> (n, k, d_keys, d_values, d_means, d_counts);
cudaDeviceSynchronize();
} |
c025dd26c9e14bfc42cff5893d662083cd8fe1e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "momentumKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int numberIterations = 1;
int *parameterIndices = NULL;
hipMalloc(¶meterIndices, XSIZE*YSIZE);
int *counts = NULL;
hipMalloc(&counts, XSIZE*YSIZE);
int dimension = 1;
float *parameters = NULL;
hipMalloc(¶meters, XSIZE*YSIZE);
float *gradient = NULL;
hipMalloc(&gradient, XSIZE*YSIZE);
float learningRate = 1;
float momentum = 1;
float *history = NULL;
hipMalloc(&history, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( momentumKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,momentum,history);
hipDeviceSynchronize();
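// The 10 launches below act as a warm-up so the timed loop further down is not
// skewed by one-time initialization costs.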
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( momentumKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,momentum,history);
}
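// Note: kernel launches are asynchronous; without a hipDeviceSynchronize() after the
// timed loop, the elapsed time below largely reflects launch/queueing throughput rather
// than guaranteed completion of all 1000 kernels.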
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( momentumKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,momentum,history);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c025dd26c9e14bfc42cff5893d662083cd8fe1e9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "momentumKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int numberIterations = 1;
int *parameterIndices = NULL;
cudaMalloc(¶meterIndices, XSIZE*YSIZE);
int *counts = NULL;
cudaMalloc(&counts, XSIZE*YSIZE);
int dimension = 1;
float *parameters = NULL;
cudaMalloc(¶meters, XSIZE*YSIZE);
float *gradient = NULL;
cudaMalloc(&gradient, XSIZE*YSIZE);
float learningRate = 1;
float momentum = 1;
float *history = NULL;
cudaMalloc(&history, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
momentumKernel<<<gridBlock,threadBlock>>>(numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,momentum,history);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
momentumKernel<<<gridBlock,threadBlock>>>(numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,momentum,history);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
momentumKernel<<<gridBlock,threadBlock>>>(numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,momentum,history);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6a9ed28db96931fe2d5ebddcb8263156d661bdbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void cuda_hello()
{
}

int main() {
    hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, );
    printf("Hello World from GPU!\n");
    return 0;
}

| 6a9ed28db96931fe2d5ebddcb8263156d661bdbe.cu | #include <stdio.h>
2
3 __global__ void cuda_hello()
4 {
5 }
6
7 int main() {
8 cuda_hello<<<1,1>>>();
9 printf("Hello World from GPU!\n");
10 return 0;
11 }
12
|
94f846c7a9801fad28afa0925f123eed2a40e115.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: mphoward
/*! \file LoadBalancerGPU.cu
\brief Implementation the GPU functions for load balancing
*/
#ifdef ENABLE_MPI
#include "LoadBalancerGPU.cuh"
#include "hipcub/hipcub.hpp"
//! Mark the particles that are off rank
/*!
* \param d_ranks The current rank of each particle
* \param d_pos Particle positions
* \param d_cart_ranks Map from Cartesian coordinates to rank number
* \param rank_pos Cartesian coordinates of current rank
* \param box Local box
* \param di Domain indexer
* \param N Number of local particles
*
* Using a thread per particle, the current rank of each particle is computed assuming that a particle cannot migrate
* more than a single rank in any direction. The Cartesian rank of the particle is computed, and mapped back to a physical
* rank.
*/
__global__ void gpu_load_balance_mark_rank_kernel(unsigned int *d_ranks,
const Scalar4 *d_pos,
const unsigned int *d_cart_ranks,
const uint3 rank_pos,
const BoxDim box,
const Index3D di,
const unsigned int N)
{
// particle index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 f = box.makeFraction(pos);
int3 grid_pos = make_int3(rank_pos.x, rank_pos.y, rank_pos.z);
if (f.x >= Scalar(1.0)) ++grid_pos.x;
if (f.x < Scalar(0.0)) --grid_pos.x;
if (f.y >= Scalar(1.0)) ++grid_pos.y;
if (f.y < Scalar(0.0)) --grid_pos.y;
if (f.z >= Scalar(1.0)) ++grid_pos.z;
if (f.z < Scalar(0.0)) --grid_pos.z;
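// wrap the Cartesian grid coordinate periodically: a particle that stepped past the
// last rank in a dimension belongs to the first rank in that dimension, and vice versa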
if (grid_pos.x == (int)di.getW())
grid_pos.x = 0;
else if (grid_pos.x < 0)
grid_pos.x += di.getW();
if (grid_pos.y == (int)di.getH())
grid_pos.y = 0;
else if (grid_pos.y < 0)
grid_pos.y += di.getH();
if (grid_pos.z == (int)di.getD())
grid_pos.z = 0;
else if (grid_pos.z < 0)
grid_pos.z += di.getD();
const unsigned int cur_rank = d_cart_ranks[di(grid_pos.x,grid_pos.y,grid_pos.z)];
d_ranks[idx] = cur_rank;
}
/*!
* \param d_ranks The current rank of each particle
* \param d_pos Particle positions
* \param d_cart_ranks Map from Cartesian coordinates to rank number
* \param rank_pos Cartesian coordinates of current rank
* \param box Local box
* \param di Domain indexer
* \param N Number of local particles
* \param block_size Kernel launch block size
*
* This simply a kernel driver, see gpu_load_balance_mark_rank_kernel for details.
*/
void gpu_load_balance_mark_rank(unsigned int *d_ranks,
const Scalar4 *d_pos,
const unsigned int *d_cart_ranks,
const uint3 rank_pos,
const BoxDim& box,
const Index3D& di,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_load_balance_mark_rank_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
unsigned int n_blocks = N/run_block_size + 1;
hipLaunchKernelGGL(( gpu_load_balance_mark_rank_kernel), dim3(n_blocks), dim3(run_block_size), 0, 0, d_ranks, d_pos, d_cart_ranks, rank_pos, box, di, N);
}
//! Functor for selecting ranks not equal to the current rank
struct NotEqual
{
unsigned int not_eq_val; //!< Value to test if not equal to
__host__ __device__ __forceinline__
NotEqual(unsigned int _not_eq_val) : not_eq_val(_not_eq_val) {}
__host__ __device__ __forceinline__
bool operator()(const unsigned int &a) const
{
return (a != not_eq_val);
}
};
/*!
* \param d_off_rank (Reduced) list of particles that are off the current rank
* \param d_n_select Number of particles that are off the current rank
* \param d_ranks The current rank of each particle
* \param d_tmp_storage Temporary storage array, or NULL
* \param tmp_storage_bytes Size of temporary storage, or 0
* \param N Number of local particles
* \param cur_rank Current rank index
*
* This function uses the CUB DeviceSelect::If primitive to select particles that are off rank using the NotEqual
* functor. As is usual, this function must be called twice in order to perform the selection. If \a d_tmp_storage
* is NULL, the temporary storage requirement is computed and saved in \a tmp_storage_bytes. This is externally
* allocated from the CachedAllocator. When called the second time, the ranks of the particles not on the current
* rank are saved in \a d_off_rank, and the number of these particles is saved in \a d_n_select.
*/
void gpu_load_balance_select_off_rank(unsigned int *d_off_rank,
unsigned int *d_n_select,
unsigned int *d_ranks,
void *d_tmp_storage,
size_t &tmp_storage_bytes,
const unsigned int N,
const unsigned int cur_rank)
{
// final precaution against calling with an empty array
if (N == 0) return;
NotEqual select_op(cur_rank);
hipcub::DeviceSelect::If(d_tmp_storage, tmp_storage_bytes, d_ranks, d_off_rank, d_n_select, N, select_op);
}
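/*
 * A minimal sketch of the two-pass calling convention described above. The function name and the
 * direct hipMalloc / hipFree of the scratch buffer are illustrative assumptions only; the actual
 * caller obtains the temporary storage from the CachedAllocator instead.
 */
inline void example_select_off_rank_two_pass(unsigned int *d_off_rank,
                                             unsigned int *d_n_select,
                                             unsigned int *d_ranks,
                                             const unsigned int N,
                                             const unsigned int cur_rank)
    {
    // first pass: a NULL d_tmp_storage only computes the required tmp_storage_bytes
    void *d_tmp_storage = NULL;
    size_t tmp_storage_bytes = 0;
    gpu_load_balance_select_off_rank(d_off_rank, d_n_select, d_ranks, d_tmp_storage, tmp_storage_bytes, N, cur_rank);
    // allocate the scratch space that was requested (illustrative direct allocation)
    hipMalloc(&d_tmp_storage, tmp_storage_bytes);
    // second pass: performs the selection, filling d_off_rank and d_n_select
    gpu_load_balance_select_off_rank(d_off_rank, d_n_select, d_ranks, d_tmp_storage, tmp_storage_bytes, N, cur_rank);
    hipFree(d_tmp_storage);
    }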
#endif // ENABLE_MPI
| 94f846c7a9801fad28afa0925f123eed2a40e115.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: mphoward
/*! \file LoadBalancerGPU.cu
    \brief Implementation of the GPU functions for load balancing
*/
#ifdef ENABLE_MPI
#include "LoadBalancerGPU.cuh"
#include "cub/cub.cuh"
//! Mark the particles that are off rank
/*!
* \param d_ranks The current rank of each particle
* \param d_pos Particle positions
* \param d_cart_ranks Map from Cartesian coordinates to rank number
* \param rank_pos Cartesian coordinates of current rank
* \param box Local box
* \param di Domain indexer
* \param N Number of local particles
*
* Using a thread per particle, the current rank of each particle is computed assuming that a particle cannot migrate
* more than a single rank in any direction. The Cartesian rank of the particle is computed, and mapped back to a physical
* rank.
*/
__global__ void gpu_load_balance_mark_rank_kernel(unsigned int *d_ranks,
const Scalar4 *d_pos,
const unsigned int *d_cart_ranks,
const uint3 rank_pos,
const BoxDim box,
const Index3D di,
const unsigned int N)
{
// particle index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 f = box.makeFraction(pos);
int3 grid_pos = make_int3(rank_pos.x, rank_pos.y, rank_pos.z);
if (f.x >= Scalar(1.0)) ++grid_pos.x;
if (f.x < Scalar(0.0)) --grid_pos.x;
if (f.y >= Scalar(1.0)) ++grid_pos.y;
if (f.y < Scalar(0.0)) --grid_pos.y;
if (f.z >= Scalar(1.0)) ++grid_pos.z;
if (f.z < Scalar(0.0)) --grid_pos.z;
if (grid_pos.x == (int)di.getW())
grid_pos.x = 0;
else if (grid_pos.x < 0)
grid_pos.x += di.getW();
if (grid_pos.y == (int)di.getH())
grid_pos.y = 0;
else if (grid_pos.y < 0)
grid_pos.y += di.getH();
if (grid_pos.z == (int)di.getD())
grid_pos.z = 0;
else if (grid_pos.z < 0)
grid_pos.z += di.getD();
const unsigned int cur_rank = d_cart_ranks[di(grid_pos.x,grid_pos.y,grid_pos.z)];
d_ranks[idx] = cur_rank;
}
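// Worked example of the wrap-around above (hypothetical numbers): assuming a 4x4x4 decomposition
// in di and rank_pos = (3,1,0), a particle whose box fraction satisfies f.x >= 1 has left the local
// box through +x, so grid_pos.x becomes 4 and wraps back to 0; the particle is then assigned the
// rank stored in d_cart_ranks[di(0,1,0)]. This only holds under the stated assumption that a
// particle cannot migrate more than a single rank in any direction.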
/*!
* \param d_ranks The current rank of each particle
* \param d_pos Particle positions
* \param d_cart_ranks Map from Cartesian coordinates to rank number
* \param rank_pos Cartesian coordinates of current rank
* \param box Local box
* \param di Domain indexer
* \param N Number of local particles
* \param block_size Kernel launch block size
*
 * This is simply a kernel driver; see gpu_load_balance_mark_rank_kernel for details.
*/
void gpu_load_balance_mark_rank(unsigned int *d_ranks,
const Scalar4 *d_pos,
const unsigned int *d_cart_ranks,
const uint3 rank_pos,
const BoxDim& box,
const Index3D& di,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_load_balance_mark_rank_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
unsigned int n_blocks = N/run_block_size + 1;
gpu_load_balance_mark_rank_kernel<<<n_blocks, run_block_size>>>(d_ranks, d_pos, d_cart_ranks, rank_pos, box, di, N);
}
//! Functor for selecting ranks not equal to the current rank
struct NotEqual
{
unsigned int not_eq_val; //!< Value to test if not equal to
__host__ __device__ __forceinline__
NotEqual(unsigned int _not_eq_val) : not_eq_val(_not_eq_val) {}
__host__ __device__ __forceinline__
bool operator()(const unsigned int &a) const
{
return (a != not_eq_val);
}
};
/*!
* \param d_off_rank (Reduced) list of particles that are off the current rank
* \param d_n_select Number of particles that are off the current rank
* \param d_ranks The current rank of each particle
* \param d_tmp_storage Temporary storage array, or NULL
* \param tmp_storage_bytes Size of temporary storage, or 0
* \param N Number of local particles
* \param cur_rank Current rank index
*
* This function uses the CUB DeviceSelect::If primitive to select particles that are off rank using the NotEqual
* functor. As is usual, this function must be called twice in order to perform the selection. If \a d_tmp_storage
* is NULL, the temporary storage requirement is computed and saved in \a tmp_storage_bytes. This is externally
* allocated from the CachedAllocator. When called the second time, the ranks of the particles not on the current
* rank are saved in \a d_off_rank, and the number of these particles is saved in \a d_n_select.
*/
void gpu_load_balance_select_off_rank(unsigned int *d_off_rank,
unsigned int *d_n_select,
unsigned int *d_ranks,
void *d_tmp_storage,
size_t &tmp_storage_bytes,
const unsigned int N,
const unsigned int cur_rank)
{
// final precaution against calling with an empty array
if (N == 0) return;
NotEqual select_op(cur_rank);
cub::DeviceSelect::If(d_tmp_storage, tmp_storage_bytes, d_ranks, d_off_rank, d_n_select, N, select_op);
}
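/*
 * A minimal sketch of the two-pass calling convention described above. The function name and the
 * direct cudaMalloc / cudaFree of the scratch buffer are illustrative assumptions only; the actual
 * caller obtains the temporary storage from the CachedAllocator instead.
 */
inline void example_select_off_rank_two_pass(unsigned int *d_off_rank,
                                             unsigned int *d_n_select,
                                             unsigned int *d_ranks,
                                             const unsigned int N,
                                             const unsigned int cur_rank)
    {
    // first pass: a NULL d_tmp_storage only computes the required tmp_storage_bytes
    void *d_tmp_storage = NULL;
    size_t tmp_storage_bytes = 0;
    gpu_load_balance_select_off_rank(d_off_rank, d_n_select, d_ranks, d_tmp_storage, tmp_storage_bytes, N, cur_rank);
    // allocate the scratch space that was requested (illustrative direct allocation)
    cudaMalloc(&d_tmp_storage, tmp_storage_bytes);
    // second pass: performs the selection, filling d_off_rank and d_n_select
    gpu_load_balance_select_off_rank(d_off_rank, d_n_select, d_ranks, d_tmp_storage, tmp_storage_bytes, N, cur_rank);
    cudaFree(d_tmp_storage);
    }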
#endif // ENABLE_MPI
|
2e013ba0aeb1c0ec94bf06601a25ca221bc815df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1; //Must be 1 in this program
int const DEPTH = 3;
int const WIDTH = 128;
int const LENGTH = 128;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 128;
// Convolution output characteristics
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// transformation matrix characteristics
int const transformSizeY = convLayerSizeY * convLayerSizeX;
int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH;
int const transformSizeX_nodepth = CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const CONV_FINAL_SIZE = convLayerSizeX * convLayerSizeY * NUM_OF_KERNELS;
//MASK :2X2
int const MASKX = 3;
int const MASKY = 3;
int const MASKPADX = convLayerSizeX % MASKX;
int const MASKPADY = convLayerSizeY % MASKY;
int const MASK_SIZE = MASKX * MASKY;
int const pseudo_transformSizeY = (convLayerSizeY + MASKPADY) * (convLayerSizeX + MASKPADX);
int const MASKS_IN_Y = convLayerSizeY / (MASKY + MASKPADY);
int const MASKS_IN_X = convLayerSizeX / (MASKX + MASKPADX);
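// Worked example for the derived sizes above (WIDTH = LENGTH = 128, 3x3 receptive field, no zero
// padding, stride 1): convLayerSizeX = (128 - 3 + 0)/1 + 1 = 126 = convLayerSizeY, so
// transformSizeY = 126*126 = 15876, transformSizeX = 3*3*3 = 27 and CONV_FINAL_SIZE =
// 126*126*128 = 2032128. With MASKX = MASKY = 3 the mask tiling is exact: MASKPADX = MASKPADY =
// 126 % 3 = 0, MASK_SIZE = 9 and MASKS_IN_X = MASKS_IN_Y = 126/3 = 42.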
#define COUT_input if (1) std::cout
#define COUT_result if (1) std::cout
__global__
void Convolution(float* inputMatrix, float* weights, float* result)
{
int x1 = 0; //(blockIdx.x * MASK_SIZE) % (convLayerSizeX);
int y1 = 0;// ((blockIdx.x * MASK_SIZE) / (convLayerSizeX)) % convLayerSizeY;
int z1 = 0;//(blockIdx.x * MASK_SIZE) / transformSizeY;
int x2 = 1;//(blockIdx.x * MASK_SIZE + 4) % (convLayerSizeX);
int y2 = 1;//((blockIdx.x * MASK_SIZE + 4) / (convLayerSizeX)) % convLayerSizeY;
int z2 = 1;//(blockIdx.x * MASK_SIZE + 4) / transformSizeY;
int x3 = 2;//(blockIdx.x * MASK_SIZE + 8) % (convLayerSizeX);
int y3 = 2;//((blockIdx.x * MASK_SIZE + 8) / (convLayerSizeX)) % convLayerSizeY;
int z3 = 2;//(blockIdx.x * MASK_SIZE + 8) / transformSizeY;
int X = (blockIdx.x * MASK_SIZE + threadIdx.x) % (convLayerSizeX);
int Y = ((blockIdx.x * MASK_SIZE + threadIdx.x) / (convLayerSizeX)) % convLayerSizeY;
int Z = (blockIdx.x * MASK_SIZE + threadIdx.x) / transformSizeY;
int maskX_offset = X % MASKX;
int maskY_offset = Y % MASKY;
if (maskX_offset == maskY_offset)
{
for (int i = 0; i < DEPTH; i++)
{
for (int j = 0; j < CONV_RECP_SIZEY; j++)
{
for (int l = 0; l < CONV_RECP_SIZEX; l++)
{
result[Z * transformSizeY + Y * convLayerSizeX + X] += inputMatrix[i * WIDTH * LENGTH + (j + Y * STRIDEY) * WIDTH + (l + X * STRIDEX)] * weights[Z * transformSizeX + i * transformSizeX_nodepth + j * CONV_RECP_SIZEX + l];
}
}
}
}
__syncthreads();
if (!(maskX_offset == maskY_offset) &&
((int)result[z1 * transformSizeY + y1 * convLayerSizeX + x1] != 0 ||
(int)result[z2 * transformSizeY + y2 * convLayerSizeX + x2] != 0 ||
(int)result[z3 * transformSizeY + y3 * convLayerSizeX + x3] != 0)&& 0)
{
for (int i = 0; i < DEPTH; i++)
{
for (int j = 0; j < CONV_RECP_SIZEY; j++)
{
for (int l = 0; l < CONV_RECP_SIZEX; l++)
{
result[Z * transformSizeY + Y * convLayerSizeX + X] += inputMatrix[i * WIDTH * LENGTH + (j + Y * STRIDEY) * WIDTH + (l + X * STRIDEX)] * weights[Z * transformSizeX + i * transformSizeX_nodepth + j * CONV_RECP_SIZEX + l];
}
}
}
}
/*
int MaskNum = (X / MASKX) + (Y / MASKY) * MASKS_IN_X;
int masksX = (MaskNum % MASKS_IN_X) * MASKX;
int masksY = (MaskNum / MASKS_IN_X) * MASKY;
int convXNew = masksX + maskX_offset;
int convYNew = masksY + maskY_offset;
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
result[Z * transformSizeY + Y * convLayerSizeX + X] = 1;
*/
}
void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump)
{
double w = jump;
for (int b = 0; b < d; b++)
{
for (int c = 0; c < z; c++)
{
COUT_input << "slice: " << c + 1 << "\n";
for (int j = 0; j < y; j++)
{
for (int i = 0; i < x; i++)
{
if (type == -1)
{
matrix[((b * z + c) * y + j) * x + i] = rand() % 10;
}
else if (type == 0)
{
matrix[((b * z + c) * y + j) * x + i] = jump;
}
else
{
matrix[((b * z + c) * y + j) * x + i] = w;
w += jump;
}
COUT_input << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , ";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
}
int main()
{
// Performance test variables
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipError_t cudaStatus;
// Initialize Host data, kernel and output
float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH];
float* hostConvResult = new float[CONV_FINAL_SIZE]();
float* hostConvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX];
// GENERATING INPUT
std::cout << "Inputs:\n";
generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1);
generateFlat4DData(hostConvLayerWeights, CONV_RECP_SIZEX, CONV_RECP_SIZEY, DEPTH, NUM_OF_KERNELS, 1, 0.1);
// Initializing and allocating Device data, kernels and output
	float* deviceInputMatrix = NULL;
	float* deviceConvLayerWeights = NULL;
	float* deviceConvResult = NULL;
cudaStatus = hipMalloc((void **)&deviceConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void **)&deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(deviceConvLayerWeights, hostConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Initializing sizes of grid and block of threads
dim3 threadsPerBlock(MASK_SIZE, 1);
dim3 blocksPerGrid(ceil(double(CONV_FINAL_SIZE) / double(MASK_SIZE)), 1);
/*
if (transformSizeY * transformSizeX > 1024) {
threadsPerBlock.x = transformSizeX;
threadsPerBlock.y = 1;//1024 / transformSizeX;
blocksPerGrid.x = ceil(double(transformSizeX) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(transformSizeY) / double(threadsPerBlock.y));
}
*/
	// Run the kernel function and measure time
hipEventRecord(start, 0);
Convolution << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceConvLayerWeights, deviceConvResult);
	cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Transform addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventRecord(stop, 0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "EventRecord failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventSynchronize(stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "EventSynchronize failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventElapsedTime(&time, start, stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "ElapsedTime failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
time = time * 1000;
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "DeviceSynchronize failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// Get the results from device
cudaStatus = hipMemcpy(hostConvResult, deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float), hipMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// PRINTING RESULTS
COUT_result << "Convolution result:\n";
for (int k = 0; k < CONV_FINAL_SIZE; k++)
{
if (k % convLayerSizeX == 0)
{
COUT_result << "\n";
}
if (k % (convLayerSizeX * convLayerSizeY) == 0)
{
COUT_result << "Depth = " << k / (convLayerSizeX * convLayerSizeY) << "\n";
}
COUT_result << std::setprecision(1) << std::fixed << hostConvResult[k] << " ";
}
COUT_result << "\n\n";
// CLEAN UP
printf("Time for Convolution: %f us\n", time);
Error:
	hipFree(deviceInputMatrix);
	hipFree(deviceConvLayerWeights);
	hipFree(deviceConvResult);
return 0;
}
| 2e013ba0aeb1c0ec94bf06601a25ca221bc815df.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1; //Must be 1 in this program
int const DEPTH = 3;
int const WIDTH = 128;
int const LENGTH = 128;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 128;
// Convolution output characteristics
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// transformation matrix characteristics
int const transformSizeY = convLayerSizeY * convLayerSizeX;
int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH;
int const transformSizeX_nodepth = CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const CONV_FINAL_SIZE = convLayerSizeX * convLayerSizeY * NUM_OF_KERNELS;
//MASK :2X2
int const MASKX = 3;
int const MASKY = 3;
int const MASKPADX = convLayerSizeX % MASKX;
int const MASKPADY = convLayerSizeY % MASKY;
int const MASK_SIZE = MASKX * MASKY;
int const pseudo_transformSizeY = (convLayerSizeY + MASKPADY) * (convLayerSizeX + MASKPADX);
int const MASKS_IN_Y = convLayerSizeY / (MASKY + MASKPADY);
int const MASKS_IN_X = convLayerSizeX / (MASKX + MASKPADX);
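// Worked example for the derived sizes above (WIDTH = LENGTH = 128, 3x3 receptive field, no zero
// padding, stride 1): convLayerSizeX = (128 - 3 + 0)/1 + 1 = 126 = convLayerSizeY, so
// transformSizeY = 126*126 = 15876, transformSizeX = 3*3*3 = 27 and CONV_FINAL_SIZE =
// 126*126*128 = 2032128. With MASKX = MASKY = 3 the mask tiling is exact: MASKPADX = MASKPADY =
// 126 % 3 = 0, MASK_SIZE = 9 and MASKS_IN_X = MASKS_IN_Y = 126/3 = 42.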
#define COUT_input if (1) std::cout
#define COUT_result if (1) std::cout
__global__
void Convolution(float* inputMatrix, float* weights, float* result)
{
int x1 = 0; //(blockIdx.x * MASK_SIZE) % (convLayerSizeX);
int y1 = 0;// ((blockIdx.x * MASK_SIZE) / (convLayerSizeX)) % convLayerSizeY;
int z1 = 0;//(blockIdx.x * MASK_SIZE) / transformSizeY;
int x2 = 1;//(blockIdx.x * MASK_SIZE + 4) % (convLayerSizeX);
int y2 = 1;//((blockIdx.x * MASK_SIZE + 4) / (convLayerSizeX)) % convLayerSizeY;
int z2 = 1;//(blockIdx.x * MASK_SIZE + 4) / transformSizeY;
int x3 = 2;//(blockIdx.x * MASK_SIZE + 8) % (convLayerSizeX);
int y3 = 2;//((blockIdx.x * MASK_SIZE + 8) / (convLayerSizeX)) % convLayerSizeY;
int z3 = 2;//(blockIdx.x * MASK_SIZE + 8) / transformSizeY;
int X = (blockIdx.x * MASK_SIZE + threadIdx.x) % (convLayerSizeX);
int Y = ((blockIdx.x * MASK_SIZE + threadIdx.x) / (convLayerSizeX)) % convLayerSizeY;
int Z = (blockIdx.x * MASK_SIZE + threadIdx.x) / transformSizeY;
int maskX_offset = X % MASKX;
int maskY_offset = Y % MASKY;
if (maskX_offset == maskY_offset)
{
for (int i = 0; i < DEPTH; i++)
{
for (int j = 0; j < CONV_RECP_SIZEY; j++)
{
for (int l = 0; l < CONV_RECP_SIZEX; l++)
{
result[Z * transformSizeY + Y * convLayerSizeX + X] += inputMatrix[i * WIDTH * LENGTH + (j + Y * STRIDEY) * WIDTH + (l + X * STRIDEX)] * weights[Z * transformSizeX + i * transformSizeX_nodepth + j * CONV_RECP_SIZEX + l];
}
}
}
}
__syncthreads();
if (!(maskX_offset == maskY_offset) &&
((int)result[z1 * transformSizeY + y1 * convLayerSizeX + x1] != 0 ||
(int)result[z2 * transformSizeY + y2 * convLayerSizeX + x2] != 0 ||
(int)result[z3 * transformSizeY + y3 * convLayerSizeX + x3] != 0)&& 0)
{
for (int i = 0; i < DEPTH; i++)
{
for (int j = 0; j < CONV_RECP_SIZEY; j++)
{
for (int l = 0; l < CONV_RECP_SIZEX; l++)
{
result[Z * transformSizeY + Y * convLayerSizeX + X] += inputMatrix[i * WIDTH * LENGTH + (j + Y * STRIDEY) * WIDTH + (l + X * STRIDEX)] * weights[Z * transformSizeX + i * transformSizeX_nodepth + j * CONV_RECP_SIZEX + l];
}
}
}
}
/*
int MaskNum = (X / MASKX) + (Y / MASKY) * MASKS_IN_X;
int masksX = (MaskNum % MASKS_IN_X) * MASKX;
int masksY = (MaskNum / MASKS_IN_X) * MASKY;
int convXNew = masksX + maskX_offset;
int convYNew = masksY + maskY_offset;
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
result[Z * transformSizeY + Y * convLayerSizeX + X] = 1;
*/
}
void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump)
{
double w = jump;
for (int b = 0; b < d; b++)
{
for (int c = 0; c < z; c++)
{
COUT_input << "slice: " << c + 1 << "\n";
for (int j = 0; j < y; j++)
{
for (int i = 0; i < x; i++)
{
if (type == -1)
{
matrix[((b * z + c) * y + j) * x + i] = rand() % 10;
}
else if (type == 0)
{
matrix[((b * z + c) * y + j) * x + i] = jump;
}
else
{
matrix[((b * z + c) * y + j) * x + i] = w;
w += jump;
}
COUT_input << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , ";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
}
int main()
{
// Performance test variables
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaError_t cudaStatus;
// Initialize Host data, kernel and output
float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH];
float* hostConvResult = new float[CONV_FINAL_SIZE]();
float* hostConvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX];
// GENERATING INPUT
std::cout << "Inputs:\n";
generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1);
generateFlat4DData(hostConvLayerWeights, CONV_RECP_SIZEX, CONV_RECP_SIZEY, DEPTH, NUM_OF_KERNELS, 1, 0.1);
// Initializing and allocating Device data, kernels and output
	float* deviceInputMatrix = NULL;
	float* deviceConvLayerWeights = NULL;
	float* deviceConvResult = NULL;
cudaStatus = cudaMalloc((void **)&deviceConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void **)&deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(deviceConvLayerWeights, hostConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Initializing sizes of grid and block of threads
dim3 threadsPerBlock(MASK_SIZE, 1);
dim3 blocksPerGrid(ceil(double(CONV_FINAL_SIZE) / double(MASK_SIZE)), 1);
/*
if (transformSizeY * transformSizeX > 1024) {
threadsPerBlock.x = transformSizeX;
threadsPerBlock.y = 1;//1024 / transformSizeX;
blocksPerGrid.x = ceil(double(transformSizeX) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(transformSizeY) / double(threadsPerBlock.y));
}
*/
	// Run the kernel function and measure time
cudaEventRecord(start, 0);
Convolution << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceConvLayerWeights, deviceConvResult);
	cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Transform addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventRecord(stop, 0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "EventRecord failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventSynchronize(stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "EventSynchronize failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventElapsedTime(&time, start, stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "ElapsedTime failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
time = time * 1000;
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "DeviceSynchronize failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// Get the results from device
cudaStatus = cudaMemcpy(hostConvResult, deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float), cudaMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// PRINTING RESULTS
COUT_result << "Convolution result:\n";
for (int k = 0; k < CONV_FINAL_SIZE; k++)
{
if (k % convLayerSizeX == 0)
{
COUT_result << "\n";
}
if (k % (convLayerSizeX * convLayerSizeY) == 0)
{
COUT_result << "Depth = " << k / (convLayerSizeX * convLayerSizeY) << "\n";
}
COUT_result << std::setprecision(1) << std::fixed << hostConvResult[k] << " ";
}
COUT_result << "\n\n";
// CLEAN UP
printf("Time for Convolution: %f us\n", time);
Error:
	cudaFree(deviceInputMatrix);
	cudaFree(deviceConvLayerWeights);
	cudaFree(deviceConvResult);
return 0;
}
|
9c0f2adcf200ec7727ac7a5782a63a7e218b10c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "CudaRefine.h"
#include "MeshReconstruct.h"
#include "CudaMesh.h"
#include "CudaThrust.h"
#include "CudaSplitEncseg.h"
#include "CudaSplitEncsubface.h"
#include "CudaSplitBadtet.h"
#include "CudaSplitBadElement.h"
#include "CudaCompactMesh.h"
/*************************************************************************************/
/* */
/* GPU_Refine_3D() Compute 3D constrained Delaunay refinement on GPU. */
/* */
/*************************************************************************************/
void GPU_Refine_3D(
MESHIO* input_gpu,
MESHBH* input_behavior,
int& out_numofpoint,
double*& out_pointlist,
int& out_numofedge,
int*& out_edgelist,
int& out_numoftriface,
int*& out_trifacelist,
int& out_numoftet,
int*& out_tetlist
)
{
/* Check input behavior */
if (input_behavior->mode != 1 && input_behavior->mode != 2)
{
printf("Unknown input mode: #%d\n", input_behavior->mode);
exit(0);
}
internalmesh* drawmesh = input_behavior->drawmesh;
/* Set up timer */
StopWatchInterface *inner_timer = 0;
sdkCreateTimer(&inner_timer);
/******************************************/
/* 0. Reconstruct the input cdt mesh */
/******************************************/
printf(" 0. Reconstructing the input CDT mesh...\n");
// Reset and start timer.
sdkResetTimer( &inner_timer );
sdkStartTimer( &inner_timer );
// input variables
tethandle* inpoint2tetlist;
trihandle* inpoint2trilist;
verttype* inpointtypelist;
int innumofedge;
int* inseglist;
trihandle* inseg2trilist;
tethandle* inseg2tetlist;
int innumoftriface;
int* intrifacelist;
tethandle* intri2tetlist;
trihandle* intri2trilist;
trihandle* intri2seglist;
int innumoftetrahedron;
int* intetlist;
tethandle* inneighborlist;
trihandle* intet2trilist;
trihandle* intet2seglist;
// reconstruct mesh
reconstructMesh(
input_gpu,
inpoint2tetlist,
inpoint2trilist,
inpointtypelist,
innumofedge,
inseglist,
inseg2trilist,
inseg2tetlist,
innumoftriface,
intrifacelist,
intri2tetlist,
intri2trilist,
intri2seglist,
innumoftetrahedron,
intetlist,
inneighborlist,
intet2trilist,
intet2seglist,
false
);
// Construct segment to parent list
int* inseg2parentidxlist;
int* insegparentendpointidxlist;
int innumofsegparent;
makesegment2parentmap(
innumofedge,
inseglist,
inseg2trilist,
inseg2parentidxlist,
insegparentendpointidxlist,
innumofsegparent);
// Construct subface endpoint list
	// Although there are only triangles in the input PLC, the parents of subfaces may still be
	// polygons, because Tetgen merged the nearly-coplanar facets
int* intri2parentidxlist;
int* inid2triparentoffsetlist;
int* intriparentendpointidxlist;
int innumoftriparent;
int innumoftriparentendpoint;
makesubfacepointsmap(
input_gpu->numofpoints,
input_gpu->pointlist,
inpointtypelist,
innumoftriface,
intrifacelist,
intri2seglist,
intri2trilist,
intri2parentidxlist,
inid2triparentoffsetlist,
intriparentendpointidxlist,
innumoftriparent,
innumoftriparentendpoint
);
// stop timer
sdkStopTimer(&inner_timer);
// print out info
printf(" Reconstructed Mesh Size:\n");
printf(" Number of point = %d\n", input_gpu->numofpoints);
printf(" Number of edge = %d\n", innumofedge);
printf(" Number of triface = %d\n", innumoftriface);
printf(" Number of tetrahedron = %d\n", innumoftetrahedron);
printf(" Reconstruction time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
input_behavior->times[0] = sdkGetTimerValue(&inner_timer);
/******************************************/
/* 1. Initialization */
/******************************************/
printf(" 1. Initialization\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// Control variables
int last_point = input_gpu->numofpoints;
int last_subseg = innumofedge;
int last_subface = innumoftriface;
int last_subfaceparent = innumoftriparent;
int last_tet = innumoftetrahedron;
// Input mesh arrays, copy from the host
RealD t_pointlist(input_gpu->pointlist, input_gpu->pointlist + 3 * last_point);
TetHandleD t_point2tetlist(inpoint2tetlist, inpoint2tetlist + last_point);
TriHandleD t_point2trilist(inpoint2trilist, inpoint2trilist + last_point);
PointTypeD t_pointtypelist(inpointtypelist, inpointtypelist + last_point);
IntD t_seglist(inseglist, inseglist + 3 * last_subseg);
TriHandleD t_seg2trilist(inseg2trilist, inseg2trilist + 3 * last_subseg);
TetHandleD t_seg2tetlist(inseg2tetlist, inseg2tetlist + last_subseg);
IntD t_seg2parentidxlist(inseg2parentidxlist, inseg2parentidxlist + last_subseg);
IntD t_segparentendpointidxlist(insegparentendpointidxlist, insegparentendpointidxlist + 2 * innumofsegparent);
IntD t_trifacelist(intrifacelist, intrifacelist + 3 * last_subface);
TetHandleD t_tri2tetlist(intri2tetlist, intri2tetlist + 2 * last_subface);
TriHandleD t_tri2trilist(intri2trilist, intri2trilist + 3 * last_subface);
TriHandleD t_tri2seglist(intri2seglist, intri2seglist + 3 * last_subface);
IntD t_tri2parentidxlist(intri2parentidxlist, intri2parentidxlist + last_subface);
IntD t_triid2parentoffsetlist(inid2triparentoffsetlist, inid2triparentoffsetlist + last_subfaceparent + 1);
IntD t_triparentendpointidxlist(intriparentendpointidxlist, intriparentendpointidxlist + innumoftriparentendpoint);
IntD t_tetlist(intetlist, intetlist + 4 * last_tet);
TetHandleD t_neighborlist(inneighborlist, inneighborlist + 4 * last_tet);
TriHandleD t_tet2trilist(intet2trilist, intet2trilist + 4 * last_tet);
TriHandleD t_tet2seglist(intet2seglist, intet2seglist + 6 * last_tet);
// Internal arrays
RealD t_pointradius(last_point, 0.0);
if (input_gpu->interpointradius != NULL)
thrust::copy(input_gpu->interpointradius, input_gpu->interpointradius + last_point, t_pointradius.begin());
TetStatusD t_tetstatus(last_tet, tetstatus(1));
TriStatusD t_tristatus(last_subface, tristatus(1));
TriStatusD t_segstatus(last_subseg, tristatus(1));
// Marker arrays
IntD t_segencmarker(last_subseg, -1); // initialize to non-encroached
IntD t_subfaceencmarker(last_subface, -1);
// Cuda mesh manipulation
int xmax, xmin, ymax, ymin, zmax, zmin;
cudamesh_inittables();
cudamesh_initbbox(input_gpu->numofpoints, input_gpu->pointlist,
xmax, xmin, ymax, ymin, zmax, zmin);
cudamesh_exactinit(0, 0, 0, xmax - xmin, ymax - ymin, zmax - zmin);
cudamesh_initkernelconstants(xmax - xmin, ymax - ymin, zmax - zmin);
// initialize subseg encroach marker
initSegEncmarkers(
t_pointlist,
t_seglist,
t_seg2tetlist,
t_segencmarker,
t_tetlist,
t_neighborlist,
last_subseg
);
initSubfaceEncmarkers(
t_pointlist,
t_trifacelist,
t_tri2tetlist,
t_subfaceencmarker,
t_tetlist,
last_subface
);
initTetBadstatus(
t_pointlist,
t_tetlist,
t_tetstatus,
input_behavior->radius_to_edge_ratio,
last_tet
);
// stop timer
sdkStopTimer(&inner_timer);
// print out info
printf(" Initialization time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
input_behavior->times[1] = sdkGetTimerValue(&inner_timer);
//gpuMemoryCheck();
if (input_behavior->mode == 2)
{
/******************************************/
/* 2. Split bad elements */
/******************************************/
printf(" 2. Split bad elements\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split encroached segments
splitBadElements(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
1,
false,
false
);
// stop timer
hipDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting bad elements time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[2] = sdkGetTimerValue(&inner_timer);
input_behavior->times[3] = 0;
input_behavior->times[4] = 0;
}
else if (input_behavior->mode == 1)
{
/******************************************/
/* 2. Split encroached subsegments */
/******************************************/
printf(" 2. Split encroached subsegments\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split encroached segments
splitEncsegs(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
-1,
-1,
0,
false,
false
);
// stop timer
hipDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting encroached subsegments time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[2] = sdkGetTimerValue(&inner_timer);
if (drawmesh != NULL && !drawmesh->animation)
{
if (drawmesh->iter_seg != -1 && drawmesh->iter_subface == -1 && drawmesh->iter_tet == -1)
return;
}
/******************************************/
/* 3. Split encroached subfaces */
/******************************************/
printf(" 3. Split encroached subfaces\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split encroached subfaces
splitEncsubfaces(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
-1,
0,
false,
false
);
// stop timer
hipDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting encroached subfaces time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[3] = sdkGetTimerValue(&inner_timer);
if (drawmesh != NULL && !drawmesh->animation)
{
if (drawmesh->iter_subface != -1 && drawmesh->iter_tet == -1)
return;
}
/******************************************/
/* 4. Split bad quality tets */
/******************************************/
//gpuMemoryCheck();
printf(" 4. Split bad quality tets\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split bad tets
splitBadTets(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
1,
false,
false
);
// stop timer
hipDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting bad tets time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[4] = sdkGetTimerValue(&inner_timer);
if (drawmesh != NULL && !drawmesh->animation)
{
if (drawmesh->iter_tet != -1)
return;
}
}
/******************************************/
/* 5. Output final quality mesh */
/******************************************/
printf(" 5. Output final quality mesh\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
compactMesh(
out_numofpoint, out_pointlist, t_pointlist,
out_numofedge, out_edgelist, t_seglist, t_segstatus,
out_numoftriface, out_trifacelist, t_trifacelist, t_tristatus,
out_numoftet, out_tetlist, t_tetlist, t_tetstatus
);
// stop timer
hipDeviceSynchronize();
sdkStopTimer(&inner_timer);
input_behavior->times[5] = sdkGetTimerValue(&inner_timer);
} | 9c0f2adcf200ec7727ac7a5782a63a7e218b10c2.cu | #include <stdio.h>
#include "CudaRefine.h"
#include "MeshReconstruct.h"
#include "CudaMesh.h"
#include "CudaThrust.h"
#include "CudaSplitEncseg.h"
#include "CudaSplitEncsubface.h"
#include "CudaSplitBadtet.h"
#include "CudaSplitBadElement.h"
#include "CudaCompactMesh.h"
/*************************************************************************************/
/* */
/* GPU_Refine_3D() Compute 3D constrained Delaunay refinement on GPU. */
/* */
/*************************************************************************************/
void GPU_Refine_3D(
MESHIO* input_gpu,
MESHBH* input_behavior,
int& out_numofpoint,
double*& out_pointlist,
int& out_numofedge,
int*& out_edgelist,
int& out_numoftriface,
int*& out_trifacelist,
int& out_numoftet,
int*& out_tetlist
)
{
/* Check input behavior */
if (input_behavior->mode != 1 && input_behavior->mode != 2)
{
printf("Unknown input mode: #%d\n", input_behavior->mode);
exit(0);
}
internalmesh* drawmesh = input_behavior->drawmesh;
/* Set up timer */
StopWatchInterface *inner_timer = 0;
sdkCreateTimer(&inner_timer);
/******************************************/
/* 0. Reconstruct the input cdt mesh */
/******************************************/
printf(" 0. Reconstructing the input CDT mesh...\n");
// Reset and start timer.
sdkResetTimer( &inner_timer );
sdkStartTimer( &inner_timer );
// input variables
tethandle* inpoint2tetlist;
trihandle* inpoint2trilist;
verttype* inpointtypelist;
int innumofedge;
int* inseglist;
trihandle* inseg2trilist;
tethandle* inseg2tetlist;
int innumoftriface;
int* intrifacelist;
tethandle* intri2tetlist;
trihandle* intri2trilist;
trihandle* intri2seglist;
int innumoftetrahedron;
int* intetlist;
tethandle* inneighborlist;
trihandle* intet2trilist;
trihandle* intet2seglist;
// reconstruct mesh
reconstructMesh(
input_gpu,
inpoint2tetlist,
inpoint2trilist,
inpointtypelist,
innumofedge,
inseglist,
inseg2trilist,
inseg2tetlist,
innumoftriface,
intrifacelist,
intri2tetlist,
intri2trilist,
intri2seglist,
innumoftetrahedron,
intetlist,
inneighborlist,
intet2trilist,
intet2seglist,
false
);
// Construct segment to parent list
int* inseg2parentidxlist;
int* insegparentendpointidxlist;
int innumofsegparent;
makesegment2parentmap(
innumofedge,
inseglist,
inseg2trilist,
inseg2parentidxlist,
insegparentendpointidxlist,
innumofsegparent);
// Construct subface endpoint list
	// Although there are only triangles in the input PLC, the parents of subfaces may still be
	// polygons, because Tetgen merged the nearly-coplanar facets
int* intri2parentidxlist;
int* inid2triparentoffsetlist;
int* intriparentendpointidxlist;
int innumoftriparent;
int innumoftriparentendpoint;
makesubfacepointsmap(
input_gpu->numofpoints,
input_gpu->pointlist,
inpointtypelist,
innumoftriface,
intrifacelist,
intri2seglist,
intri2trilist,
intri2parentidxlist,
inid2triparentoffsetlist,
intriparentendpointidxlist,
innumoftriparent,
innumoftriparentendpoint
);
// stop timer
sdkStopTimer(&inner_timer);
// print out info
printf(" Reconstructed Mesh Size:\n");
printf(" Number of point = %d\n", input_gpu->numofpoints);
printf(" Number of edge = %d\n", innumofedge);
printf(" Number of triface = %d\n", innumoftriface);
printf(" Number of tetrahedron = %d\n", innumoftetrahedron);
printf(" Reconstruction time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
input_behavior->times[0] = sdkGetTimerValue(&inner_timer);
/******************************************/
/* 1. Initialization */
/******************************************/
printf(" 1. Initialization\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// Control variables
int last_point = input_gpu->numofpoints;
int last_subseg = innumofedge;
int last_subface = innumoftriface;
int last_subfaceparent = innumoftriparent;
int last_tet = innumoftetrahedron;
// Input mesh arrays, copy from the host
RealD t_pointlist(input_gpu->pointlist, input_gpu->pointlist + 3 * last_point);
TetHandleD t_point2tetlist(inpoint2tetlist, inpoint2tetlist + last_point);
TriHandleD t_point2trilist(inpoint2trilist, inpoint2trilist + last_point);
PointTypeD t_pointtypelist(inpointtypelist, inpointtypelist + last_point);
IntD t_seglist(inseglist, inseglist + 3 * last_subseg);
TriHandleD t_seg2trilist(inseg2trilist, inseg2trilist + 3 * last_subseg);
TetHandleD t_seg2tetlist(inseg2tetlist, inseg2tetlist + last_subseg);
IntD t_seg2parentidxlist(inseg2parentidxlist, inseg2parentidxlist + last_subseg);
IntD t_segparentendpointidxlist(insegparentendpointidxlist, insegparentendpointidxlist + 2 * innumofsegparent);
IntD t_trifacelist(intrifacelist, intrifacelist + 3 * last_subface);
TetHandleD t_tri2tetlist(intri2tetlist, intri2tetlist + 2 * last_subface);
TriHandleD t_tri2trilist(intri2trilist, intri2trilist + 3 * last_subface);
TriHandleD t_tri2seglist(intri2seglist, intri2seglist + 3 * last_subface);
IntD t_tri2parentidxlist(intri2parentidxlist, intri2parentidxlist + last_subface);
IntD t_triid2parentoffsetlist(inid2triparentoffsetlist, inid2triparentoffsetlist + last_subfaceparent + 1);
IntD t_triparentendpointidxlist(intriparentendpointidxlist, intriparentendpointidxlist + innumoftriparentendpoint);
IntD t_tetlist(intetlist, intetlist + 4 * last_tet);
TetHandleD t_neighborlist(inneighborlist, inneighborlist + 4 * last_tet);
TriHandleD t_tet2trilist(intet2trilist, intet2trilist + 4 * last_tet);
TriHandleD t_tet2seglist(intet2seglist, intet2seglist + 6 * last_tet);
// Internal arrays
RealD t_pointradius(last_point, 0.0);
if (input_gpu->interpointradius != NULL)
thrust::copy(input_gpu->interpointradius, input_gpu->interpointradius + last_point, t_pointradius.begin());
TetStatusD t_tetstatus(last_tet, tetstatus(1));
TriStatusD t_tristatus(last_subface, tristatus(1));
TriStatusD t_segstatus(last_subseg, tristatus(1));
// Marker arrays
IntD t_segencmarker(last_subseg, -1); // initialize to non-encroached
IntD t_subfaceencmarker(last_subface, -1);
// Cuda mesh manipulation
int xmax, xmin, ymax, ymin, zmax, zmin;
cudamesh_inittables();
cudamesh_initbbox(input_gpu->numofpoints, input_gpu->pointlist,
xmax, xmin, ymax, ymin, zmax, zmin);
cudamesh_exactinit(0, 0, 0, xmax - xmin, ymax - ymin, zmax - zmin);
cudamesh_initkernelconstants(xmax - xmin, ymax - ymin, zmax - zmin);
// initialize subseg encroach marker
initSegEncmarkers(
t_pointlist,
t_seglist,
t_seg2tetlist,
t_segencmarker,
t_tetlist,
t_neighborlist,
last_subseg
);
initSubfaceEncmarkers(
t_pointlist,
t_trifacelist,
t_tri2tetlist,
t_subfaceencmarker,
t_tetlist,
last_subface
);
initTetBadstatus(
t_pointlist,
t_tetlist,
t_tetstatus,
input_behavior->radius_to_edge_ratio,
last_tet
);
// stop timer
sdkStopTimer(&inner_timer);
// print out info
printf(" Initialization time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
input_behavior->times[1] = sdkGetTimerValue(&inner_timer);
//gpuMemoryCheck();
if (input_behavior->mode == 2)
{
/******************************************/
/* 2. Split bad elements */
/******************************************/
printf(" 2. Split bad elements\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split encroached segments
splitBadElements(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
1,
false,
false
);
// stop timer
cudaDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting bad elements time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[2] = sdkGetTimerValue(&inner_timer);
input_behavior->times[3] = 0;
input_behavior->times[4] = 0;
}
else if (input_behavior->mode == 1)
{
/******************************************/
/* 2. Split encroached subsegments */
/******************************************/
printf(" 2. Split encroached subsegments\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split encroached segments
splitEncsegs(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
-1,
-1,
0,
false,
false
);
// stop timer
cudaDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting encroached subsegments time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[2] = sdkGetTimerValue(&inner_timer);
if (drawmesh != NULL && !drawmesh->animation)
{
if (drawmesh->iter_seg != -1 && drawmesh->iter_subface == -1 && drawmesh->iter_tet == -1)
return;
}
/******************************************/
/* 3. Split encroached subfaces */
/******************************************/
printf(" 3. Split encroached subfaces\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split encroached subfaces
splitEncsubfaces(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
-1,
0,
false,
false
);
// stop timer
cudaDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting encroached subfaces time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[3] = sdkGetTimerValue(&inner_timer);
if (drawmesh != NULL && !drawmesh->animation)
{
if (drawmesh->iter_subface != -1 && drawmesh->iter_tet == -1)
return;
}
/******************************************/
/* 4. Split bad quality tets */
/******************************************/
//gpuMemoryCheck();
printf(" 4. Split bad quality tets\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
// split bad tets
splitBadTets(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
last_point,
last_subseg,
last_subface,
last_tet,
input_behavior,
1,
false,
false
);
// stop timer
cudaDeviceSynchronize();
sdkStopTimer(&inner_timer);
// print out info
printf(" Splitting bad tets time = %.3f ms\n", sdkGetTimerValue(&inner_timer));
printf(" Number of points = %d, segments = %d, subfaces = %d, tets = %d\n",
last_point, last_subseg, last_subface, last_tet);
input_behavior->times[4] = sdkGetTimerValue(&inner_timer);
if (drawmesh != NULL && !drawmesh->animation)
{
if (drawmesh->iter_tet != -1)
return;
}
}
/******************************************/
/* 5. Output final quality mesh */
/******************************************/
printf(" 5. Output final quality mesh\n");
// Reset and start timer.
sdkResetTimer(&inner_timer);
sdkStartTimer(&inner_timer);
compactMesh(
out_numofpoint, out_pointlist, t_pointlist,
out_numofedge, out_edgelist, t_seglist, t_segstatus,
out_numoftriface, out_trifacelist, t_trifacelist, t_tristatus,
out_numoftet, out_tetlist, t_tetlist, t_tetstatus
);
// stop timer
cudaDeviceSynchronize();
sdkStopTimer(&inner_timer);
input_behavior->times[5] = sdkGetTimerValue(&inner_timer);
} |
d9be49269c1f900305f478f93fa2d98da53df6a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "spatial_coherence.h"
#include <glm/glm.hpp>
#include <hip/hip_runtime.h>
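// kernSpatialCoherence performs one Jacobi-style relaxation sweep: for each pixel, every candidate
// value in `pixels` (frames candidates per pixel) is scored by the sum of absolute differences to
// the current image over a border-clamped 7x7 window, plus a data term weighted by 25 against the
// original 8-bit image; the cheapest candidate is written to img_next. spatialCoherence() below
// ping-pongs two device buffers through 10 such sweeps.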
__global__
void kernSpatialCoherence(int width, int height, int frames, float * img_cur, float * img_next, unsigned char * orig, float * pixels) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
int idx = y * width + x;
float mn = 1e12;
float val = img_cur[idx];
for (int i = 0; i < frames; i++) {
float d = 0.0f;
for (int dy = -3; dy <= 3; dy++) {
for (int dx = -3; dx <= 3; dx++) {
int nx = glm::clamp(x + dx, 0, width - 1);
int ny = glm::clamp(y + dy, 0, height - 1);
int nidx = ny * width + nx;
d += glm::abs(pixels[idx * frames + i] - img_cur[nidx]);
}
}
d += 25.0f * glm::abs(pixels[idx * frames + i] - (float)orig[idx]);
if (d < mn) {
mn = d;
val = pixels[idx * frames + i];
}
}
img_next[idx] = val;
return;
}
void spatialCoherence(int width, int height, int frames, float * img, unsigned char * orig, float * pixels) {
int N = width * height;
float * dev_img[2];
unsigned char * dev_orig;
float * dev_pixels;
int cur = 0;
for (int i = 0; i < 2; i++) {
hipMalloc(&dev_img[i], N * sizeof(float));
}
hipMalloc(&dev_orig, N * sizeof(unsigned char));
hipMalloc(&dev_pixels, frames * N * sizeof(float));
hipMemcpy(dev_img[0], img, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_orig, orig, N * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(dev_pixels, pixels, frames * N * sizeof(float), hipMemcpyHostToDevice);
dim3 blockSize2d(16, 16);
dim3 blocksPerGrid2d(
(width + blockSize2d.x - 1) / blockSize2d.x,
(height + blockSize2d.y - 1) / blockSize2d.y);
for (int i = 0; i < 10; i++) {
hipLaunchKernelGGL(( kernSpatialCoherence), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, width, height, frames, dev_img[cur], dev_img[1 - cur], dev_orig, dev_pixels);
cur = 1 - cur;
}
hipMemcpy(img, dev_img[cur], N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_orig);
hipFree(dev_img[0]);
hipFree(dev_img[1]);
hipFree(dev_pixels);
return;
}
| d9be49269c1f900305f478f93fa2d98da53df6a6.cu | #include "spatial_coherence.h"
#include <glm/glm.hpp>
#include <cuda.h>
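// kernSpatialCoherence performs one Jacobi-style relaxation sweep: for each pixel, every candidate
// value in `pixels` (frames candidates per pixel) is scored by the sum of absolute differences to
// the current image over a border-clamped 7x7 window, plus a data term weighted by 25 against the
// original 8-bit image; the cheapest candidate is written to img_next. spatialCoherence() below
// ping-pongs two device buffers through 10 such sweeps.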
__global__
void kernSpatialCoherence(int width, int height, int frames, float * img_cur, float * img_next, unsigned char * orig, float * pixels) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
int idx = y * width + x;
float mn = 1e12;
float val = img_cur[idx];
for (int i = 0; i < frames; i++) {
float d = 0.0f;
for (int dy = -3; dy <= 3; dy++) {
for (int dx = -3; dx <= 3; dx++) {
int nx = glm::clamp(x + dx, 0, width - 1);
int ny = glm::clamp(y + dy, 0, height - 1);
int nidx = ny * width + nx;
d += glm::abs(pixels[idx * frames + i] - img_cur[nidx]);
}
}
d += 25.0f * glm::abs(pixels[idx * frames + i] - (float)orig[idx]);
if (d < mn) {
mn = d;
val = pixels[idx * frames + i];
}
}
img_next[idx] = val;
return;
}
void spatialCoherence(int width, int height, int frames, float * img, unsigned char * orig, float * pixels) {
int N = width * height;
float * dev_img[2];
unsigned char * dev_orig;
float * dev_pixels;
int cur = 0;
for (int i = 0; i < 2; i++) {
cudaMalloc(&dev_img[i], N * sizeof(float));
}
cudaMalloc(&dev_orig, N * sizeof(unsigned char));
cudaMalloc(&dev_pixels, frames * N * sizeof(float));
cudaMemcpy(dev_img[0], img, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_orig, orig, N * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(dev_pixels, pixels, frames * N * sizeof(float), cudaMemcpyHostToDevice);
dim3 blockSize2d(16, 16);
dim3 blocksPerGrid2d(
(width + blockSize2d.x - 1) / blockSize2d.x,
(height + blockSize2d.y - 1) / blockSize2d.y);
for (int i = 0; i < 10; i++) {
kernSpatialCoherence<<<blocksPerGrid2d, blockSize2d>>>(width, height, frames, dev_img[cur], dev_img[1 - cur], dev_orig, dev_pixels);
cur = 1 - cur;
}
cudaMemcpy(img, dev_img[cur], N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_orig);
cudaFree(dev_img[0]);
cudaFree(dev_img[1]);
cudaFree(dev_pixels);
return;
}
|
cedb3c2a5ad37948bb8888089dde83b6a417e8d8.hip | // !!! This is a file automatically generated by hipify!!!
// Automatically generated CU for C:\Users\rben.KECK-CENTER\Documents\GitHub\NeuroGPU\URapNeuron\Mainen\runModel.hoc
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include "AllModels_hip.cuh"
// Universals:
#define PI (3.1415927f)
#define R (8.31441f)
#define FARADAY (96485.309f)
#define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY)
// GGlobals
#define celsius (37.00000)
#define stoprun (0.00000)
#define clamp_resist (0.00100)
#define secondorder (0.00000)
// NGlobals:
#define q10_ca (2.3)
#define temp_ca (23)
#define tadj_ca (3.2094)
#define vmin_ca (-120)
#define vmax_ca (100)
#define vshift_ca (0)
#define depth_cad (0.1)
#define cainf_cad (0.0001)
#define taur_cad (200)
#define q10_kca (2.3)
#define temp_kca (23)
#define tadj_kca (3.2094)
#define vmin_kca (-120)
#define vmax_kca (100)
#define q10_km (2.3)
#define temp_km (23)
#define tadj_km (3.2094)
#define vmin_km (-120)
#define vmax_km (100)
#define q10_kv (2.3)
#define temp_kv (23)
#define tadj_kv (3.2094)
#define vmin_kv (-120)
#define vmax_kv (100)
#define q10_na (2.3)
#define temp_na (23)
#define tadj_na (3.2094)
#define vmin_na (-120)
#define vmax_na (100)
#define vshift_na (-5)
// Reversals:
#define eca (140.00000f)
#define ek (-90.00000f)
#define ena (60.00000f)
// Declarations:
__device__ void Cutrates_ca(float v ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau);
__device__ void Curates_ca(float vm ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau);
__device__ void Curates_kca(float cai,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca,float &a,float &b,float &ninf,float &ntau);
__device__ void Cutrates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau);
__device__ void Curates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau);
__device__ void Cutrates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau);
__device__ void Curates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau);
__device__ void Cutrates_na(float v,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau);
__device__ void Curates_na(float vm,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau);
float Cunernst(float ci,float co, float z) {
if (z == 0) {
return 0.;
}
if (ci <= 0.) {
return 1e6;
}else if (co <= 0.) {
return -1e6;
}else{
return ktf/z*log(co/ci);
}
}
// Functions:
__device__ float Cuefun_ca(float z) {
if (fabs(z) < 1e-4) {;
return 1 - z/2;
}else{;
return z/(exp(z) - 1);
};
};
__device__ float Cuefun_km(float z) {
if (fabs(z) < 1e-4) {;
return 1 - z/2;
}else{;
return z/(exp(z) - 1);
};
};
__device__ float Cuefun_kv(float z) {
if (fabs(z) < 1e-4) {;
return 1 - z/2;
}else{;
return z/(exp(z) - 1);
};
};
__device__ float Cutrap0_na(float v,float th,float a,float q) {
if (fabs((v-th)/q) > 1e-6) {;
return a * (v - th) / (1 - exp(-(v - th)/q));
} else {;
return a * q;
};
} ;
// Procedures:
__device__ void Cutrates_ca(float v ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau) {
Curates_ca(v,gbar_ca,cao_ca,hinf,htau,minf,mtau);
;};
__device__ void Curates_ca(float vm ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau) {
float a, b;
/* removed tadj_ca recalculation */
a = 0.209*Cuefun_ca(-(27+vm)/3.8);
b = 0.94*exp((-75-vm)/17);
;
mtau = 1/tadj_ca/(a+b);
minf = a/(a+b);
;
a = 0.000457*exp((-13-vm)/50);
b = 0.0065/(exp((-vm-15)/28) + 1);
htau = 1/tadj_ca/(a+b);
hinf = a/(a+b);
;};
__device__ void Curates_kca(float cai,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca,float &a,float &b,float &ninf,float &ntau) {
;
a =pow((MYFTYPE) Ra_kca * cai,(MYFTYPE)caix_kca);
b = Rb_kca;
/* removed tadj_kca recalculation */
ntau = 1/tadj_kca/(a+b);
ninf = a/(a+b);
;
;};
__device__ void Cutrates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau) {
;
Curates_km(v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
;};
__device__ void Curates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau) {
;
;
a = Ra_km * qa_km * Cuefun_km(-(v - tha_km)/qa_km);
;
b = Rb_km * qa_km * Cuefun_km((v - tha_km)/qa_km);
/* removed tadj_km recalculation */
ntau = 1/tadj_km/(a+b);
ninf = a/(a+b);
;};
__device__ void Cutrates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau) {
;
Curates_kv(v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
;};
__device__ void Curates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau) {
;
;
a = Ra_kv * qa_kv * Cuefun_kv(-(v - tha_kv)/qa_kv);
;
b = Rb_kv * qa_kv * Cuefun_kv((v - tha_kv)/qa_kv);
/* removed tadj_kv recalculation */
ntau = 1/tadj_kv/(a+b);
ninf = a/(a+b);
;};
__device__ void Cutrates_na(float v,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau) {
;
;
;
Curates_na(v,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
;};
__device__ void Curates_na(float vm,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau) {
float a, b;
a = Cutrap0_na(vm,tha_na,Ra_na,qa_na);
b = Cutrap0_na(-vm,-tha_na,Rb_na,qa_na);
/* removed tadj_na recalculation */
mtau = 1/tadj_na/(a+b);
minf = a/(a+b);
;
a = Cutrap0_na(vm,thi1_na,Rd_na,qi_na);
b = Cutrap0_na(-vm,-thi2_na,Rg_na,qi_na);
htau = 1/tadj_na/(a+b);
hinf = 1/(1+exp((vm-thinf_na)/qinf_na));
;};
// Inits:
__device__ void CuInitModel_ca(float v,float &m,float &h,float gbar_ca,float cao_ca, float cai, float &ica) {
float hinf,htau,minf,mtau;
/* removed tadj_ca recalculation */
Cutrates_ca(v+vshift_ca,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = minf;
h = hinf;
;};
__device__ void CuInitModel_cad(float v,float &ca, float ica, float &cai) {
ca = cainf_cad;
cai = ca;
;};
__device__ void CuInitModel_kca(float v,float &n,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca, float cai) {
float a,b,ninf,ntau;
Curates_kca(cai,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = ninf;
;};
__device__ void CuInitModel_km(float v,float &n,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km) {
float a,b,ninf,ntau;
/* removed tadj_km recalculation */
Cutrates_km(v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = ninf;
;};
__device__ void CuInitModel_kv(float v,float &n,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv) {
float a,b,ninf,ntau;
/* removed tadj_kv recalculation */
Cutrates_kv(v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = ninf;
;};
__device__ void CuInitModel_na(float v,float &m,float &h,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na) {
float hinf,htau,minf,mtau;
/* removed tadj_na recalculation */
Cutrates_na(v+vshift_na,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = minf;
h = hinf;
;};
__device__ void CuInitModel_pas(float v,float g_pas,float e_pas) {
g_pas = .001;
e_pas = -70 ;
;};
// Derivs:
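// State updates below follow the exponential-Euler (cnexp) form x += (1 - exp(-dt/tau))*(xinf - x).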
__device__ void CuDerivModel_ca(float dt, float v,float &m,float &h,float gbar_ca,float cao_ca, float cai, float &ica) {
float hinf,htau,minf,mtau;
Cutrates_ca ( v + vshift_ca ,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0) ) ) / htau ) - h) ;
;}
__device__ void CuDerivModel_cad(float dt, float v,float &ca, float ica, float &cai) {
float drive_channel;
drive_channel = - ( 10000.0 ) * ica / ( 2.0 * FARADAY * depth_cad ) ;
if ( drive_channel <= 0. ) {
drive_channel = 0. ;
;}
ca = ca + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / taur_cad)))*(- ( drive_channel + ( ( cainf_cad ) ) / taur_cad ) / ( ( ( ( - 1.0) ) ) / taur_cad ) - ca) ;
cai = ca ;
;}
__device__ void CuDerivModel_kca(float dt, float v,float &n,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca, float cai) {
float a,b,ninf,ntau;
Curates_kca ( cai ,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0) ) ) / ntau ) - n) ;
;}
__device__ void CuDerivModel_km(float dt, float v,float &n,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km) {
float a,b,ninf,ntau;
Cutrates_km ( v ,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0) ) ) / ntau ) - n) ;
;}
__device__ void CuDerivModel_kv(float dt, float v,float &n,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv) {
float a,b,ninf,ntau;
Cutrates_kv ( v ,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0) ) ) / ntau ) - n) ;
;}
__device__ void CuDerivModel_na(float dt, float v,float &m,float &h,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na) {
float hinf,htau,minf,mtau;
Cutrates_na ( v + vshift_na ,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0) ) ) / htau ) - h) ;
;}
// Breakpoints:
__device__ void CuBreakpointModel_ca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &m,float &h,float gbar_ca,float cao_ca, float cai, float &ica) {
float eca;
float gca,hinf,htau,minf,mtau;
gca=tadj_ca*gbar_ca*m*m*h;
ica=(1e-4)*gca*(v-eca);
sumCurrents+= ica;
sumConductivity+= gca;
;};
__device__ void CuBreakpointModel_cad(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &ca, float ica, float &cai) {
float gca;
;};
__device__ void CuBreakpointModel_kca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &n,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca, float cai) {
float gca,gk,ninf,ntau;
float ik;
gk=tadj_kca*gbar_kca*n;
ik=(1e-4)*gk*(v-ek);
sumCurrents+= ik;
sumConductivity+= gk;
;};
__device__ void CuBreakpointModel_km(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &n,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km) {
float gk,ninf,ntau;
float ik;
gk=tadj_km*gbar_km*n;
ik=(1e-4)*gk*(v-ek);
sumCurrents+= ik;
sumConductivity+= gk;
;};
__device__ void CuBreakpointModel_kv(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &n,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv) {
float gk,ninf,ntau;
float ik;
gk=tadj_kv*gbar_kv*n;
ik=(1e-4)*gk*(v-ek);
sumCurrents+= ik;
sumConductivity+= gk;
;};
__device__ void CuBreakpointModel_na(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &m,float &h,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na) {
float gna,hinf,htau,minf,mtau;
float ina;
gna=tadj_na*gbar_na*m*m*m*h;
ina=(1e-4)*gna*(v-ena);
sumCurrents+= ina;
sumConductivity+= gna;
;};
__device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float g_pas,float e_pas) {
float gpas;
float i;
i=g_pas*(v-e_pas);
sumCurrents+= i;
sumConductivity+= g_pas;
;};
// Kinetic:
| cedb3c2a5ad37948bb8888089dde83b6a417e8d8.cu | // Automatically generated CU for C:\Users\rben.KECK-CENTER\Documents\GitHub\NeuroGPU\URapNeuron\Mainen\runModel.hoc
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include "AllModels.cuh"
// Universals:
#define PI (3.1415927f)
#define R (8.31441f)
#define FARADAY (96485.309f)
#define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY)
// GGlobals
#define celsius (37.00000)
#define stoprun (0.00000)
#define clamp_resist (0.00100)
#define secondorder (0.00000)
// NGlobals:
#define q10_ca (2.3)
#define temp_ca (23)
#define tadj_ca (3.2094)
#define vmin_ca (-120)
#define vmax_ca (100)
#define vshift_ca (0)
#define depth_cad (0.1)
#define cainf_cad (0.0001)
#define taur_cad (200)
#define q10_kca (2.3)
#define temp_kca (23)
#define tadj_kca (3.2094)
#define vmin_kca (-120)
#define vmax_kca (100)
#define q10_km (2.3)
#define temp_km (23)
#define tadj_km (3.2094)
#define vmin_km (-120)
#define vmax_km (100)
#define q10_kv (2.3)
#define temp_kv (23)
#define tadj_kv (3.2094)
#define vmin_kv (-120)
#define vmax_kv (100)
#define q10_na (2.3)
#define temp_na (23)
#define tadj_na (3.2094)
#define vmin_na (-120)
#define vmax_na (100)
#define vshift_na (-5)
// Reversals:
#define eca (140.00000f)
#define ek (-90.00000f)
#define ena (60.00000f)
// Declarations:
__device__ void Cutrates_ca(float v ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau);
__device__ void Curates_ca(float vm ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau);
__device__ void Curates_kca(float cai,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca,float &a,float &b,float &ninf,float &ntau);
__device__ void Cutrates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau);
__device__ void Curates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau);
__device__ void Cutrates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau);
__device__ void Curates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau);
__device__ void Cutrates_na(float v,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau);
__device__ void Curates_na(float vm,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau);
float Cunernst(float ci,float co, float z) {
if (z == 0) {
return 0.;
}
if (ci <= 0.) {
return 1e6;
}else if (co <= 0.) {
return -1e6;
}else{
return ktf/z*log(co/ci);
}
}
// Functions:
__device__ float Cuefun_ca(float z) {
if (fabs(z) < 1e-4) {;
return 1 - z/2;
}else{;
return z/(exp(z) - 1);
};
};
__device__ float Cuefun_km(float z) {
if (fabs(z) < 1e-4) {;
return 1 - z/2;
}else{;
return z/(exp(z) - 1);
};
};
__device__ float Cuefun_kv(float z) {
if (fabs(z) < 1e-4) {;
return 1 - z/2;
}else{;
return z/(exp(z) - 1);
};
};
__device__ float Cutrap0_na(float v,float th,float a,float q) {
if (fabs((v-th)/q) > 1e-6) {;
return a * (v - th) / (1 - exp(-(v - th)/q));
} else {;
return a * q;
};
} ;
// Procedures:
__device__ void Cutrates_ca(float v ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau) {
Curates_ca(v,gbar_ca,cao_ca,hinf,htau,minf,mtau);
;};
__device__ void Curates_ca(float vm ,float gbar_ca,float cao_ca,float &hinf,float &htau,float &minf,float &mtau) {
float a, b;
/* removed tadj_ca recalculation */
a = 0.209*Cuefun_ca(-(27+vm)/3.8);
b = 0.94*exp((-75-vm)/17);
;
mtau = 1/tadj_ca/(a+b);
minf = a/(a+b);
;
a = 0.000457*exp((-13-vm)/50);
b = 0.0065/(exp((-vm-15)/28) + 1);
htau = 1/tadj_ca/(a+b);
hinf = a/(a+b);
;};
__device__ void Curates_kca(float cai,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca,float &a,float &b,float &ninf,float &ntau) {
;
a =pow((MYFTYPE) Ra_kca * cai,(MYFTYPE)caix_kca);
b = Rb_kca;
/* removed tadj_kca recalculation */
ntau = 1/tadj_kca/(a+b);
ninf = a/(a+b);
;
;};
__device__ void Cutrates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau) {
;
Curates_km(v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
;};
__device__ void Curates_km(float v ,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km,float &a,float &b,float &ninf,float &ntau) {
;
;
a = Ra_km * qa_km * Cuefun_km(-(v - tha_km)/qa_km);
;
b = Rb_km * qa_km * Cuefun_km((v - tha_km)/qa_km);
/* removed tadj_km recalculation */
ntau = 1/tadj_km/(a+b);
ninf = a/(a+b);
;};
__device__ void Cutrates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau) {
;
Curates_kv(v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
;};
__device__ void Curates_kv(float v ,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv,float &a,float &b,float &ninf,float &ntau) {
;
;
a = Ra_kv * qa_kv * Cuefun_kv(-(v - tha_kv)/qa_kv);
;
b = Rb_kv * qa_kv * Cuefun_kv((v - tha_kv)/qa_kv);
/* removed tadj_kv recalculation */
ntau = 1/tadj_kv/(a+b);
ninf = a/(a+b);
;};
__device__ void Cutrates_na(float v,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau) {
;
;
;
Curates_na(v,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
;};
__device__ void Curates_na(float vm,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na,float &hinf,float &htau,float &minf,float &mtau) {
float a, b;
a = Cutrap0_na(vm,tha_na,Ra_na,qa_na);
b = Cutrap0_na(-vm,-tha_na,Rb_na,qa_na);
/* removed tadj_na recalculation */
mtau = 1/tadj_na/(a+b);
minf = a/(a+b);
;
a = Cutrap0_na(vm,thi1_na,Rd_na,qi_na);
b = Cutrap0_na(-vm,-thi2_na,Rg_na,qi_na);
htau = 1/tadj_na/(a+b);
hinf = 1/(1+exp((vm-thinf_na)/qinf_na));
;};
// Inits:
__device__ void CuInitModel_ca(float v,float &m,float &h,float gbar_ca,float cao_ca, float cai, float &ica) {
float hinf,htau,minf,mtau;
/* removed tadj_ca recalculation */
Cutrates_ca(v+vshift_ca,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = minf;
h = hinf;
;};
__device__ void CuInitModel_cad(float v,float &ca, float ica, float &cai) {
ca = cainf_cad;
cai = ca;
;};
__device__ void CuInitModel_kca(float v,float &n,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca, float cai) {
float a,b,ninf,ntau;
Curates_kca(cai,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = ninf;
;};
__device__ void CuInitModel_km(float v,float &n,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km) {
float a,b,ninf,ntau;
/* removed tadj_km recalculation */
Cutrates_km(v,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = ninf;
;};
__device__ void CuInitModel_kv(float v,float &n,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv) {
float a,b,ninf,ntau;
/* removed tadj_kv recalculation */
Cutrates_kv(v,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = ninf;
;};
__device__ void CuInitModel_na(float v,float &m,float &h,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na) {
float hinf,htau,minf,mtau;
/* removed tadj_na recalculation */
Cutrates_na(v+vshift_na,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = minf;
h = hinf;
;};
__device__ void CuInitModel_pas(float v,float g_pas,float e_pas) {
g_pas = .001;
e_pas = -70 ;
;};
// Derivs:
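// State updates below follow the exponential-Euler (cnexp) form x += (1 - exp(-dt/tau))*(xinf - x).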
__device__ void CuDerivModel_ca(float dt, float v,float &m,float &h,float gbar_ca,float cao_ca, float cai, float &ica) {
float hinf,htau,minf,mtau;
Cutrates_ca ( v + vshift_ca ,gbar_ca,cao_ca,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0) ) ) / htau ) - h) ;
;}
__device__ void CuDerivModel_cad(float dt, float v,float &ca, float ica, float &cai) {
float drive_channel;
drive_channel = - ( 10000.0 ) * ica / ( 2.0 * FARADAY * depth_cad ) ;
if ( drive_channel <= 0. ) {
drive_channel = 0. ;
;}
ca = ca + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / taur_cad)))*(- ( drive_channel + ( ( cainf_cad ) ) / taur_cad ) / ( ( ( ( - 1.0) ) ) / taur_cad ) - ca) ;
cai = ca ;
;}
__device__ void CuDerivModel_kca(float dt, float v,float &n,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca, float cai) {
float a,b,ninf,ntau;
Curates_kca ( cai ,gbar_kca,caix_kca,Ra_kca,Rb_kca,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0) ) ) / ntau ) - n) ;
;}
__device__ void CuDerivModel_km(float dt, float v,float &n,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km) {
float a,b,ninf,ntau;
Cutrates_km ( v ,gbar_km,tha_km,qa_km,Ra_km,Rb_km,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0) ) ) / ntau ) - n) ;
;}
__device__ void CuDerivModel_kv(float dt, float v,float &n,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv) {
float a,b,ninf,ntau;
Cutrates_kv ( v ,gbar_kv,tha_kv,qa_kv,Ra_kv,Rb_kv,a,b,ninf,ntau);
n = n + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / ntau)))*(- ( ( ( ninf ) ) / ntau ) / ( ( ( ( - 1.0) ) ) / ntau ) - n) ;
;}
__device__ void CuDerivModel_na(float dt, float v,float &m,float &h,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na) {
float hinf,htau,minf,mtau;
Cutrates_na ( v + vshift_na ,gbar_na,tha_na,qa_na,Ra_na,Rb_na,thi1_na,thi2_na,qi_na,thinf_na,qinf_na,Rg_na,Rd_na,hinf,htau,minf,mtau);
m = m + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / mtau)))*(- ( ( ( minf ) ) / mtau ) / ( ( ( ( - 1.0) ) ) / mtau ) - m) ;
h = h + (1. - exp(dt*(( ( ( - 1.0 ) ) ) / htau)))*(- ( ( ( hinf ) ) / htau ) / ( ( ( ( - 1.0) ) ) / htau ) - h) ;
;}
// Breakpoints:
__device__ void CuBreakpointModel_ca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &m,float &h,float gbar_ca,float cao_ca, float cai, float &ica) {
float eca;
float gca,hinf,htau,minf,mtau;
gca=tadj_ca*gbar_ca*m*m*h;
ica=(1e-4)*gca*(v-eca);
sumCurrents+= ica;
sumConductivity+= gca;
;};
__device__ void CuBreakpointModel_cad(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &ca, float ica, float &cai) {
float gca;
;};
__device__ void CuBreakpointModel_kca(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &n,float gbar_kca,float caix_kca,float Ra_kca,float Rb_kca, float cai) {
float gca,gk,ninf,ntau;
float ik;
gk=tadj_kca*gbar_kca*n;
ik=(1e-4)*gk*(v-ek);
sumCurrents+= ik;
sumConductivity+= gk;
;};
__device__ void CuBreakpointModel_km(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &n,float gbar_km,float tha_km,float qa_km,float Ra_km,float Rb_km) {
float gk,ninf,ntau;
float ik;
gk=tadj_km*gbar_km*n;
ik=(1e-4)*gk*(v-ek);
sumCurrents+= ik;
sumConductivity+= gk;
;};
__device__ void CuBreakpointModel_kv(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &n,float gbar_kv,float tha_kv,float qa_kv,float Ra_kv,float Rb_kv) {
float gk,ninf,ntau;
float ik;
gk=tadj_kv*gbar_kv*n;
ik=(1e-4)*gk*(v-ek);
sumCurrents+= ik;
sumConductivity+= gk;
;};
__device__ void CuBreakpointModel_na(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float &m,float &h,float gbar_na,float tha_na,float qa_na,float Ra_na,float Rb_na,float thi1_na,float thi2_na,float qi_na,float thinf_na,float qinf_na,float Rg_na,float Rd_na) {
float gna,hinf,htau,minf,mtau;
float ina;
gna=tadj_na*gbar_na*m*m*m*h;
ina=(1e-4)*gna*(v-ena);
sumCurrents+= ina;
sumConductivity+= gna;
;};
__device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, float v,float g_pas,float e_pas) {
float gpas;
float i;
i=g_pas*(v-e_pas);
sumCurrents+= i;
sumConductivity+= g_pas;
;};
// Kinetic:
|
14f99b794420b4a13fa6b2fe02552d3dc1b2068c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include"E:\Program Files\MATLAB\R2012b\extern\include\mex.h"
#include <stdio.h>
#include <algorithm>
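// One thread per superpixel z: accumulate ModelSm[k]*exp(-ColorD)*SumG over all active models k into Result[z],
// store exp(-MainColor) in Result2 and the per-model weighted terms in Result3/Result4.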
__global__ void Transfer_kernel(double *Result, double* Result2,double * Result3,double * Result4,double *ModelNum,double* ModelSm,double *SpNum,double *ColorD,double *SumG,double *MaxDim,double* MainColor)
{
int z = threadIdx.x;
int MNum = (int)(*ModelNum), SNum = (int)(*SpNum),MDim=(int)(*MaxDim);
if (threadIdx.x >= SNum)
return;
for (int k = 0; k < MNum; k++)
{
if (ModelSm[k] != 0&&ColorD[z+MDim*k]!=1000)
{
Result[z] = Result[z] + ModelSm[k] *exp(-ColorD[z+MDim*k])*SumG[z+MDim*k];//
Result2[z+MDim*k]=exp(-MainColor[z+MDim*k]);
Result4[z+MDim*k]=ModelSm[k] *exp(-ColorD[z+MDim*k])*SumG[z+MDim*k];
Result3[z+MDim*k]=ModelSm[k] *exp(-ColorD[z+MDim*k])*SumG[z+MDim*k];
}
}
return;
}
void ModelT(double *Result, double* Result2,double * Result3,double * Result4,double *ModelNum,double* ModelSm,double *SpNum,double *MaxDim,double *ColorD,double *SumG,double* MainColor)
{
double * dev_Result,*dev_Result2,*dev_Result3,*dev_Result4;
double *dev_ModelNum;
double *dev_ModelSm;
double *dev_SpNum,*dev_MaxDim;
double *dev_ColorD, *dev_SumG,*dev_MainColor;
int MDim = (int)(*MaxDim);
int MNum = (int)(*ModelNum);
int Spnum = (int)(*SpNum);
hipMalloc((void **)&dev_Result, sizeof(double)* MDim);
hipMalloc((void **)&dev_Result2, sizeof(double)* MDim*MNum);
hipMalloc((void **)&dev_Result3, sizeof(double)* MDim*MNum);
hipMalloc((void **)&dev_Result4, sizeof(double)* MDim*MNum);
hipMalloc((void **)&dev_ModelSm, sizeof(double)* MNum);
hipMalloc((void **)&dev_SpNum, sizeof(double));
hipMalloc((void **)&dev_MaxDim, sizeof(double));
hipMalloc((void **)&dev_ModelNum, sizeof(double));
hipMalloc((void **)&dev_ColorD, sizeof(double)* MDim*MNum);
hipMalloc((void **)&dev_SumG, sizeof(double)* MDim*MNum);
hipMalloc((void **)&dev_MainColor, sizeof(double)* MDim*MNum);
hipMemcpy(dev_ModelNum, ModelNum, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_SpNum, SpNum, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_MaxDim, MaxDim, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_ModelSm, ModelSm, sizeof(double)* MNum, hipMemcpyHostToDevice);
hipMemcpy(dev_ColorD, ColorD, sizeof(double)* MDim*MNum, hipMemcpyHostToDevice);
hipMemcpy(dev_SumG, SumG, sizeof(double)* MDim*MNum, hipMemcpyHostToDevice);
hipMemcpy(dev_MainColor, MainColor, sizeof(double)* MDim*MNum, hipMemcpyHostToDevice);
dim3 threads(Spnum);
Transfer_kernel << <1, threads >> >(dev_Result, dev_Result2, dev_Result3,dev_Result4, dev_ModelNum, dev_ModelSm, dev_SpNum, dev_ColorD,dev_SumG,dev_MaxDim,dev_MainColor);
hipMemcpy(Result, dev_Result, sizeof(double)*MDim, hipMemcpyDeviceToHost);
hipMemcpy(Result2, dev_Result2, sizeof(double)* MDim*MNum, hipMemcpyDeviceToHost);
hipMemcpy(Result3, dev_Result3, sizeof(double)* MDim*MNum, hipMemcpyDeviceToHost);
hipMemcpy(Result4, dev_Result4, sizeof(double)* MDim*MNum, hipMemcpyDeviceToHost);
hipFree(dev_ModelSm);
hipFree(dev_ModelNum);
hipFree(dev_Result);
hipFree(dev_Result2);
hipFree(dev_Result3);
hipFree(dev_Result4);
hipFree(dev_SpNum);
hipFree(dev_MaxDim);
hipFree(dev_ColorD);
hipFree(dev_SumG);
hipFree(dev_MainColor);
}
| 14f99b794420b4a13fa6b2fe02552d3dc1b2068c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include"E:\Program Files\MATLAB\R2012b\extern\include\mex.h"
#include <stdio.h>
#include <algorithm>
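// One thread per superpixel z: accumulate ModelSm[k]*exp(-ColorD)*SumG over all active models k into Result[z],
// store exp(-MainColor) in Result2 and the per-model weighted terms in Result3/Result4.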
__global__ void Transfer_kernel(double *Result, double* Result2,double * Result3,double * Result4,double *ModelNum,double* ModelSm,double *SpNum,double *ColorD,double *SumG,double *MaxDim,double* MainColor)
{
int z = threadIdx.x;
int MNum = (int)(*ModelNum), SNum = (int)(*SpNum),MDim=(int)(*MaxDim);
if (threadIdx.x >= SNum)
return;
for (int k = 0; k < MNum; k++)
{
if (ModelSm[k] != 0&&ColorD[z+MDim*k]!=1000)
{
Result[z] = Result[z] + ModelSm[k] *exp(-ColorD[z+MDim*k])*SumG[z+MDim*k];//
Result2[z+MDim*k]=exp(-MainColor[z+MDim*k]);
Result4[z+MDim*k]=ModelSm[k] *exp(-ColorD[z+MDim*k])*SumG[z+MDim*k];
Result3[z+MDim*k]=ModelSm[k] *exp(-ColorD[z+MDim*k])*SumG[z+MDim*k];
}
}
return;
}
void ModelT(double *Result, double* Result2,double * Result3,double * Result4,double *ModelNum,double* ModelSm,double *SpNum,double *MaxDim,double *ColorD,double *SumG,double* MainColor)
{
double * dev_Result,*dev_Result2,*dev_Result3,*dev_Result4;
double *dev_ModelNum;
double *dev_ModelSm;
double *dev_SpNum,*dev_MaxDim;
double *dev_ColorD, *dev_SumG,*dev_MainColor;
int MDim = (int)(*MaxDim);
int MNum = (int)(*ModelNum);
int Spnum = (int)(*SpNum);
cudaMalloc((void **)&dev_Result, sizeof(double)* MDim);
cudaMalloc((void **)&dev_Result2, sizeof(double)* MDim*MNum);
cudaMalloc((void **)&dev_Result3, sizeof(double)* MDim*MNum);
cudaMalloc((void **)&dev_Result4, sizeof(double)* MDim*MNum);
cudaMalloc((void **)&dev_ModelSm, sizeof(double)* MNum);
cudaMalloc((void **)&dev_SpNum, sizeof(double));
cudaMalloc((void **)&dev_MaxDim, sizeof(double));
cudaMalloc((void **)&dev_ModelNum, sizeof(double));
cudaMalloc((void **)&dev_ColorD, sizeof(double)* MDim*MNum);
cudaMalloc((void **)&dev_SumG, sizeof(double)* MDim*MNum);
cudaMalloc((void **)&dev_MainColor, sizeof(double)* MDim*MNum);
cudaMemcpy(dev_ModelNum, ModelNum, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_SpNum, SpNum, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_MaxDim, MaxDim, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_ModelSm, ModelSm, sizeof(double)* MNum, cudaMemcpyHostToDevice);
cudaMemcpy(dev_ColorD, ColorD, sizeof(double)* MDim*MNum, cudaMemcpyHostToDevice);
cudaMemcpy(dev_SumG, SumG, sizeof(double)* MDim*MNum, cudaMemcpyHostToDevice);
cudaMemcpy(dev_MainColor, MainColor, sizeof(double)* MDim*MNum, cudaMemcpyHostToDevice);
dim3 threads(Spnum);
Transfer_kernel << <1, threads >> >(dev_Result, dev_Result2, dev_Result3,dev_Result4, dev_ModelNum, dev_ModelSm, dev_SpNum, dev_ColorD,dev_SumG,dev_MaxDim,dev_MainColor);
cudaMemcpy(Result, dev_Result, sizeof(double)*MDim, cudaMemcpyDeviceToHost);
cudaMemcpy(Result2, dev_Result2, sizeof(double)* MDim*MNum, cudaMemcpyDeviceToHost);
cudaMemcpy(Result3, dev_Result3, sizeof(double)* MDim*MNum, cudaMemcpyDeviceToHost);
cudaMemcpy(Result4, dev_Result4, sizeof(double)* MDim*MNum, cudaMemcpyDeviceToHost);
cudaFree(dev_ModelSm);
cudaFree(dev_ModelNum);
cudaFree(dev_Result);
cudaFree(dev_Result2);
cudaFree(dev_Result3);
cudaFree(dev_Result4);
cudaFree(dev_SpNum);
cudaFree(dev_MaxDim);
cudaFree(dev_ColorD);
cudaFree(dev_SumG);
cudaFree(dev_MainColor);
}
|
6c930f2d9baed2c4b685921c4b371257d2543de9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "latte/layers/softmax_layer.h"
namespace latte {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype *data,
Dtype *out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count, const int num,
const int channels,
const int spatial_dim,
const Dtype *channel_max, Dtype *data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype *data, Dtype *out) {
CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); }
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype *data,
Dtype *channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count, const int num,
const int channels, const int spatial_dim,
const Dtype *channel_sum, Dtype *data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype *data_1,
const Dtype *data_2, Dtype *channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s] *
data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
latte_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>)
, dim3(LATTE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0,
outer_num_, channels, inner_num_, top_data, scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>)
, dim3(LATTE_GET_BLOCKS(count)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0,
count, outer_num_, channels, inner_num_, scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(LATTE_GET_BLOCKS(count)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>)
, dim3(LATTE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0,
outer_num_, channels, inner_num_, top_data, scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>)
, dim3(LATTE_GET_BLOCKS(count)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0,
count, outer_num_, channels, inner_num_, scale_data, top_data);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace latte | 6c930f2d9baed2c4b685921c4b371257d2543de9.cu | #include <cfloat>
#include "latte/layers/softmax_layer.h"
namespace latte {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype *data,
Dtype *out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count, const int num,
const int channels,
const int spatial_dim,
const Dtype *channel_max, Dtype *data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype *data, Dtype *out) {
CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); }
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype *data,
Dtype *channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count, const int num,
const int channels, const int spatial_dim,
const Dtype *channel_sum, Dtype *data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype *data_1,
const Dtype *data_2, Dtype *channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s] *
data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
latte_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype>
<<<LATTE_GET_BLOCKS(outer_num_ * inner_num_), LATTE_CUDA_NUM_THREADS>>>(
outer_num_, channels, inner_num_, top_data, scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype>
<<<LATTE_GET_BLOCKS(count), LATTE_CUDA_NUM_THREADS>>>(
count, outer_num_, channels, inner_num_, scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<LATTE_GET_BLOCKS(count), LATTE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype>
<<<LATTE_GET_BLOCKS(outer_num_ * inner_num_), LATTE_CUDA_NUM_THREADS>>>(
outer_num_, channels, inner_num_, top_data, scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype>
<<<LATTE_GET_BLOCKS(count), LATTE_CUDA_NUM_THREADS>>>(
count, outer_num_, channels, inner_num_, scale_data, top_data);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace latte |
ffd8082e73be32753c8d61ba3bfc5ade08c3149e.hip | // !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <>
bool BatchMatMulOp<CUDAContext, DefaultEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(BatchMatMul, BatchMatMulOp<CUDAContext>);
#if TORCH_HIP_VERSION >= 9000
template <>
bool BatchMatMulOp<CUDAContext, TensorCoreEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
BatchMatMul,
TENSORCORE,
BatchMatMulOp<CUDAContext, TensorCoreEngine>);
#endif
} // namespace caffe2
| ffd8082e73be32753c8d61ba3bfc5ade08c3149e.cu | #include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <>
bool BatchMatMulOp<CUDAContext, DefaultEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(BatchMatMul, BatchMatMulOp<CUDAContext>);
#if CUDA_VERSION >= 9000
template <>
bool BatchMatMulOp<CUDAContext, TensorCoreEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
BatchMatMul,
TENSORCORE,
BatchMatMulOp<CUDAContext, TensorCoreEngine>);
#endif
} // namespace caffe2
|
b2fe52386e5596123912d67e10345f04ffce6085.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "TgvSolveEtaKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float alpha0 = 2;
float alpha1 = 2;
float *atensor = NULL;
hipMalloc(&atensor, XSIZE*YSIZE);
float *btensor = NULL;
hipMalloc(&btensor, XSIZE*YSIZE);
float *ctensor = NULL;
hipMalloc(&ctensor, XSIZE*YSIZE);
float *etau = NULL;
hipMalloc(&etau, XSIZE*YSIZE);
float *etav1 = NULL;
hipMalloc(&etav1, XSIZE*YSIZE);
float *etav2 = NULL;
hipMalloc(&etav2, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
TgvSolveEtaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, alpha0,alpha1,atensor,btensor,ctensor,etau,etav1,etav2,width,height,stride);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
TgvSolveEtaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, alpha0,alpha1,atensor,btensor,ctensor,etau,etav1,etav2,width,height,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
TgvSolveEtaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, alpha0,alpha1,atensor,btensor,ctensor,etau,etav1,etav2,width,height,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b2fe52386e5596123912d67e10345f04ffce6085.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "TgvSolveEtaKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float alpha0 = 2;
float alpha1 = 2;
float *atensor = NULL;
cudaMalloc(&atensor, XSIZE*YSIZE);
float *btensor = NULL;
cudaMalloc(&btensor, XSIZE*YSIZE);
float *ctensor = NULL;
cudaMalloc(&ctensor, XSIZE*YSIZE);
float *etau = NULL;
cudaMalloc(&etau, XSIZE*YSIZE);
float *etav1 = NULL;
cudaMalloc(&etav1, XSIZE*YSIZE);
float *etav2 = NULL;
cudaMalloc(&etav2, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
TgvSolveEtaKernel<<<gridBlock,threadBlock>>>(alpha0,alpha1,atensor,btensor,ctensor,etau,etav1,etav2,width,height,stride);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
TgvSolveEtaKernel<<<gridBlock,threadBlock>>>(alpha0,alpha1,atensor,btensor,ctensor,etau,etav1,etav2,width,height,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
TgvSolveEtaKernel<<<gridBlock,threadBlock>>>(alpha0,alpha1,atensor,btensor,ctensor,etau,etav1,etav2,width,height,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a4806fda641ac9dedd78d332a2f57b98dd757264.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#define NUM_STEPS 2048
#define BLOCK_SIZE 128
#define MAX_OPTIONS 1024
#define ELEMS_PER_THREAD (NUM_STEPS/BLOCK_SIZE)
/* at T_N = NUM_STEPS, there are (NUM_STEPS + 1) leaves*/
double cpuSecond( ) {
double sec;
struct timeval tp;
gettimeofday(&tp, NULL);
sec = (double) tp.tv_sec + (double) tp.tv_usec*1e-6;
return sec;
}
// cumulative standard normal distribution
__device__ __host__ double CND(double d) {
const double b = 0.2316419;
const double a1 = 0.31938153;
const double a2 = -0.356563782;
const double a3 = 1.781477937;
const double a4 = -1.821255978;
const double a5 = 1.330274429;
const double rsqrt2pi = 0.3989422804014327;
double K = 1.0/(1.0 + b*abs(d));
double ans;
ans = K*(a1 + K*(a2 + K*(a3 + K*(a4 + K*a5) ) ) );
ans *= rsqrt2pi*exp(-d*d/2.0);
if (d > 0) {
ans = 1.0 - ans;
}
return ans;
}
__host__ double
BlackScholes(double S0, double X, double r, double v, double T) {
double sqrtT = sqrt(T);
double d1 = ( log(S0/X) + (r + 0.5*v*v)*T )/(v*sqrtT);
double d2 = d1 - v*sqrtT;
double expRT = exp(-r*T);
double CE;
CE = S0*CND(d1) - X*expRT*CND(d2);
return CE;
}
/* CPU version of the binomial model */
__host__ double
binomial(double S, double X, double r, double v, double T, int N, double* V ) {
// S current stock price; X strike price;
// r, v the risk-free interest rate and volatility;
// T the expiry (unit yr); N the number of time steps in the binomial model.
// V[N+1] is the array to do the iteration;
// In the end, V[0] stores the price of the call option
double dt = T/(double) N;
double vdt = v*sqrt(dt);
double u = exp( vdt );
double d = 1.0/u;
double disc = exp( r * dt ); // discounting factor
double discr = 1.0/disc;
double pu = (disc - d)/(u - d); // risk-neutral/martingale probability
double pd = 1.0 - pu;
int i, j;
double Si; // intermediate stock price in the node
// initialize the CALL option value on expiry;
for (i = 0; i <= N; i++) {
Si = S * exp( vdt*(2*i - N) ); // S[0] lowest stock price at expiry.
V[i] = (Si - X > 0) ? (Si - X) : 0; // Call option, use "max(X-Si, 0.0)" for puts
}
// iterate backward of the binomial tree (j the time step).
for (j = N-1; j >= 0; j--) {
for (i = 0; i <= j; i++) {
V[i] = (pd * V[i] + pu * V[i+1]) * discr;
}
}
return V[0];
}
/* 1D grid, and 1D block; blocksize << NUM_STEPS
each block deals with only one option
each thread deals with part of the binomial tree */
__global__ void
binomialGPUv5(double* Sptr, double* Xptr, double* Cptr,
double r, double v, double T) {
int tx = threadIdx.x;
int bx = blockIdx.x;
double S = Sptr[bx];
double X = Xptr[bx];
double dt = T/(double) NUM_STEPS;
double vdt = v*sqrt(dt);
double u = exp(vdt);
double d = 1.0/u;
double disc = exp( r * dt ); // discounting factor
double discr = 1.0/disc;
double pu = (disc - d)/(u - d); // risk-neutral/martingale probability
double pd = 1.0 - pu;
int i, j, k;
double Si;
double call_loc[ELEMS_PER_THREAD + 1]; //local array
__shared__
double call_bound[BLOCK_SIZE + 1];
for (i = 0; i < ELEMS_PER_THREAD; i++) {
k = tx * ELEMS_PER_THREAD + i;
Si = S * exp( vdt * (2*k - NUM_STEPS) );
call_loc[i] = (Si - X > 0) ? (Si - X) : 0;
}
if (tx == BLOCK_SIZE - 1) {
Si = S * exp( vdt * NUM_STEPS);
call_bound[BLOCK_SIZE] = (Si - X > 0) ? (Si - X) : 0;
}
call_bound[tx] = call_loc[0];
__syncthreads();
call_loc[ELEMS_PER_THREAD] = call_bound[tx + 1];
__syncthreads();
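// Backward induction: each iteration retires one time step on this thread's slab of ELEMS_PER_THREAD nodes;
// slab boundaries are exchanged through shared memory, and higher-index threads stop early as the tree frontier shrinks.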
for (j = 0; j < (BLOCK_SIZE - tx)*ELEMS_PER_THREAD; j++) {
for (i = 0; i < ELEMS_PER_THREAD; i++) {
call_loc[i] = (pd*call_loc[i] + pu*call_loc[i+1])*discr;
}
call_bound[tx] = call_loc[0];
__syncthreads();
call_loc[ELEMS_PER_THREAD] = call_bound[tx + 1];
__syncthreads(); // must be there, otherwise, fail
}
if (tx == 0) {
Cptr[bx] = call_loc[0];
}
}
/* general uniform random number between [low, high] */
double UniRand(double low, double high) {
double t = (double) rand() / (double) RAND_MAX;
return (1.0 - t)*low + t*high;
}
/* test the GPU binomial model for European Call Pricing */
int main(int argc, char** argv) {
int OPT_N = MAX_OPTIONS;
double * Sptr_h;
double * Sptr_d;
double * Xptr_h;
double * Xptr_d;
double * Cptr_h;
double * Cptr_d;
double * Cptr_c;
double * Cptr_b; // from the Black-Scholes model
int size = OPT_N*sizeof(double);
int i;
double * Vptr_h; // used by the CPU versions
double r = 0.02; // risk-free rate
double v = 0.30; // volatility
double T = 1.00; // maturity
hipError_t error;
double tStart;
double tStop1, tStop2;
double sumDel = 0, sumRef = 0;
Sptr_h = (double *) malloc(size); // input
Xptr_h = (double *) malloc(size); // input
Cptr_h = (double *) malloc(size); // CPU version output
Cptr_c = (double *) malloc(size); // GPU version output
Cptr_b = (double *) malloc(size); // CPU Black-Scholes output
Vptr_h = (double *) malloc((NUM_STEPS+1)*sizeof(double));
for (i = 0; i < OPT_N; i++) {
Sptr_h[i] = UniRand(5.0, 30.0);
Xptr_h[i] = UniRand(1.0, 40.0);
}
hipMalloc( (void **) &Sptr_d, size);
hipMalloc( (void **) &Xptr_d, size);
hipMalloc( (void **) &Cptr_d, size);
hipMemcpy(Sptr_d, Sptr_h, size, hipMemcpyHostToDevice);
hipMemcpy(Xptr_d, Xptr_h, size, hipMemcpyHostToDevice);
dim3 dimGrid(MAX_OPTIONS);
dim3 dimBlock(BLOCK_SIZE);
printf("Depth of the tree NUM_STEPS = %d\n", NUM_STEPS);
printf("grid structure: <<<%d, %d>>>\n", dimGrid.x, dimBlock.x);
printf("Starting the GPU code...\n");
tStart = cpuSecond();
hipDeviceSynchronize();
hipLaunchKernelGGL(( binomialGPUv5), dim3(dimGrid), dim3(dimBlock), 0, 0, Sptr_d, Xptr_d, Cptr_d, r, v, T);
hipMemcpy(Cptr_c, Cptr_d, size, hipMemcpyDeviceToHost);
error = hipPeekAtLastError();
if (error != hipSuccess) {
printf("GPU code failed %s\n", hipGetErrorString(error) );
exit(-1);
} else {
tStop1 = cpuSecond() - tStart;
printf("GPU code finished within %12.6f seconds\n", tStop1);
}
tStart = cpuSecond();
for (i = 0; i < OPT_N; i++) {
Cptr_h[i] = binomial(Sptr_h[i], Xptr_h[i], r, v, T, NUM_STEPS, Vptr_h);
}
tStop2 = cpuSecond() - tStart;
printf("CPU code finished within %12.6f seconds\n", tStop2);
printf("Speed up you got %8.2f\n", tStop2/tStop1);
for (i = 0; i < OPT_N; i++) {
Cptr_b[i] = BlackScholes(Sptr_h[i], Xptr_h[i], r, v, T);
}
printf("Compare the GPU and CPU binary model now...\n");
for (i = 0; i < OPT_N; i++) {
if ( abs( Cptr_h[i] - Cptr_c[i] ) > 1e-8 ) {
printf("%d %8.2f %8.2f %12.6f %12.6f \n", i, Sptr_h[i], Xptr_h[i], Cptr_h[i], Cptr_c[i]);
printf("comparion failed\n");
exit(-1);
}
}
printf("passed comparison between GPU and CPU binomial model\n");
printf("Compare the binary model with Black-Scholes model now...\n");
for (i = 0; i < OPT_N; i++) {
sumDel += abs( Cptr_h[i] - Cptr_b[i] );
sumRef += Cptr_h[i];
}
if ( sumDel/sumRef > 1e-4 ) {
printf("Black-Scholes VS Binomial comparison failed\n");
exit(-1);
}
printf("passed comparison between binomial and Black-Scholes model\n");
printf("Here are outputs for the first 10 lines\n");
for (i = 0; i < 10; i++) {
printf("%8.2f, %8.2f, %10.4f, %10.4f, %10.4f\n",
Sptr_h[i], Xptr_h[i], Cptr_c[i], Cptr_h[i], Cptr_b[i]);
}
free(Sptr_h); free(Xptr_h); free(Cptr_h); free(Cptr_c); free(Cptr_b); free(Vptr_h);
hipFree(Sptr_d); hipFree(Xptr_d); hipFree(Cptr_d);
hipDeviceReset();
return(0);
}
| a4806fda641ac9dedd78d332a2f57b98dd757264.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#define NUM_STEPS 2048
#define BLOCK_SIZE 128
#define MAX_OPTIONS 1024
#define ELEMS_PER_THREAD (NUM_STEPS/BLOCK_SIZE)
/* at T_N = NUM_STEPS, there are (NUM_STEPS + 1) leaves*/
double cpuSecond( ) {
double sec;
struct timeval tp;
gettimeofday(&tp, NULL);
sec = (double) tp.tv_sec + (double) tp.tv_usec*1e-6;
return sec;
}
// cumulative standard normal distribution
__device__ __host__ double CND(double d) {
const double b = 0.2316419;
const double a1 = 0.31938153;
const double a2 = -0.356563782;
const double a3 = 1.781477937;
const double a4 = -1.821255978;
const double a5 = 1.330274429;
const double rsqrt2pi = 0.3989422804014327;
double K = 1.0/(1.0 + b*abs(d));
double ans;
ans = K*(a1 + K*(a2 + K*(a3 + K*(a4 + K*a5) ) ) );
ans *= rsqrt2pi*exp(-d*d/2.0);
if (d > 0) {
ans = 1.0 - ans;
}
return ans;
}
__host__ double
BlackScholes(double S0, double X, double r, double v, double T) {
double sqrtT = sqrt(T);
double d1 = ( log(S0/X) + (r + 0.5*v*v)*T )/(v*sqrtT);
double d2 = d1 - v*sqrtT;
double expRT = exp(-r*T);
double CE;
CE = S0*CND(d1) - X*expRT*CND(d2);
return CE;
}
/* CPU version of the binomial model */
__host__ double
binomial(double S, double X, double r, double v, double T, int N, double* V ) {
// S current stock price; X strike price;
// r, v the risk-free interest rate and volatility;
// T the expiry (unit yr); N the number of time steps in the binomial model.
// V[N+1] is the array to do the iteration;
// In the end, V[0] stores the price of the call option
double dt = T/(double) N;
double vdt = v*sqrt(dt);
double u = exp( vdt );
double d = 1.0/u;
double disc = exp( r * dt ); // discounting factor
double discr = 1.0/disc;
double pu = (disc - d)/(u - d); // risk-neutral/martingale probability
double pd = 1.0 - pu;
int i, j;
double Si; // intermediate stock price in the node
// initialize the CALL option value on expiry;
for (i = 0; i <= N; i++) {
Si = S * exp( vdt*(2*i - N) ); // S[0] lowest stock price at expiry.
V[i] = (Si - X > 0) ? (Si - X) : 0; // Call option, use "max(X-Si, 0.0)" for puts
}
// iterate backward of the binomial tree (j the time step).
for (j = N-1; j >= 0; j--) {
for (i = 0; i <= j; i++) {
V[i] = (pd * V[i] + pu * V[i+1]) * discr;
}
}
return V[0];
}
/* 1D grid, and 1D block; blocksize << NUM_STEPS
each block deals with only one option
each thread deals with part of the binomial tree */
__global__ void
binomialGPUv5(double* Sptr, double* Xptr, double* Cptr,
double r, double v, double T) {
int tx = threadIdx.x;
int bx = blockIdx.x;
double S = Sptr[bx];
double X = Xptr[bx];
double dt = T/(double) NUM_STEPS;
double vdt = v*sqrt(dt);
double u = exp(vdt);
double d = 1.0/u;
double disc = exp( r * dt ); // discounting factor
double discr = 1.0/disc;
double pu = (disc - d)/(u - d); // risk-neutral/martingale probability
double pd = 1.0 - pu;
int i, j, k;
double Si;
double call_loc[ELEMS_PER_THREAD + 1]; //local array
__shared__
double call_bound[BLOCK_SIZE + 1];
for (i = 0; i < ELEMS_PER_THREAD; i++) {
k = tx * ELEMS_PER_THREAD + i;
Si = S * exp( vdt * (2*k - NUM_STEPS) );
call_loc[i] = (Si - X > 0) ? (Si - X) : 0;
}
if (tx == BLOCK_SIZE - 1) {
Si = S * exp( vdt * NUM_STEPS);
call_bound[BLOCK_SIZE] = (Si - X > 0) ? (Si - X) : 0;
}
call_bound[tx] = call_loc[0];
__syncthreads();
call_loc[ELEMS_PER_THREAD] = call_bound[tx + 1];
__syncthreads();
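// Backward induction: each iteration retires one time step on this thread's slab of ELEMS_PER_THREAD nodes;
// slab boundaries are exchanged through shared memory, and higher-index threads stop early as the tree frontier shrinks.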
for (j = 0; j < (BLOCK_SIZE - tx)*ELEMS_PER_THREAD; j++) {
for (i = 0; i < ELEMS_PER_THREAD; i++) {
call_loc[i] = (pd*call_loc[i] + pu*call_loc[i+1])*discr;
}
call_bound[tx] = call_loc[0];
__syncthreads();
call_loc[ELEMS_PER_THREAD] = call_bound[tx + 1];
__syncthreads(); // must be there, otherwise, fail
}
if (tx == 0) {
Cptr[bx] = call_loc[0];
}
}
/* general uniform random number between [low, high] */
double UniRand(double low, double high) {
double t = (double) rand() / (double) RAND_MAX;
return (1.0 - t)*low + t*high;
}
/* test the GPU binomial model for European Call Pricing */
int main(int argc, char** argv) {
int OPT_N = MAX_OPTIONS;
double * Sptr_h;
double * Sptr_d;
double * Xptr_h;
double * Xptr_d;
double * Cptr_h;
double * Cptr_d;
double * Cptr_c;
double * Cptr_b; // from the Black-Scholes model
int size = OPT_N*sizeof(double);
int i;
double * Vptr_h; // used by the CPU versions
double r = 0.02; // risk-free rate
double v = 0.30; // volatility
double T = 1.00; // maturity
cudaError_t error;
double tStart;
double tStop1, tStop2;
double sumDel = 0, sumRef = 0;
Sptr_h = (double *) malloc(size); // input
Xptr_h = (double *) malloc(size); // input
Cptr_h = (double *) malloc(size); // CPU version output
Cptr_c = (double *) malloc(size); // GPU version output
Cptr_b = (double *) malloc(size); // CPU Black-Scholes output
Vptr_h = (double *) malloc((NUM_STEPS+1)*sizeof(double));
for (i = 0; i < OPT_N; i++) {
Sptr_h[i] = UniRand(5.0, 30.0);
Xptr_h[i] = UniRand(1.0, 40.0);
}
cudaMalloc( (void **) &Sptr_d, size);
cudaMalloc( (void **) &Xptr_d, size);
cudaMalloc( (void **) &Cptr_d, size);
cudaMemcpy(Sptr_d, Sptr_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(Xptr_d, Xptr_h, size, cudaMemcpyHostToDevice);
dim3 dimGrid(MAX_OPTIONS);
dim3 dimBlock(BLOCK_SIZE);
printf("Depth of the tree NUM_STEPS = %d\n", NUM_STEPS);
printf("grid structure: <<<%d, %d>>>\n", dimGrid.x, dimBlock.x);
printf("Starting the GPU code...\n");
tStart = cpuSecond();
cudaDeviceSynchronize();
binomialGPUv5<<<dimGrid, dimBlock>>>(Sptr_d, Xptr_d, Cptr_d, r, v, T);
cudaMemcpy(Cptr_c, Cptr_d, size, cudaMemcpyDeviceToHost);
error = cudaPeekAtLastError();
if (error != cudaSuccess) {
printf("GPU code failed %s\n", cudaGetErrorString(error) );
exit(-1);
} else {
tStop1 = cpuSecond() - tStart;
printf("GPU code finished within %12.6f seconds\n", tStop1);
}
tStart = cpuSecond();
for (i = 0; i < OPT_N; i++) {
Cptr_h[i] = binomial(Sptr_h[i], Xptr_h[i], r, v, T, NUM_STEPS, Vptr_h);
}
tStop2 = cpuSecond() - tStart;
printf("CPU code finished within %12.6f seconds\n", tStop2);
printf("Speed up you got %8.2f\n", tStop2/tStop1);
for (i = 0; i < OPT_N; i++) {
Cptr_b[i] = BlackScholes(Sptr_h[i], Xptr_h[i], r, v, T);
}
printf("Compare the GPU and CPU binary model now...\n");
for (i = 0; i < OPT_N; i++) {
if ( fabs( Cptr_h[i] - Cptr_c[i] ) > 1e-8 ) {
printf("%d %8.2f %8.2f %12.6f %12.6f \n", i, Sptr_h[i], Xptr_h[i], Cptr_h[i], Cptr_c[i]);
printf("comparion failed\n");
exit(-1);
}
}
printf("passed comparison between GPU and CPU binomial model\n");
printf("Compare the binary model with Black-Scholes model now...\n");
for (i = 0; i < OPT_N; i++) {
sumDel += fabs( Cptr_h[i] - Cptr_b[i] );
sumRef += Cptr_h[i];
}
if ( sumDel/sumRef > 1e-4 ) {
printf("Black-Scholes VS Binomial comparison failed\n");
exit(-1);
}
printf("passed comparison between binomial and Black-Scholes model\n");
printf("Here are outputs for the first 10 lines\n");
for (i = 0; i < 10; i++) {
printf("%8.2f, %8.2f, %10.4f, %10.4f, %10.4f\n",
Sptr_h[i], Xptr_h[i], Cptr_c[i], Cptr_h[i], Cptr_b[i]);
}
free(Sptr_h); free(Xptr_h); free(Cptr_h); free(Cptr_c); free(Cptr_b); free(Vptr_h);
cudaFree(Sptr_d); cudaFree(Xptr_d); cudaFree(Cptr_d);
cudaDeviceReset();
return(0);
}
|
ef81757d099abbe862e5f0a1a3a31edbac3f6942.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <exchcxx/xc_functional.hpp>
#include <string>
__global__ void scal_kernel( const int N, const double fact, const double* X_device, double* Y_device ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid < N ) Y_device[tid] = X_device[tid] * fact;
}
__global__ void add_scal_kernel( const int N, const double fact, const double* X_device, double* Y_device ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid < N ) Y_device[tid] += X_device[tid] * fact;
}
void scal_device( const int N, const double fact, const double* X_device, double* Y_device ) {
int threads = 1024;
int blocks = ::ceil( N / 1024. );
hipLaunchKernelGGL(( scal_kernel), dim3(blocks), dim3(threads) , 0, 0, N, fact, X_device, Y_device );
}
void scal_device( const int N, const double fact, const double* X_device, double* Y_device, hipStream_t& stream ) {
int threads = 1024;
int blocks = ::ceil( N / 1024. );
hipLaunchKernelGGL(( scal_kernel), dim3(blocks), dim3(threads), 0, stream , N, fact, X_device, Y_device );
}
void add_scal_device( const int N, const double fact, const double* X_device, double* Y_device ) {
int threads = 1024;
int blocks = ::ceil( N / 1024. );
hipLaunchKernelGGL(( add_scal_kernel), dim3(blocks), dim3(threads) , 0, 0, N, fact, X_device, Y_device );
}
void add_scal_device( const int N, const double fact, const double* X_device, double* Y_device, hipStream_t& stream ) {
int threads = 1024;
int blocks = ::ceil( N / 1024. );
hipLaunchKernelGGL(( add_scal_kernel), dim3(blocks), dim3(threads), 0, stream , N, fact, X_device, Y_device );
}
template <typename T = double>
T* safe_cuda_malloc( size_t N ) {
T* ptr = nullptr;
auto stat = hipMalloc( &ptr, N*sizeof(T) );
if( stat != hipSuccess ) throw std::runtime_error("Alloc Failed");
return ptr;
}
template <typename T>
void safe_zero( size_t len, T* ptr, hipStream_t stream ) {
auto stat = hipMemsetAsync( ptr, 0, len*sizeof(T), stream );
if( stat != hipSuccess )
throw std::runtime_error("Memset Failed : " + std::string(hipGetErrorString( stat )));
}
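// The eval_* drivers below all follow the same accumulation pattern: the functional value is
// the weighted sum over its component kernels, eps = sum_i coeff_i * eps_i (and likewise for
// vrho, vsigma, vlapl, vtau), with coeff_i = kernels_[i].first. Kernels exposing the "_inc"
// interface accumulate in place; otherwise the first kernel writes the output buffer directly
// and is rescaled with scal_device, while later kernels evaluate into the scratch buffers and
// are folded in with add_scal_device.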
namespace ExchCXX {
LDA_EXC_GENERATOR_DEVICE( XCFunctional::eval_exc_device ) const {
throw_if_not_sane();
assert( is_lda() );
size_t len_exc_buffer = exc_buffer_len( N );
double* eps_scr = nullptr;
if( kernels_.size() > 1 and not supports_inc_interface() )
eps_scr = safe_cuda_malloc( len_exc_buffer );
safe_zero( len_exc_buffer, eps, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, eps, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
kernels_[i].second.eval_exc_device(N, rho, eps_eval, stream);
if( i )
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
else
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
}
}
if( eps_scr ) hipFree( eps_scr );
}
LDA_EXC_VXC_GENERATOR_DEVICE( XCFunctional::eval_exc_vxc_device ) const {
throw_if_not_sane();
assert( is_lda() );
size_t len_exc_buffer = exc_buffer_len( N );
size_t len_vxc_buffer = vrho_buffer_len( N );
double* eps_scr(nullptr), *vxc_scr(nullptr);
if( kernels_.size() > 1 and not supports_inc_interface() ) {
eps_scr = safe_cuda_malloc( len_exc_buffer );
vxc_scr = safe_cuda_malloc( len_vxc_buffer );
}
safe_zero( len_exc_buffer, eps, stream );
safe_zero( len_vxc_buffer, vxc, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, eps, vxc, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
double* vxc_eval = i ? vxc_scr : vxc;
kernels_[i].second.eval_exc_vxc_device(N, rho, eps_eval, vxc_eval, stream);
if( i ) {
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
add_scal_device( len_vxc_buffer, kernels_[i].first, vxc_eval, vxc, stream );
} else {
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
scal_device( len_vxc_buffer, kernels_[i].first, vxc_eval, vxc, stream );
}
}
}
if( eps_scr ) hipFree( eps_scr );
if( vxc_scr ) hipFree( vxc_scr );
}
// GGA Interfaces
GGA_EXC_GENERATOR_DEVICE( XCFunctional::eval_exc_device ) const {
throw_if_not_sane();
assert( is_gga() );
size_t len_exc_buffer = exc_buffer_len( N );
double* eps_scr = nullptr;
if( kernels_.size() > 1 and not supports_inc_interface() )
eps_scr = safe_cuda_malloc( len_exc_buffer );
safe_zero( len_exc_buffer, eps, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, sigma, eps, stream
);
else
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, eps, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_device(N, rho, sigma, eps_eval, stream);
else
kernels_[i].second.eval_exc_device(N, rho, eps_eval, stream);
if( i )
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
else
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
}
}
if( eps_scr ) hipFree( eps_scr );
}
GGA_EXC_VXC_GENERATOR_DEVICE( XCFunctional::eval_exc_vxc_device ) const {
throw_if_not_sane();
assert( is_gga() );
size_t len_exc_buffer = exc_buffer_len(N);
size_t len_vrho_buffer = vrho_buffer_len(N);
size_t len_vsigma_buffer = vsigma_buffer_len(N);
double* eps_scr(nullptr), *vrho_scr(nullptr), *vsigma_scr(nullptr);
if( kernels_.size() > 1 and not supports_inc_interface() ) {
eps_scr = safe_cuda_malloc( len_exc_buffer );
vrho_scr = safe_cuda_malloc( len_vrho_buffer );
vsigma_scr = safe_cuda_malloc( len_vsigma_buffer );
}
safe_zero( len_exc_buffer, eps, stream );
safe_zero( len_vrho_buffer, vrho, stream );
safe_zero( len_vsigma_buffer, vsigma, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, sigma, eps, vrho,
vsigma, stream
);
else
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, eps, vrho, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
double* vrho_eval = i ? vrho_scr : vrho;
double* vsigma_eval = i ? vsigma_scr : vsigma;
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_device(N, rho, sigma, eps_eval, vrho_eval,
vsigma_eval, stream );
else
kernels_[i].second.eval_exc_vxc_device(N, rho, eps_eval, vrho_eval, stream);
if( i ) {
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
add_scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream);
if( kernels_[i].second.is_gga() )
add_scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
} else {
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream );
if( kernels_[i].second.is_gga() )
scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
}
}
}
if( eps_scr ) hipFree( eps_scr );
if( vrho_scr ) hipFree( vrho_scr );
if( vsigma_scr ) hipFree( vsigma_scr );
}
// mGGA Interfaces
MGGA_EXC_GENERATOR_DEVICE( XCFunctional::eval_exc_device ) const {
throw_if_not_sane();
assert( is_mgga() );
size_t len_exc_buffer = exc_buffer_len( N );
double* eps_scr = nullptr;
if( kernels_.size() > 1 and not supports_inc_interface() )
eps_scr = safe_cuda_malloc( len_exc_buffer );
safe_zero( len_exc_buffer, eps, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, sigma, lapl, tau, eps, stream
);
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, sigma, eps, stream
);
else
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, eps, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_device(N, rho, sigma, lapl, tau, eps_eval, stream);
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_device(N, rho, sigma, eps_eval, stream);
else
kernels_[i].second.eval_exc_device(N, rho, eps_eval, stream);
if( i )
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
else
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
}
}
if( eps_scr ) hipFree( eps_scr );
}
MGGA_EXC_VXC_GENERATOR_DEVICE( XCFunctional::eval_exc_vxc_device ) const {
throw_if_not_sane();
assert( is_mgga() );
size_t len_exc_buffer = exc_buffer_len(N);
size_t len_vrho_buffer = vrho_buffer_len(N);
size_t len_vsigma_buffer = vsigma_buffer_len(N);
size_t len_vlapl_buffer = vlapl_buffer_len(N);
size_t len_vtau_buffer = vtau_buffer_len(N);
double* eps_scr(nullptr), *vrho_scr(nullptr), *vsigma_scr(nullptr),
*vlapl_scr(nullptr), *vtau_scr(nullptr);
if( kernels_.size() > 1 and not supports_inc_interface() ) {
eps_scr = safe_cuda_malloc( len_exc_buffer );
vrho_scr = safe_cuda_malloc( len_vrho_buffer );
vsigma_scr = safe_cuda_malloc( len_vsigma_buffer );
vlapl_scr = safe_cuda_malloc( len_vlapl_buffer );
vtau_scr = safe_cuda_malloc( len_vtau_buffer );
}
safe_zero( len_exc_buffer, eps, stream );
safe_zero( len_vrho_buffer, vrho, stream );
safe_zero( len_vsigma_buffer, vsigma, stream );
safe_zero( len_vlapl_buffer, vlapl, stream );
safe_zero( len_vtau_buffer, vtau, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, sigma, lapl, tau, eps,
vrho, vsigma, vlapl, vtau, stream
);
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, sigma, eps, vrho,
vsigma, stream
);
else
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, eps, vrho, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
double* vrho_eval = i ? vrho_scr : vrho;
double* vsigma_eval = i ? vsigma_scr : vsigma;
double* vlapl_eval = i ? vlapl_scr : vlapl;
double* vtau_eval = i ? vtau_scr : vtau;
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_vxc_device(N, rho, sigma, lapl, tau, eps_eval,
vrho_eval, vsigma_eval, vlapl_eval, vtau_eval, stream );
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_device(N, rho, sigma, eps_eval, vrho_eval,
vsigma_eval, stream );
else
kernels_[i].second.eval_exc_vxc_device(N, rho, eps_eval, vrho_eval, stream);
if( i ) {
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
add_scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream );
if( kernels_[i].second.is_gga() )
add_scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
if( kernels_[i].second.is_mgga() ) {
add_scal_device( len_vlapl_buffer, kernels_[i].first, vlapl_eval, vlapl, stream );
add_scal_device( len_vtau_buffer, kernels_[i].first, vtau_eval, vtau, stream );
}
} else {
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream );
if( kernels_[i].second.is_gga() or kernels_[i].second.is_mgga() )
scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
if( kernels_[i].second.is_mgga() ) {
scal_device( len_vlapl_buffer, kernels_[i].first, vlapl_eval, vlapl, stream );
scal_device( len_vtau_buffer, kernels_[i].first, vtau_eval, vtau, stream );
}
}
}
}
if( eps_scr ) hipFree( eps_scr );
if( vrho_scr ) hipFree( vrho_scr );
if( vsigma_scr ) hipFree( vsigma_scr );
if( vlapl_scr ) hipFree( vlapl_scr );
if( vtau_scr ) hipFree( vtau_scr );
}
}
| ef81757d099abbe862e5f0a1a3a31edbac3f6942.cu | #include <exchcxx/xc_functional.hpp>
#include <string>
__global__ void scal_kernel( const int N, const double fact, const double* X_device, double* Y_device ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid < N ) Y_device[tid] = X_device[tid] * fact;
}
__global__ void add_scal_kernel( const int N, const double fact, const double* X_device, double* Y_device ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid < N ) Y_device[tid] += X_device[tid] * fact;
}
void scal_device( const int N, const double fact, const double* X_device, double* Y_device ) {
int threads = 1024;
int blocks = std::ceil( N / 1024. );
scal_kernel<<< blocks, threads >>>( N, fact, X_device, Y_device );
}
void scal_device( const int N, const double fact, const double* X_device, double* Y_device, cudaStream_t& stream ) {
int threads = 1024;
int blocks = std::ceil( N / 1024. );
scal_kernel<<< blocks, threads, 0, stream >>>( N, fact, X_device, Y_device );
}
void add_scal_device( const int N, const double fact, const double* X_device, double* Y_device ) {
int threads = 1024;
int blocks = std::ceil( N / 1024. );
add_scal_kernel<<< blocks, threads >>>( N, fact, X_device, Y_device );
}
void add_scal_device( const int N, const double fact, const double* X_device, double* Y_device, cudaStream_t& stream ) {
int threads = 1024;
int blocks = std::ceil( N / 1024. );
add_scal_kernel<<< blocks, threads, 0, stream >>>( N, fact, X_device, Y_device );
}
template <typename T = double>
T* safe_cuda_malloc( size_t N ) {
T* ptr = nullptr;
auto stat = cudaMalloc( &ptr, N*sizeof(T) );
if( stat != cudaSuccess ) throw std::runtime_error("Alloc Failed");
return ptr;
}
template <typename T>
void safe_zero( size_t len, T* ptr, cudaStream_t stream ) {
auto stat = cudaMemsetAsync( ptr, 0, len*sizeof(T), stream );
if( stat != cudaSuccess )
throw std::runtime_error("Memset Failed : " + std::string(cudaGetErrorString( stat )));
}
namespace ExchCXX {
LDA_EXC_GENERATOR_DEVICE( XCFunctional::eval_exc_device ) const {
throw_if_not_sane();
assert( is_lda() );
size_t len_exc_buffer = exc_buffer_len( N );
double* eps_scr = nullptr;
if( kernels_.size() > 1 and not supports_inc_interface() )
eps_scr = safe_cuda_malloc( len_exc_buffer );
safe_zero( len_exc_buffer, eps, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, eps, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
kernels_[i].second.eval_exc_device(N, rho, eps_eval, stream);
if( i )
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
else
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
}
}
if( eps_scr ) cudaFree( eps_scr );
}
LDA_EXC_VXC_GENERATOR_DEVICE( XCFunctional::eval_exc_vxc_device ) const {
throw_if_not_sane();
assert( is_lda() );
size_t len_exc_buffer = exc_buffer_len( N );
size_t len_vxc_buffer = vrho_buffer_len( N );
double* eps_scr(nullptr), *vxc_scr(nullptr);
if( kernels_.size() > 1 and not supports_inc_interface() ) {
eps_scr = safe_cuda_malloc( len_exc_buffer );
vxc_scr = safe_cuda_malloc( len_vxc_buffer );
}
safe_zero( len_exc_buffer, eps, stream );
safe_zero( len_vxc_buffer, vxc, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, eps, vxc, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
double* vxc_eval = i ? vxc_scr : vxc;
kernels_[i].second.eval_exc_vxc_device(N, rho, eps_eval, vxc_eval, stream);
if( i ) {
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
add_scal_device( len_vxc_buffer, kernels_[i].first, vxc_eval, vxc, stream );
} else {
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
scal_device( len_vxc_buffer, kernels_[i].first, vxc_eval, vxc, stream );
}
}
}
if( eps_scr ) cudaFree( eps_scr );
if( vxc_scr ) cudaFree( vxc_scr );
}
// GGA Interfaces
GGA_EXC_GENERATOR_DEVICE( XCFunctional::eval_exc_device ) const {
throw_if_not_sane();
assert( is_gga() );
size_t len_exc_buffer = exc_buffer_len( N );
double* eps_scr = nullptr;
if( kernels_.size() > 1 and not supports_inc_interface() )
eps_scr = safe_cuda_malloc( len_exc_buffer );
safe_zero( len_exc_buffer, eps, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, sigma, eps, stream
);
else
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, eps, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_device(N, rho, sigma, eps_eval, stream);
else
kernels_[i].second.eval_exc_device(N, rho, eps_eval, stream);
if( i )
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
else
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
}
}
if( eps_scr ) cudaFree( eps_scr );
}
GGA_EXC_VXC_GENERATOR_DEVICE( XCFunctional::eval_exc_vxc_device ) const {
throw_if_not_sane();
assert( is_gga() );
size_t len_exc_buffer = exc_buffer_len(N);
size_t len_vrho_buffer = vrho_buffer_len(N);
size_t len_vsigma_buffer = vsigma_buffer_len(N);
double* eps_scr(nullptr), *vrho_scr(nullptr), *vsigma_scr(nullptr);
if( kernels_.size() > 1 and not supports_inc_interface() ) {
eps_scr = safe_cuda_malloc( len_exc_buffer );
vrho_scr = safe_cuda_malloc( len_vrho_buffer );
vsigma_scr = safe_cuda_malloc( len_vsigma_buffer );
}
safe_zero( len_exc_buffer, eps, stream );
safe_zero( len_vrho_buffer, vrho, stream );
safe_zero( len_vsigma_buffer, vsigma, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, sigma, eps, vrho,
vsigma, stream
);
else
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, eps, vrho, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
double* vrho_eval = i ? vrho_scr : vrho;
double* vsigma_eval = i ? vsigma_scr : vsigma;
if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_device(N, rho, sigma, eps_eval, vrho_eval,
vsigma_eval, stream );
else
kernels_[i].second.eval_exc_vxc_device(N, rho, eps_eval, vrho_eval, stream);
if( i ) {
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
add_scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream);
if( kernels_[i].second.is_gga() )
add_scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
} else {
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream );
if( kernels_[i].second.is_gga() )
scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
}
}
}
if( eps_scr ) cudaFree( eps_scr );
if( vrho_scr ) cudaFree( vrho_scr );
if( vsigma_scr ) cudaFree( vsigma_scr );
}
// mGGA Interfaces
MGGA_EXC_GENERATOR_DEVICE( XCFunctional::eval_exc_device ) const {
throw_if_not_sane();
assert( is_mgga() );
size_t len_exc_buffer = exc_buffer_len( N );
double* eps_scr = nullptr;
if( kernels_.size() > 1 and not supports_inc_interface() )
eps_scr = safe_cuda_malloc( len_exc_buffer );
safe_zero( len_exc_buffer, eps, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, sigma, lapl, tau, eps, stream
);
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, sigma, eps, stream
);
else
kernels_[i].second.eval_exc_inc_device(
kernels_[i].first, N, rho, eps, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_device(N, rho, sigma, lapl, tau, eps_eval, stream);
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_device(N, rho, sigma, eps_eval, stream);
else
kernels_[i].second.eval_exc_device(N, rho, eps_eval, stream);
if( i )
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
else
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
}
}
if( eps_scr ) cudaFree( eps_scr );
}
MGGA_EXC_VXC_GENERATOR_DEVICE( XCFunctional::eval_exc_vxc_device ) const {
throw_if_not_sane();
assert( is_mgga() );
size_t len_exc_buffer = exc_buffer_len(N);
size_t len_vrho_buffer = vrho_buffer_len(N);
size_t len_vsigma_buffer = vsigma_buffer_len(N);
size_t len_vlapl_buffer = vlapl_buffer_len(N);
size_t len_vtau_buffer = vtau_buffer_len(N);
double* eps_scr(nullptr), *vrho_scr(nullptr), *vsigma_scr(nullptr),
*vlapl_scr(nullptr), *vtau_scr(nullptr);
if( kernels_.size() > 1 and not supports_inc_interface() ) {
eps_scr = safe_cuda_malloc( len_exc_buffer );
vrho_scr = safe_cuda_malloc( len_vrho_buffer );
vsigma_scr = safe_cuda_malloc( len_vsigma_buffer );
vlapl_scr = safe_cuda_malloc( len_vlapl_buffer );
vtau_scr = safe_cuda_malloc( len_vtau_buffer );
}
safe_zero( len_exc_buffer, eps, stream );
safe_zero( len_vrho_buffer, vrho, stream );
safe_zero( len_vsigma_buffer, vsigma, stream );
safe_zero( len_vlapl_buffer, vlapl, stream );
safe_zero( len_vtau_buffer, vtau, stream );
for( auto i = 0ul; i < kernels_.size(); ++i ) {
if( supports_inc_interface() ) {
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, sigma, lapl, tau, eps,
vrho, vsigma, vlapl, vtau, stream
);
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, sigma, eps, vrho,
vsigma, stream
);
else
kernels_[i].second.eval_exc_vxc_inc_device(
kernels_[i].first, N, rho, eps, vrho, stream
);
} else {
double* eps_eval = i ? eps_scr : eps;
double* vrho_eval = i ? vrho_scr : vrho;
double* vsigma_eval = i ? vsigma_scr : vsigma;
double* vlapl_eval = i ? vlapl_scr : vlapl;
double* vtau_eval = i ? vtau_scr : vtau;
if( kernels_[i].second.is_mgga() )
kernels_[i].second.eval_exc_vxc_device(N, rho, sigma, lapl, tau, eps_eval,
vrho_eval, vsigma_eval, vlapl_eval, vtau_eval, stream );
else if( kernels_[i].second.is_gga() )
kernels_[i].second.eval_exc_vxc_device(N, rho, sigma, eps_eval, vrho_eval,
vsigma_eval, stream );
else
kernels_[i].second.eval_exc_vxc_device(N, rho, eps_eval, vrho_eval, stream);
if( i ) {
add_scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
add_scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream );
if( kernels_[i].second.is_gga() )
add_scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
if( kernels_[i].second.is_mgga() ) {
add_scal_device( len_vlapl_buffer, kernels_[i].first, vlapl_eval, vlapl, stream );
add_scal_device( len_vtau_buffer, kernels_[i].first, vtau_eval, vtau, stream );
}
} else {
scal_device( len_exc_buffer, kernels_[i].first, eps_eval, eps, stream );
scal_device( len_vrho_buffer, kernels_[i].first, vrho_eval, vrho, stream );
if( kernels_[i].second.is_gga() or kernels_[i].second.is_mgga() )
scal_device( len_vsigma_buffer, kernels_[i].first, vsigma_eval, vsigma, stream );
if( kernels_[i].second.is_mgga() ) {
scal_device( len_vlapl_buffer, kernels_[i].first, vlapl_eval, vlapl, stream );
scal_device( len_vtau_buffer, kernels_[i].first, vtau_eval, vtau, stream );
}
}
}
}
if( eps_scr ) cudaFree( eps_scr );
if( vrho_scr ) cudaFree( vrho_scr );
if( vsigma_scr ) cudaFree( vsigma_scr );
if( vlapl_scr ) cudaFree( vlapl_scr );
if( vtau_scr ) cudaFree( vtau_scr );
}
}
|
67b498ceefb1d5d1574351a8a3e0328d437cd455.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by liang on 2/26/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda_device_runtime_api.h>
#include <gflags/gflags.h>
#include <groute/device/cta_scheduler.cuh>
#include <groute/device/queue.cuh>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/distributed_worklist.cuh>
#include <groute/dwl/workers.cuh>
#include <utils/graphs/traversal.h>
#include "pr_common.h"
#define EPSILON 0.01
namespace muti_pr {
struct RankData {
index_t node;
rank_t rank;
__host__ __device__ __forceinline__ RankData(index_t node, rank_t rank) : node(node), rank(rank) {}
__host__ __device__ __forceinline__ RankData() : node(UINT_MAX), rank(-1.0f) {}
};
typedef index_t local_work_t;
typedef RankData remote_work_t;
struct PageRankInit {
template<
typename WorkSource, typename WorkTarget,
typename TGraph, typename ResidualDatum, typename RankDatum>
__device__ static void work(
const WorkSource &work_source, WorkTarget &work_target,
const TGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks
) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // We need all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> np_local = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
current_ranks[node] = 1.0 - ALPHA; // Initial rank
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
if (np_local.size > 0) // Skip zero-degree nodes
{
rank_t update = ((1.0 - ALPHA) * ALPHA) / np_local.size; // Initial update
np_local.meta_data = update;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
np_local,
[&work_target, &graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (!graph.owns(dest) && prev == 0) // Push only remote nodes since we process all owned nodes at init step 2 anyhow
{
work_target.append_work(dest);
}
}
);
}
}
};
/// PR work with Collective Thread Array scheduling for exploiting nested parallelism
struct PageRankWork {
template<
typename WorkSource, typename WorkTarget,
typename TGraph, typename ResidualDatum, typename RankDatum>
__device__ static void work(
const WorkSource &work_source, WorkTarget &work_target,
const TGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks
) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> np_local = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
if (np_local.size > 0) // Skip zero-degree nodes
{
rank_t update = res * ALPHA / np_local.size;
np_local.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
np_local,
[&work_target, &graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
// The EPSILON test must be decided by the owner, so if
// dest belongs to another device the threshold is 0
rank_t threshold = graph.owns(dest) ? EPSILON : 0;
if (prev <= threshold && prev + update > threshold) {
work_target.append_work(dest);
}
}
);
}
}
};
struct DWCallbacks {
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<rank_t> m_residual;
public:
template<typename...UnusedData>
DWCallbacks(
const groute::graphs::dev::CSRGraphSeg &graph_seg,
const groute::graphs::dev::GraphDatum<rank_t> &residual,
const groute::graphs::dev::GraphDatumSeg<rank_t> ¤t_ranks,
UnusedData &... data)
:
m_graph_seg(graph_seg),
m_residual(residual) {
}
DWCallbacks(
const groute::graphs::dev::CSRGraphSeg &graph_seg,
const groute::graphs::dev::GraphDatum<rank_t> &residual)
:
m_graph_seg(graph_seg),
m_residual(residual) {
}
DWCallbacks() {}
__device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t &work) {
if (m_graph_seg.owns(work.node)) {
rank_t prev = atomicAdd(m_residual.get_item_ptr(work.node), work.rank);
return (prev + work.rank > EPSILON && prev <= EPSILON)
? groute::SF_Take
: groute::SF_None;
}
return groute::SF_Pass;
}
__device__ __forceinline__ bool should_defer(const local_work_t &work, const rank_t &global_threshold) {
return false; // TODO (research): How can soft-priority be helpful for PR?
}
__device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) {
return (m_graph_seg.owns(work))
? groute::SF_Take
: groute::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work) {
return RankData(work, atomicExch(m_residual.get_item_ptr(work), 0));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t &work) {
return work.node;
}
};
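// Split-flag semantics above (as inferred from this file, not from groute documentation):
// SF_Take appears to mean "enqueue locally", SF_Pass "forward towards the owning device" and
// SF_None "drop"; on_receive only takes remote updates that push the residual across EPSILON,
// mirroring the threshold test inside PageRankWork.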
struct Algo {
static const char *NameLower() { return "muti pr"; }
static const char *Name() { return "MUTI PR"; }
static void HostInit(
utils::traversal::Context<muti_pr::Algo> &context,
groute::graphs::multi::CSRGraphAllocator &graph_manager,
groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist) {
// PR starts with all nodes
distributed_worklist.ReportInitialWork(context.host_graph.nnodes, groute::Endpoint::HostEndpoint(0));
}
template<typename TGraph, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static void DeviceMemset(groute::Stream &stream, TGraph graph, ResidualDatum residual, RankDatum ranks) {
GROUTE_CUDA_CHECK(
hipMemsetAsync(residual.data_ptr, 0, residual.size * sizeof(rank_t), stream.cuda_stream));
GROUTE_CUDA_CHECK(
hipMemsetAsync(ranks.data_ptr, 0, ranks.size * sizeof(rank_t), stream.cuda_stream));
}
template<typename TGraph, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static void DeviceInit(
groute::Endpoint endpoint, groute::Stream &stream,
groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist,
groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks> *peer,
TGraph graph, ResidualDatum residual, RankDatum ranks) {
auto &workspace = peer->GetLocalQueue(0);
DWCallbacks callbacks = peer->GetDeviceCallbacks();
dim3 grid_dims, block_dims;
// Init step 1 (PageRankInit)
KernelSizing(grid_dims, block_dims, graph.owned_nnodes());
groute::WorkKernel<groute::dev::WorkSourceRange<index_t>, local_work_t, remote_work_t, DWCallbacks, PageRankInit, TGraph, ResidualDatum, RankDatum>
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
groute::dev::WorkSourceRange<index_t>(graph.owned_start_node(), graph.owned_nnodes()),
workspace.DeviceObject(),
callbacks,
graph, residual, ranks
);
auto output_seg = workspace.GetSeg(stream);
distributed_worklist.ReportWork(output_seg.GetSegmentSize(), 0, endpoint);
peer->SplitSend(output_seg, stream);
workspace.ResetAsync(stream);
// Init step 2 (PageRankWork starting from all owned nodes)
KernelSizing(grid_dims, block_dims, graph.owned_nnodes());
groute::WorkKernel<groute::dev::WorkSourceRange<index_t>, local_work_t, remote_work_t, DWCallbacks, PageRankWork, TGraph, ResidualDatum, RankDatum>
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
groute::dev::WorkSourceRange<index_t>(graph.owned_start_node(), graph.owned_nnodes()),
workspace.DeviceObject(),
callbacks,
graph, residual, ranks
);
output_seg = workspace.GetSeg(stream);
distributed_worklist.ReportWork(output_seg.GetSegmentSize(), graph.owned_nnodes(), endpoint);
peer->SplitSend(output_seg, stream);
workspace.ResetAsync(stream);
}
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks, UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks, UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
using NodeResidualDatumType = groute::graphs::multi::NodeOutputGlobalDatum<rank_t>;
using NodeRankDatumType = groute::graphs::multi::NodeOutputLocalDatum<rank_t>;
using WorkerType = groute::Worker<
local_work_t, remote_work_t, DWCallbacks, PageRankWork,
groute::graphs::dev::CSRGraphSeg, NodeResidualDatumType::DeviceObjectType, NodeRankDatumType::DeviceObjectType>;
template<typename TWorker>
using RunnerType = utils::traversal::Runner<
Algo, TWorker, DWCallbacks, local_work_t, remote_work_t,
NodeResidualDatumType, NodeRankDatumType>;
}
bool TestPageRankAsyncMulti() {
int ngpus = 2;
muti_pr::RunnerType<muti_pr::WorkerType> runner;
muti_pr::NodeResidualDatumType residual;
muti_pr::NodeRankDatumType ranks;
return runner(ngpus, 0, residual, ranks);
}
| 67b498ceefb1d5d1574351a8a3e0328d437cd455.cu | //
// Created by liang on 2/26/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda_device_runtime_api.h>
#include <gflags/gflags.h>
#include <groute/device/cta_scheduler.cuh>
#include <groute/device/queue.cuh>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/distributed_worklist.cuh>
#include <groute/dwl/workers.cuh>
#include <utils/graphs/traversal.h>
#include "pr_common.h"
#define EPSILON 0.01
namespace muti_pr {
struct RankData {
index_t node;
rank_t rank;
__host__ __device__ __forceinline__ RankData(index_t node, rank_t rank) : node(node), rank(rank) {}
__host__ __device__ __forceinline__ RankData() : node(UINT_MAX), rank(-1.0f) {}
};
typedef index_t local_work_t;
typedef RankData remote_work_t;
struct PageRankInit {
template<
typename WorkSource, typename WorkTarget,
typename TGraph, typename ResidualDatum, typename RankDatum>
__device__ static void work(
const WorkSource &work_source, WorkTarget &work_target,
const TGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks
) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // We need all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> np_local = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
current_ranks[node] = 1.0 - ALPHA; // Initial rank
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
if (np_local.size > 0) // Skip zero-degree nodes
{
rank_t update = ((1.0 - ALPHA) * ALPHA) / np_local.size; // Initial update
np_local.meta_data = update;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
np_local,
[&work_target, &graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
if (!graph.owns(dest) && prev == 0) // Push only remote nodes since we process all owned nodes at init step 2 anyhow
{
work_target.append_work(dest);
}
}
);
}
}
};
/// PR work with Collective Thread Array scheduling for exploiting nested parallelism
struct PageRankWork {
template<
typename WorkSource, typename WorkTarget,
typename TGraph, typename ResidualDatum, typename RankDatum>
__device__ static void work(
const WorkSource &work_source, WorkTarget &work_target,
const TGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks
) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> np_local = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
if (np_local.size > 0) // Skip zero-degree nodes
{
rank_t update = res * ALPHA / np_local.size;
np_local.meta_data = update;
}
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
np_local,
[&work_target, &graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
rank_t prev = atomicAdd(residual.get_item_ptr(dest), update);
// The EPSILON test must be decided by the owner, so if
// dest belongs to another device the threshold is 0
rank_t threshold = graph.owns(dest) ? EPSILON : 0;
if (prev <= threshold && prev + update > threshold) {
work_target.append_work(dest);
}
}
);
}
}
};
struct DWCallbacks {
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<rank_t> m_residual;
public:
template<typename...UnusedData>
DWCallbacks(
const groute::graphs::dev::CSRGraphSeg &graph_seg,
const groute::graphs::dev::GraphDatum<rank_t> &residual,
const groute::graphs::dev::GraphDatumSeg<rank_t> ¤t_ranks,
UnusedData &... data)
:
m_graph_seg(graph_seg),
m_residual(residual) {
}
DWCallbacks(
const groute::graphs::dev::CSRGraphSeg &graph_seg,
const groute::graphs::dev::GraphDatum<rank_t> &residual)
:
m_graph_seg(graph_seg),
m_residual(residual) {
}
DWCallbacks() {}
__device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t &work) {
if (m_graph_seg.owns(work.node)) {
rank_t prev = atomicAdd(m_residual.get_item_ptr(work.node), work.rank);
return (prev + work.rank > EPSILON && prev <= EPSILON)
? groute::SF_Take
: groute::SF_None;
}
return groute::SF_Pass;
}
__device__ __forceinline__ bool should_defer(const local_work_t &work, const rank_t &global_threshold) {
return false; // TODO (research): How can soft-priority be helpful for PR?
}
__device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) {
return (m_graph_seg.owns(work))
? groute::SF_Take
: groute::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work) {
return RankData(work, atomicExch(m_residual.get_item_ptr(work), 0));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t &work) {
return work.node;
}
};
struct Algo {
static const char *NameLower() { return "muti pr"; }
static const char *Name() { return "MUTI PR"; }
static void HostInit(
utils::traversal::Context<muti_pr::Algo> &context,
groute::graphs::multi::CSRGraphAllocator &graph_manager,
groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist) {
// PR starts with all nodes
distributed_worklist.ReportInitialWork(context.host_graph.nnodes, groute::Endpoint::HostEndpoint(0));
}
template<typename TGraph, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static void DeviceMemset(groute::Stream &stream, TGraph graph, ResidualDatum residual, RankDatum ranks) {
GROUTE_CUDA_CHECK(
cudaMemsetAsync(residual.data_ptr, 0, residual.size * sizeof(rank_t), stream.cuda_stream));
GROUTE_CUDA_CHECK(
cudaMemsetAsync(ranks.data_ptr, 0, ranks.size * sizeof(rank_t), stream.cuda_stream));
}
template<typename TGraph, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static void DeviceInit(
groute::Endpoint endpoint, groute::Stream &stream,
groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist,
groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks> *peer,
TGraph graph, ResidualDatum residual, RankDatum ranks) {
auto &workspace = peer->GetLocalQueue(0);
DWCallbacks callbacks = peer->GetDeviceCallbacks();
dim3 grid_dims, block_dims;
// Init step 1 (PageRankInit)
KernelSizing(grid_dims, block_dims, graph.owned_nnodes());
groute::WorkKernel<groute::dev::WorkSourceRange<index_t>, local_work_t, remote_work_t, DWCallbacks, PageRankInit, TGraph, ResidualDatum, RankDatum>
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
groute::dev::WorkSourceRange<index_t>(graph.owned_start_node(), graph.owned_nnodes()),
workspace.DeviceObject(),
callbacks,
graph, residual, ranks
);
auto output_seg = workspace.GetSeg(stream);
distributed_worklist.ReportWork(output_seg.GetSegmentSize(), 0, endpoint);
peer->SplitSend(output_seg, stream);
workspace.ResetAsync(stream);
// Init step 2 (PageRankWork starting from all owned nodes)
KernelSizing(grid_dims, block_dims, graph.owned_nnodes());
groute::WorkKernel<groute::dev::WorkSourceRange<index_t>, local_work_t, remote_work_t, DWCallbacks, PageRankWork, TGraph, ResidualDatum, RankDatum>
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
groute::dev::WorkSourceRange<index_t>(graph.owned_start_node(), graph.owned_nnodes()),
workspace.DeviceObject(),
callbacks,
graph, residual, ranks
);
output_seg = workspace.GetSeg(stream);
distributed_worklist.ReportWork(output_seg.GetSegmentSize(), graph.owned_nnodes(), endpoint);
peer->SplitSend(output_seg, stream);
workspace.ResetAsync(stream);
}
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks, UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks, UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
using NodeResidualDatumType = groute::graphs::multi::NodeOutputGlobalDatum<rank_t>;
using NodeRankDatumType = groute::graphs::multi::NodeOutputLocalDatum<rank_t>;
using WorkerType = groute::Worker<
local_work_t, remote_work_t, DWCallbacks, PageRankWork,
groute::graphs::dev::CSRGraphSeg, NodeResidualDatumType::DeviceObjectType, NodeRankDatumType::DeviceObjectType>;
template<typename TWorker>
using RunnerType = utils::traversal::Runner<
Algo, TWorker, DWCallbacks, local_work_t, remote_work_t,
NodeResidualDatumType, NodeRankDatumType>;
}
bool TestPageRankAsyncMulti() {
int ngpus = 2;
muti_pr::RunnerType<muti_pr::WorkerType> runner;
muti_pr::NodeResidualDatumType residual;
muti_pr::NodeRankDatumType ranks;
return runner(ngpus, 0, residual, ranks);
}
|
75a8289fad8f741fd3a021f814f06f46d884a92b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PicFlip_Kernel.cuh"
#include "Fluid_Kernel_Utils_hip.cuh"
#include "radixsort.cuh"
using namespace utils;
#define PICFLIP_PROFILE_EACH_KERNEL FALSE
#define XFER_USE_TRPLE_CUDA_DIM FALSE
#define PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL FALSE
__constant__ PicFlip_Params dParams;
#if PICFLIP_PROFILE_EACH_KERNEL
hipEvent_t pfprofile_start = NULL, pfprofile_stop = NULL;
#define PICFLIP_PROFILE_BEGIN_KERNEL hipEventRecord(pfprofile_start);
#define PICFLIP_PROFILE_END_KERNEL(description, identifier) { hipEventRecord(pfprofile_stop); \
hipEventSynchronize(pfprofile_stop); \
float milliseconds = 0; \
hipEventElapsedTime(&milliseconds, pfprofile_start, pfprofile_stop); \
printf("\tKernel Timing: %5.2fms (%s -> %d)\n", milliseconds, description, identifier); }
#else
#define PICFLIP_PROFILE_BEGIN_KERNEL
#define PICFLIP_PROFILE_END_KERNEL(description, identifier)
#endif
#if XFER_USE_TRPLE_CUDA_DIM
__device__
bool GetCellPos(int3& cell_pos)
{
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y,
blockIdx.z*blockDim.z + threadIdx.z
);
return (cell_pos.x < dParams.grid_resolution.x
&& cell_pos.y < dParams.grid_resolution.y
&& cell_pos.z < dParams.grid_resolution.z);
}
__device__
bool GetCellPosVel(int3& cell_pos)
{
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y,
blockIdx.z*blockDim.z + threadIdx.z
);
return (cell_pos.x <= dParams.grid_resolution.x
&& cell_pos.y <= dParams.grid_resolution.y
&& cell_pos.z <= dParams.grid_resolution.z);
}
#else
__device__
bool GetCellPos(int3& cell_pos)
{
int idx = blockIdx.y*blockDim.y + threadIdx.y;
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
idx % dParams.grid_resolution.y,
idx / dParams.grid_resolution.y
);
return (cell_pos.x < dParams.grid_resolution.x
&& cell_pos.y < dParams.grid_resolution.y
&& cell_pos.z < dParams.grid_resolution.z);
}
__device__
bool GetCellPosVel(int3& cell_pos)
{
int idx = blockIdx.y*blockDim.y + threadIdx.y;
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
idx % (dParams.grid_resolution.y + 1),
idx / (dParams.grid_resolution.y + 1)
);
return (cell_pos.x <= dParams.grid_resolution.x
&& cell_pos.y <= dParams.grid_resolution.y
&& cell_pos.z <= dParams.grid_resolution.z);
}
#endif
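// When XFER_USE_TRPLE_CUDA_DIM is FALSE, the y and z cell indices are folded into a single
// launch dimension and unpacked with mod/div against grid_resolution.y (or resolution.y + 1
// for the node-centred GetCellPosVel), so the kernels only need a 2D launch configuration.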
void picflip::set_parameters(PicFlip_Params *hParam)
{
#if PICFLIP_PROFILE_EACH_KERNEL
if (pfprofile_start == NULL)
{
hipEventCreate(&pfprofile_start);
hipEventCreate(&pfprofile_stop);
}
#endif
PicFlip_Params* dParamsArr;
//Copy Paramaters to device
gpuErrchk(hipGetSymbolAddress((void **)&dParamsArr, dParams));
gpuErrchk(hipMemcpy(dParamsArr, hParam, sizeof(PicFlip_Params), hipMemcpyHostToDevice));
//gpuErrchk(hipMemcpyToSymbol(dParamsArr, hParam, sizeof(SimulationParams)));
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
template <class T>
__device__
void writeTex(hipSurfaceObject_t surface, const T& data, int x, int y, int z)
{
surf3Dwrite(data, surface, (x)* sizeof(T), y, z);
}
template <class T>
__device__
void writeTexVel(hipSurfaceObject_t surface, const T& data, int x, int y, int z)
{
surf3Dwrite(data, surface, (x)* sizeof(T), y, z);
}
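// readTexInterpolate below does manual trilinear interpolation: it fetches the eight texels
// surrounding (xs, ys, zs) with nearest sampling and blends them along x, then y, then z
// using the fractional offsets fx/fy/fz.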
template <class T>
__device__
T readTexInterpolate(hipTextureObject_t texture, float xs, float ys, float zs)
{
/*return tex3D<T>(texture,
(xs + 0.5f),
(ys + 0.5f),
(zs + 0.5f));*/
//ys = dParams.grid_resolution.y - 1 - ys;
float x = floor(xs);
float y = floor(ys);
float z = floor(zs);
float fx = xs - x;
float fy = ys - y;
float fz = zs - z;
T ftl = tex3D<T>(texture, x + 0.5f, y + 0.5f, z + 0.5f);
T ftr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f, z + 0.5f);
T fbl = tex3D<T>(texture, x + 0.5f, y + 0.5f + 1.0f, z + 0.5f);
T fbr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f + 1.0f, z + 0.5f);
T btl = tex3D<T>(texture, x + 0.5f, y + 0.5f, z + 0.5f + 1.0f);
T btr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f, z + 0.5f + 1.0f);
T bbl = tex3D<T>(texture, x + 0.5f, y + 0.5f + 1.0f, z + 0.5f + 1.0f);
T bbr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f + 1.0f, z + 0.5f + 1.0f);
ftl = ftl * (1.0f - fx) + ftr * fx;
fbl = fbl * (1.0f - fx) + fbr * fx;
btl = btl * (1.0f - fx) + btr * fx;
bbl = bbl * (1.0f - fx) + bbr * fx;
ftl = ftl * (1.0f - fy) + fbl * fy;
btl = btl * (1.0f - fy) + bbl * fy;
return ftl * (1.0f - fz) + btl * fz;
}
template <class T>
__device__
T readTexNearest(hipTextureObject_t texture, float xs, float ys, float zs)
{
return tex3D<T>(texture,
(xs + 0.5f),
(ys + 0.5f),
(zs + 0.5f));
}
__device__
float h(const float& r) {
return fmaxf(1.0 - fabsf(r), 0.0);
}
__device__
float k(const float3& v) {
return h(v.x) * h(v.y) * h(v.z);
}
__device__
float kx(const float3& v) {
volatile float half = 0.5f;
return h(v.x) * h(v.y - half) * h(v.z - half);
}
__device__
float ky(const float3& v) {
volatile float half = 0.5f;
return h(v.x - 0.5f) * h(v.y) * h(v.z - 0.5f);
}
__device__
float kz(const float3& v) {
volatile float half = 0.5f;
return h(v.x - half) * h(v.y - half) * h(v.z);
}
__device__
float kw(const float3& v) {
volatile float half = 0.5f;
return h(v.x - half) * h(v.y - half) * h(v.z - half);
}
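// h() is a 1D tent (hat) kernel and kx/ky/kz/kw multiply three of them into trilinear weights.
// The 0.5 offsets move the sample point onto the x-, y- and z-face centres and the cell centre
// respectively, which is consistent with a staggered (MAC-style) velocity grid.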
__device__ void clamp_float3(float3& v, float minv, float maxv)
{
v.x = min(max(v.x, minv), maxv);
v.y = min(max(v.y, minv), maxv);
v.z = min(max(v.z, minv), maxv);
}
__device__ float3 get_wrld_posf(const float3& pos)
{
float3 wp;
wp.x = pos.x / dParams.world_to_grid.x - dParams.world_to_grid_offset.x;
wp.y = pos.y / dParams.world_to_grid.y - dParams.world_to_grid_offset.y;
wp.z = pos.z / dParams.world_to_grid.z - dParams.world_to_grid_offset.z;
return wp;
}
__device__ float3 get_cell_posf(const float3& pos)
{
float3 cp;
cp.x = (pos.x + dParams.world_to_grid_offset.x) * dParams.world_to_grid.x;
cp.y = (pos.y + dParams.world_to_grid_offset.y) * dParams.world_to_grid.y;
cp.z = (pos.z + dParams.world_to_grid_offset.z) * dParams.world_to_grid.z;
return cp;
}
__device__ int3 get_cell_pos(const float3& pos)
{
int3 cp;
cp.x = floor((pos.x + dParams.world_to_grid_offset.x) * dParams.world_to_grid.x);
cp.y = floor((pos.y + dParams.world_to_grid_offset.y) * dParams.world_to_grid.y);
cp.z = floor((pos.z + dParams.world_to_grid_offset.z) * dParams.world_to_grid.z);
return cp;
}
__device__ uint get_cell_hash(const int3& cell_pos)
{
return (cell_pos.z * dParams.grid_resolution.y + cell_pos.y) * dParams.grid_resolution.x + cell_pos.x;
}
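// Grid mapping summary: cell = (world_pos + world_to_grid_offset) * world_to_grid, with
// get_wrld_posf as its inverse; get_cell_hash linearises (x, y, z) with x fastest and z
// slowest: hash = (z * res.y + y) * res.x + x.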
__global__
void pfkernel_sort_initialize_keyvalues(uint particle_count, KeyValuePair* particle_keyvalues, float3* particle_positions)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
{
return;
}
int3 grid_pos = get_cell_pos(particle_positions[index]);
uint hash = get_cell_hash(grid_pos);
particle_keyvalues[index].key = hash;
particle_keyvalues[index].value = index;
}
__global__
void pfkernel_sort_reorder_and_insert_boundary_offsets(uint particle_count,
hipSurfaceObject_t particles_start, hipSurfaceObject_t particles_end,
KeyValuePair* boundary_sort_pair,
float3* in_positions, float3* out_positions, float3* in_velocities, float3* out_velocities)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
{
return;
}
KeyValuePair sort_pair = boundary_sort_pair[index];
//Load src position/velocity
out_positions[index] = in_positions[sort_pair.value];
out_velocities[index] = in_velocities[sort_pair.value];
//Calculate Offset
uint grid_xy = dParams.grid_resolution.x * dParams.grid_resolution.y;
uint3 cell_pos = make_uint3(
sort_pair.key % dParams.grid_resolution.x,
(sort_pair.key % grid_xy) / dParams.grid_resolution.x,
sort_pair.key / grid_xy
);
// -> key != prev_key => cell_start
if (index == 0 || sort_pair.key != boundary_sort_pair[index - 1].key)
{
//cell_offsets[sort_pair.key].x = index;
writeTex<uint>(particles_start, index, cell_pos.x, cell_pos.y, cell_pos.z);
}
// -> key != next_key => cell_end
if (index == particle_count - 1 || sort_pair.key != boundary_sort_pair[index + 1].key)
{
//cell_offsets[sort_pair.key].y = index + 1;
writeTex<uint>(particles_end, index + 1, cell_pos.x, cell_pos.y, cell_pos.z);
}
}
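// Together these two kernels implement the standard particle-binning pattern: key each particle
// by its cell hash, radix-sort the (hash, particle index) pairs, gather positions/velocities
// into the sorted order, and record each occupied cell's [start, end) particle range in the
// particles_start / particles_end surfaces.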
void picflip::sortByGridIndex(hipStream_t stream,
uint particle_count,
hipSurfaceObject_t particles_start, hipSurfaceObject_t particles_end,
KeyValuePair* keyvalues,
KeyValuePair* keyvalues_tmp,
float3* positions,
float3* positions_tmp,
float3* velocities,
float3* velocities_tmp)
{
if (particle_count == 0)
return;
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
pfkernel_sort_initialize_keyvalues << <num_blocks, num_threads, 0, stream >> >(particle_count, keyvalues, positions);
PICFLIP_PROFILE_END_KERNEL("kernel_sort_initialize_keyvalues", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
//Sort CellIndexes
PICFLIP_PROFILE_BEGIN_KERNEL
RadixSort(keyvalues, keyvalues_tmp, particle_count, 32, stream);
PICFLIP_PROFILE_END_KERNEL("RadixSort", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
//Reorder and insert boundary offsets
PICFLIP_PROFILE_BEGIN_KERNEL
pfkernel_sort_reorder_and_insert_boundary_offsets << <num_blocks, num_threads, 0, stream >> >(particle_count,
particles_start, particles_end,
keyvalues_tmp,
positions, positions_tmp, velocities, velocities_tmp);
PICFLIP_PROFILE_END_KERNEL("kernel_sort_reorder_and_insert_boundary_offsets", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
__global__
void kernel_transferToGridProgram(
hipTextureObject_t particles_start,
hipTextureObject_t particles_end,
float3* positions, float3* velocities,
hipSurfaceObject_t out_velgrid,
hipSurfaceObject_t out_veloriggrid)
{
int3 cell_pos;
if (!GetCellPosVel(cell_pos))
return;
/*const float3 xPosition = make_float3(cell_pos.x, cell_pos.y + 0.5, cell_pos.z + 0.5);
const float3 yPosition = make_float3(cell_pos.x + 0.5, cell_pos.y, cell_pos.z + 0.5);
const float3 zPosition = make_float3(cell_pos.x + 0.5, cell_pos.y + 0.5, cell_pos.z);
const float3 scalarPosition = make_float3(cell_pos.x + 0.5, cell_pos.y + 0.5, cell_pos.z + 0.5);*/
float4 out_weight = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float3 out_vel = make_float3(0.0f, 0.0f, 0.0f);
/*uint3 search_min;
search_min.x = max(cell_pos.x - 1, 0);
search_min.y = max(cell_pos.y - 1, 0);
search_min.z = max(cell_pos.z - 1, 0);
uint3 search_range;
search_range.x = min(cell_pos.x + 1, dParams.grid_resolution.x - 1) - search_min.x + 1;
search_range.y = min(cell_pos.y + 1, dParams.grid_resolution.y - 1) - search_min.y + 1;
search_range.z = min(cell_pos.z + 1, dParams.grid_resolution.z - 1) - search_min.z + 1;
search_range.z = search_range.x * search_range.y * search_range.z;
search_range.y = search_range.x * search_range.y;
uint i, hash;
float3 fs_range = make_float3(search_range.x, search_range.y, search_range.z);
int3 cell_offset;
for (i = 0; i < search_range.z; i++)
{
//Get Cell Particle List
int3 cell_offset = make_int3(
search_min.x + (i % search_range.x),
search_min.y + ((i % search_range.y) / search_range.x),
search_min.z + (i / search_range.y)
);
//const float eps = 0.001f;
//float fi = float(i);
//float ix = fi / fs_range.x;
//float iz = fi / fs_range.y;
//cell_offset.x = search_min.x + (int)(ix - floorf(ix + eps) + eps);
//cell_offset.z = search_min.y + (int)(iz + eps);
//cell_offset.y = search_min.z + (int)((iz - floorf(iz + eps)) / fs_range.x + eps);
//hash = ((search_min.z + (i / search_range.y)) * dParams.grid_resolution.y + (search_min.y + ((i % search_range.y) / search_range.x))) * dParams.grid_resolution.x + (search_min.x + (i % search_range.x));
uint hash = get_cell_hash(cell_offset);
cell_desc = grid_offsets[hash];
//Iterate over each particle
for (; cell_desc.x < cell_desc.y; cell_desc.x++)
{
v_velocity = velocities[cell_desc.x];
g_position = get_cell_posf(positions[cell_desc.x]);
g_position.x -= float(cell_pos.x);
g_position.y -= float(cell_pos.y);
g_position.z -= float(cell_pos.z);
cur_weight.x = kx(g_position);
cur_weight.y = ky(g_position);
cur_weight.z = kz(g_position);
cur_weight.w = kw(g_position);
out_vel.x += cur_weight.x * v_velocity.x;
out_vel.y += cur_weight.y * v_velocity.y;
out_vel.z += cur_weight.z * v_velocity.z;
out_weight += cur_weight;
}
}*/
//Search all neighbours -1, +1 (x, y ,z)
int3 cell_max = make_int3(min(cell_pos.x + 1, dParams.grid_resolution.x - 1),
min(cell_pos.y + 1, dParams.grid_resolution.y - 1),
min(cell_pos.z + 1, dParams.grid_resolution.z - 1));
int3 cell_offset;
for (cell_offset.z = max(cell_pos.z - 1, 0); cell_offset.z <= cell_max.z; cell_offset.z++)
{
for (cell_offset.y = max(cell_pos.y - 1, 0); cell_offset.y <= cell_max.y; cell_offset.y++)
{
for (cell_offset.x = max(cell_pos.x - 1, 0); cell_offset.x <= cell_max.x; cell_offset.x++)
{
//Get Cell Particle List
//uint2 cell_desc = grid_offsets[get_cell_hash(cell_offset)];
uint cell_itr = readTexNearest<uint>(particles_start, cell_offset.x, cell_offset.y, cell_offset.z);
uint cell_end = readTexNearest<uint>(particles_end, cell_offset.x, cell_offset.y, cell_offset.z);
//Iterate over each particle
for (; cell_itr < cell_end; cell_itr++)
{
float3 v_velocity = velocities[cell_itr];
float3 g_position = get_cell_posf(positions[cell_itr]);
g_position.x -= float(cell_pos.x);
g_position.y -= float(cell_pos.y);
g_position.z -= float(cell_pos.z);
float4 cur_weight = make_float4(
kx(g_position),
ky(g_position),
kz(g_position),
kw(g_position));
out_vel.x += cur_weight.x * v_velocity.x;
out_vel.y += cur_weight.y * v_velocity.y;
out_vel.z += cur_weight.z * v_velocity.z;
out_weight.x += cur_weight.x;
out_weight.y += cur_weight.y;
out_weight.z += cur_weight.z;
out_weight.w += cur_weight.w;
}
}
}
}
//Store Output (out_weight = [normalized vel].xyz + out_weight.w)
out_weight.x = (out_weight.x > 0) ? out_vel.x / out_weight.x : 0.0;
out_weight.y = (out_weight.y > 0) ? out_vel.y / out_weight.y : 0.0;
out_weight.z = (out_weight.z > 0) ? out_vel.z / out_weight.z : 0.0;
writeTexVel<float4>(out_velgrid, out_weight, cell_pos.x, cell_pos.y, cell_pos.z);
writeTexVel<float4>(out_veloriggrid, out_weight, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::transferToGridProgram(
hipStream_t stream,
uint3 grid_resolution,
uint particle_count,
hipTextureObject_t particles_start,
hipTextureObject_t particles_end,
hipSurfaceObject_t out_velgrid,
hipSurfaceObject_t out_veloriggrid,
float3* positions,
float3* velocities)
{
//Optimisations:
// - Reduce offset's list to ignore any empty cells
// - Run one warp per cell instead of one thread
if (particle_count == 0)
return;
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y + 1, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z + 1, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y + 1) * (grid_resolution.z + 1), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_transferToGridProgram << <num_blocks, num_threads, 0, stream >> >(particles_start, particles_end,
positions, velocities, out_velgrid, out_veloriggrid);
PICFLIP_PROFILE_END_KERNEL("kernel_transferToGridProgram", (grid_resolution.x + 1) * (grid_resolution.y + 1) * (grid_resolution.z + 1))
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
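// Marks every cell that contains at least one particle as fluid (bit 0 of the marker grid).
// Particles are assumed to be sorted by cell hash at this point, so only the first particle
// of each cell performs the surface write.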
__global__
void kernel_markProgram(uint particle_count, float3* positions, hipSurfaceObject_t out_markergrid)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
{
return;
}
float3 pos = positions[index];
int3 cell_pos = get_cell_pos(pos);
//Thread write safety
if (index > 0)
{
float3 pos2 = positions[index - 1];
int3 cell_pos2 = get_cell_pos(pos2);
if (cell_pos2.x == cell_pos.x
&& cell_pos2.y == cell_pos.y
&& cell_pos2.z == cell_pos.z)
{
return;
}
}
writeTex<unsigned char>(out_markergrid, 1, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::markProgram(hipStream_t stream,
uint particle_count,
float3* positions,
hipSurfaceObject_t out_markergrid)
{
if (particle_count == 0)
return;
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_markProgram << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, out_markergrid);
PICFLIP_PROFILE_END_KERNEL("kernel_markProgram", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
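// Applies external forces and tank boundary conditions on the staggered velocity grid:
// gravity is integrated over dt, the normal velocity is zeroed on the five closed walls,
// and on the top face only non-positive (downward) y velocity is allowed.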
__global__
void kernel_addForceProgram(hipTextureObject_t in_velgrid, hipSurfaceObject_t out_velgrid)
{
int3 cell_pos;
if (!GetCellPosVel(cell_pos))
return;
float4 vel = readTexNearest<float4>(in_velgrid, cell_pos.x, cell_pos.y, cell_pos.z);
//Apply Gravity
vel.y -= 9.81f * dParams.dt;
//Enforce Tank Boundary Conditions
if (cell_pos.x == 0) {
vel.x = 0.0;
}
if (cell_pos.x == dParams.grid_resolution.x) {
vel.x = 0.0;
}
if (cell_pos.y == 0) {
vel.y = 0.0f;
}
if (cell_pos.y == dParams.grid_resolution.y) {
vel.y = min(vel.y, 0.0);
}
if (cell_pos.z == 0) {
vel.z = 0.0;
}
if (cell_pos.z == dParams.grid_resolution.z) {
vel.z = 0.0;
}
writeTexVel<float4>(out_velgrid, vel, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::addForceProgram(hipStream_t stream,
uint3 grid_resolution,
hipTextureObject_t in_velgrid,
hipSurfaceObject_t out_velgrid)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y + 1, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z + 1, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y + 1) * (grid_resolution.z + 1), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_addForceProgram << <num_blocks, num_threads, 0, stream >> >(in_velgrid, out_velgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_addForceProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
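// Computes the divergence of the MAC velocity field for fluid cells only, using forward
// differences between opposite faces. Any particle weight (w channel) above the target
// particles_per_cell is subtracted as a soft volume-conservation / anti-clumping term.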
__global__
void kernel_divergenceProgram(
hipTextureObject_t in_velgrid,
hipTextureObject_t in_markergrid,
hipSurfaceObject_t out_divgrid)
{
int3 cell_pos;
if (!GetCellPos(cell_pos))
return;
float out_div = 0.0;
char marker = readTexNearest<char>(in_markergrid, cell_pos.x, cell_pos.y, cell_pos.z);
//Only compute divergence for fluid
if (marker & 1)
{
float3 idx = make_float3(cell_pos.x, cell_pos.y, cell_pos.z);
float4 vel_min = readTexInterpolate<float4>(in_velgrid, idx.x, idx.y, idx.z);
float3 vel_max;
vel_max.x = readTexInterpolate<float4>(in_velgrid, idx.x + 1, idx.y, idx.z).x;
vel_max.y = readTexInterpolate<float4>(in_velgrid, idx.x, idx.y + 1, idx.z).y;
vel_max.z = readTexInterpolate<float4>(in_velgrid, idx.x, idx.y, idx.z + 1).z;
out_div = ((vel_max.x - vel_min.x) + (vel_max.y - vel_min.y) + (vel_max.z - vel_min.z));
//float density = readTexNearest<float4>(in_weightgrid, index_x, index_y, index_z).w;
out_div -= max((vel_min.w - dParams.particles_per_cell), 0.0f); //volume conservation
//out_div *= 2.0f;
}
writeTex<float>(out_divgrid, out_div, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::divergenceProgram(hipStream_t stream,
uint3 grid_resolution,
hipTextureObject_t in_velgrid,
hipTextureObject_t in_markergrid,
hipSurfaceObject_t out_divgrid)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y) * (grid_resolution.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_divergenceProgram << <num_blocks, num_threads, 0, stream >> >(in_velgrid, in_markergrid, out_divgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_divergenceProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
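// Flags every cell inside the [grid_start, grid_start + grid_size) box as solid by setting
// bit 1 of the marker grid (bit 0 = fluid, bit 1 = solid).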
__global__
void kernel_marksolidcells(
uint3 grid_start,
uint3 grid_size,
hipTextureObject_t markergrid)
{
int idx = blockIdx.y*blockDim.y + threadIdx.y;
int3 cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
idx % grid_size.y,
idx / grid_size.y
);
if (cell_pos.x >= grid_size.x
|| cell_pos.y >= grid_size.y
|| cell_pos.z >= grid_size.z)
{
return;
}
cell_pos.x += grid_start.x;
cell_pos.y += grid_start.y;
cell_pos.z += grid_start.z;
unsigned char marker = readTexNearest<unsigned char>(markergrid, cell_pos.x, cell_pos.y, cell_pos.z);
marker |= 2;
writeTex<unsigned char>(markergrid, marker, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::marksolidcells(hipStream_t stream,
uint3 grid_start,
uint3 grid_size,
hipTextureObject_t markergrid)
{
dim3 num_threads;
dim3 num_blocks;
utils::compute_grid_size(grid_size.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_size.y) * (grid_size.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_marksolidcells << <num_blocks, num_threads, 0, stream >> >(grid_start, grid_size, markergrid);
PICFLIP_PROFILE_END_KERNEL("kernel_marksolidcells", grid_size.x * grid_size.y * grid_size.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
__global__
void kernel_initPressureGrid(
hipSurfaceObject_t presgrid,
hipSurfaceObject_t presgrid_old)
{
int3 cell_pos;
if (!GetCellPos(cell_pos))
return;
float op = 0.0;
//if (cell_pos.x >= 55 && cell_pos.x < 60)
//{
// op = 32.0f;
//}
surf3Dwrite(op, presgrid, cell_pos.x * sizeof(float), cell_pos.y, cell_pos.z);
surf3Dwrite(op, presgrid_old, cell_pos.x * sizeof(float), cell_pos.y, cell_pos.z);
}
void picflip::initPressureGrid(hipStream_t stream, uint3 grid_resolution, hipSurfaceObject_t presgrid, hipSurfaceObject_t presgrid_old)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y) * (grid_resolution.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_initPressureGrid << <num_blocks, num_threads, 0, stream >> >(presgrid, presgrid_old);
PICFLIP_PROFILE_END_KERNEL("kernel_initPressureGrid", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
__device__
float GetAdjPressure(const float& mypres, hipTextureObject_t in_markergrid, hipTextureObject_t in_presgrid, const int& cx, const int& cy, const int& cz)
{
#if 1
return readTexNearest<float>(in_presgrid, cx, cy, cz);
#else
unsigned char marker = readTexNearest<unsigned char>(in_markergrid, cx, cy, cz);
float pres = readTexNearest<float>(in_presgrid, cx, cy, cz);
if (marker & 2)
{
pres = mypres;
//printf("moo");
}
return pres;
#endif
}
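// One Jacobi iteration of the pressure Poisson solve: for each fluid cell the updated
// pressure is the average of its six neighbours minus the cell divergence,
// p = (sum(p_neighbours) - div) / 6. The host ping-pongs between two pressure
// textures/surfaces so every iteration reads the previous result.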
__global__
void kernel_jacobiProgram(
hipTextureObject_t in_markergrid,
hipTextureObject_t in_divgrid,
hipTextureObject_t in_presgrid,
hipSurfaceObject_t out_presgrid)
{
int3 cell_pos;
if (!GetCellPos(cell_pos))
return;
char marker = readTexNearest<char>(in_markergrid, cell_pos.x, cell_pos.y, cell_pos.z);
if ((marker & 1) == 0)
return;
//Only compute pressure for fluid cells
float out_pressure = 0.0;
//if (marker == 1)
{
float divergenceCenter = readTexNearest<float>(in_divgrid, cell_pos.x, cell_pos.y, cell_pos.z);
float mypres = readTexNearest<float>(in_presgrid, cell_pos.x, cell_pos.y, cell_pos.z);
float left = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x - 1, cell_pos.y, cell_pos.z);
float right = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x + 1, cell_pos.y, cell_pos.z);
float bottom = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y - 1, cell_pos.z);
float top = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y + 1, cell_pos.z);
float back = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y, cell_pos.z - 1);
float front = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y, cell_pos.z + 1);
out_pressure = (left + right + bottom + top + back + front - divergenceCenter) / 6.0;
}
writeTex<float>(out_presgrid, out_pressure, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::jacobiProgram(hipStream_t stream,
uint jacobi_iterations,
uint3 grid_resolution,
hipTextureObject_t in_markergrid,
hipTextureObject_t in_divgrid,
hipTextureObject_t presgridtex_ping,
hipSurfaceObject_t presgridsur_ping,
hipTextureObject_t presgridtex_pong,
hipSurfaceObject_t presgridsur_pong)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y) * (grid_resolution.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
PICFLIP_PROFILE_BEGIN_KERNEL
for (uint i = 0; i < jacobi_iterations; ++i)
{
bool swap = i % 2 == 0;
kernel_jacobiProgram << <num_blocks, num_threads, 0, stream >> >(
in_markergrid,
in_divgrid,
swap ? presgridtex_ping : presgridtex_pong,
swap ? presgridsur_pong : presgridsur_ping);
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
PICFLIP_PROFILE_END_KERNEL("kernel_jacobiProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
}
__device__
bool readInterpolated(float& out_float, hipTextureObject_t markergrid, hipTextureObject_t pressuregrid, float xs1, float ys1, float zs1, float xs2, float ys2, float zs2, float factor)
{
unsigned char uftl = tex3D<unsigned char >(markergrid, xs1 + 0.5f, ys1 + 0.5f, zs1 + 0.5f);
unsigned char uftr = tex3D<unsigned char >(markergrid, xs2 + 0.5f, ys2 + 0.5f, zs2 + 0.5f);
float ftl = tex3D<float>(pressuregrid, xs1 + 0.5f, ys1 + 0.5f, zs1 + 0.5f);
float ftr = tex3D<float>(pressuregrid, xs2 + 0.5f, ys2 + 0.5f, zs2 + 0.5f);
if (uftl & uftr & 2)
{
out_float = 0.0f;
return false;
}
else if (uftl & 2)
ftl = ftr;
else if (uftr & 2)
{
ftr = ftl;
}
out_float = ftl * (1.0f - factor) + ftr * factor;
return true;
}
__device__
float readInterpolatedPressures(hipTextureObject_t markergrid, hipTextureObject_t pressuregrid, float xs, float ys, float zs)
{
float x = floor(xs);
float y = floor(ys);
float z = floor(zs);
float fx = xs - x;
float fy = ys - y;
float fz = zs - z;
float ftl, fbl, btl, bbl;
bool bftl = readInterpolated(ftl, markergrid, pressuregrid,
x , y, z,
x + 1.0f, y, z, fx);
bool bfbl = readInterpolated(fbl, markergrid, pressuregrid,
x , y + 1.0f, z ,
x + 1.0f, y + 1.0f, z , fx);
bool bbtl = readInterpolated(btl, markergrid, pressuregrid,
x , y, z + 1.0f,
x + 1.0f, y, z + 1.0f, fx);
bool bbbl = readInterpolated(bbl, markergrid, pressuregrid,
x, y + 1.0f, z + 1.0f,
x + 1.0f, y + 1.0f, z + 1.0f, fx);
bool by1 = true, by2 = true;
if (!bftl && !bfbl)
by1 = false;
else if (!bftl)
ftl = fbl;
else if (!bfbl)
{
fbl = ftl;
}
if (!bbtl && !bbbl)
by2 = false;
else if (!bbtl)
btl = bbl;
else if (!bbbl)
{
bbl = btl;
}
ftl = ftl * (1.0f - fy) + fbl * fy;
btl = btl * (1.0f - fy) + bbl * fy;
if (!by1 && !by2)
return 0.0f;
else if (!by1)
ftl = btl;
else if (!by2)
{
btl = ftl;
}
return ftl * (1.0f - fz) + btl * fz;
}
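// Pressure projection: subtracts the pressure gradient (backward differences along each
// axis) from the staggered velocity field, leaving an approximately divergence-free field.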
__global__
void kernel_subtractProgram(
hipTextureObject_t in_markergrid,
hipTextureObject_t in_velgrid,
hipTextureObject_t in_presgrid,
hipSurfaceObject_t out_velgrid)
{
int3 cell_pos;
if (!GetCellPosVel(cell_pos))
return;
float3 idx = make_float3(cell_pos.x, cell_pos.y, cell_pos.z);
#if 1
float pres_max = readTexInterpolate<float>(in_presgrid, idx.x, idx.y, idx.z);
float3 pres_min;
pres_min.x = readTexInterpolate<float>(in_presgrid, idx.x - 1, idx.y, idx.z);
pres_min.y = readTexInterpolate<float>(in_presgrid, idx.x, idx.y - 1, idx.z);
pres_min.z = readTexInterpolate<float>(in_presgrid, idx.x, idx.y, idx.z - 1);
#else
float pres_max = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x, idx.y, idx.z);
float3 pres_min;
pres_min.x = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x - 1, idx.y, idx.z);
pres_min.y = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x, idx.y - 1, idx.z);
pres_min.z = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x, idx.y, idx.z - 1);
#endif
//compute gradient of pressure
float4 gradient;
gradient.x = (pres_max - pres_min.x);
gradient.y = (pres_max - pres_min.y);
gradient.z = (pres_max - pres_min.z);
gradient.w = 0.0f;
float4 velocity = readTexNearest<float4>(in_velgrid, cell_pos.x, cell_pos.y, cell_pos.z);
float4 newVelocity = velocity - gradient;
writeTexVel<float4>(out_velgrid, newVelocity, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::subtractProgram(hipStream_t stream,
uint3 grid_resolution,
hipTextureObject_t in_markergrid,
hipTextureObject_t in_velgrid,
hipTextureObject_t in_presgrid,
hipSurfaceObject_t out_velgrid)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y + 1, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z + 1, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y + 1) * (grid_resolution.z + 1), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_subtractProgram << <num_blocks, num_threads, 0, stream >> >(
in_markergrid,
in_velgrid,
in_presgrid,
out_velgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_subtractProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
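// Samples the staggered (MAC) velocity at an arbitrary cell-space position. Each component
// is offset by -0.5 along the two perpendicular axes so the trilinear lookup lands on that
// component's face centres.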
__device__
float3 sample_velocity(float3 cell_pos, hipTextureObject_t in_velgrid)
{
float3 vel;
vel.x = readTexInterpolate<float4>(in_velgrid, cell_pos.x, cell_pos.y - 0.5f, cell_pos.z - 0.5f).x;
vel.y = readTexInterpolate<float4>(in_velgrid, cell_pos.x - 0.5f, cell_pos.y, cell_pos.z - 0.5f).y;
vel.z = readTexInterpolate<float4>(in_velgrid, cell_pos.x - 0.5f, cell_pos.y - 0.5f, cell_pos.z).z;
return vel;
}
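// Grid -> particle (G2P) transfer. The FLIP velocity adds the grid velocity change (after
// forces and projection) to the particle's previous velocity; the PIC velocity is the new
// grid velocity sampled directly. The two are blended by dParams.flipness and the result is
// clamped so a particle cannot cross more than one cell per timestep (a CFL-style limit).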
__global__
void kernel_transferToParticlesProgram(uint particle_count, float3* positions, float3* velocities, float3* out_velocities,
hipTextureObject_t in_velgrid, hipTextureObject_t in_veloriggrid)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
float3 particle_vel = velocities[index];
float3 grid_vel = sample_velocity(cell_pos, in_velgrid);
float3 grid_vel_orig = sample_velocity(cell_pos, in_veloriggrid);
float3 grid_change = grid_vel - grid_vel_orig;
float3 flip_vel = particle_vel + grid_change;
float3 pic_vel = grid_vel;
//pic_vel.y += ((index % 5) / 5.0f) * 0.05f;
float3 new_vel = pic_vel * (1.0 - dParams.flipness) + flip_vel * dParams.flipness;
//CFL Condition
float3 cfl = make_float3(1.0, 1.0, 1.0) / (dParams.world_to_grid * dParams.dt);
new_vel.x = max(min(new_vel.x, cfl.x), -cfl.x);
new_vel.y = max(min(new_vel.y, cfl.y), -cfl.y);
new_vel.z = max(min(new_vel.z, cfl.z), -cfl.z);
out_velocities[index] = new_vel;
}
void picflip::transferToParticlesProgram(hipStream_t stream, uint particle_count, float3* positions, float3* velocities, float3* out_velocities,
hipTextureObject_t in_velgrid, hipTextureObject_t in_veloriggrid)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_transferToParticlesProgram << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, velocities, out_velocities,
in_velgrid, in_veloriggrid);
PICFLIP_PROFILE_END_KERNEL("kernel_transferToParticlesProgram", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
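// Advects particles through the grid velocity field with a midpoint (RK2) step: sample at
// the current position, advance half a dt, re-sample, then take the full step with the
// midpoint velocity. Final positions are clamped to stay just inside the grid walls.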
__global__
void kernel_advectProgram(uint particle_count, float3* positions, float3* velocities, float3* out_positions, hipTextureObject_t in_velgrid)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
float3 grid_vel = sample_velocity(cell_pos, in_velgrid);
float3 halfway_pos = pos + grid_vel * dParams.dt * 0.5f;
cell_pos = get_cell_posf(halfway_pos);
float3 halfway_vel = sample_velocity(cell_pos, in_velgrid);
//CFL Condition
float3 step = (halfway_vel * dParams.dt);
/* float3 cfl = make_float3(1.0f, 1.0f, 1.0f) / dParams.world_to_grid;
step.x = max(min(step.x, cfl.x), -cfl.x);
step.y = max(min(step.y, cfl.y), -cfl.y);
step.z = max(min(step.z, cfl.z), -cfl.z);*/
float3 new_pos = pos + step;
//Clamp positions inside grid
cell_pos = get_cell_posf(new_pos);
const float WALL_OFFSET = 0.01f;
cell_pos.x = min(max(cell_pos.x, WALL_OFFSET), dParams.grid_resolution.x - WALL_OFFSET);
cell_pos.y = min(max(cell_pos.y, WALL_OFFSET), dParams.grid_resolution.y - WALL_OFFSET);
cell_pos.z = min(max(cell_pos.z, WALL_OFFSET), dParams.grid_resolution.z - WALL_OFFSET);
new_pos = get_wrld_posf(cell_pos);
out_positions[index] = new_pos;
}
void picflip::advectProgram(hipStream_t stream, uint particle_count, float3* positions, float3* velocities, float3* out_positions, hipTextureObject_t in_velgrid)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_advectProgram << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, velocities, out_positions, in_velgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_advectProgram", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
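// Clamps particle positions to remain a small offset inside the simulation grid, working in
// cell space and converting back to world space afterwards.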
__global__
void kernel_enforceBoundaries(uint particle_count, float3* positions)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
const float WALL_OFFSET = 0.01f;
cell_pos.x = min(max(cell_pos.x, WALL_OFFSET), dParams.grid_resolution.x - WALL_OFFSET);
cell_pos.y = min(max(cell_pos.y, WALL_OFFSET), dParams.grid_resolution.y - WALL_OFFSET);
cell_pos.z = min(max(cell_pos.z, WALL_OFFSET), dParams.grid_resolution.z - WALL_OFFSET);
cell_pos = get_wrld_posf(cell_pos);
positions[index] = cell_pos;
}
void picflip::enforceBoundaries(hipStream_t stream, uint particle_count, float3* positions)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_enforceBoundaries << <num_blocks, num_threads, 0, stream >> >(particle_count, positions);
PICFLIP_PROFILE_END_KERNEL("kernel_enforceBoundaries", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
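// Generic 3D surface clear: writes 'val' into every texel of a surface object. 'step' is the
// texel size in bytes (sizeof(val)), because surf3Dwrite addresses the x coordinate in bytes.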
template <class T>
__global__ void Kernel_MemSetSurface(hipSurfaceObject_t out_grid, T val, uint step, uint3 grid_size)
{
/*int dimxy = grid_size.x * grid_size.y;
dim3 idx = thread3d(grid_size.x, dimxy);
if (idx.x >= grid_size.x || idx.y >= grid_size.y || idx.z >= grid_size.z)
return;*/
uint index_x = blockIdx.x*blockDim.x + threadIdx.x;
if (index_x >= grid_size.x)
return;
uint index_y = blockIdx.y*blockDim.y + threadIdx.y;
if (index_y >= grid_size.y)
return;
uint index_z = blockIdx.z*blockDim.z + threadIdx.z;
if (index_z >= grid_size.z)
return;
surf3Dwrite(val, out_grid, index_x * step, index_y, index_z);
}
template <class T>
void MemSetSurface(hipStream_t stream, hipSurfaceObject_t out_grid, T val, uint3 grid_size)
{
dim3 num_threads;
dim3 num_blocks;
utils::compute_grid_size(grid_size.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_size.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_size.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
PICFLIP_PROFILE_BEGIN_KERNEL
Kernel_MemSetSurface<T> << <num_blocks, num_threads, 0, stream >> >(
out_grid,
val,
sizeof(val),
grid_size);
PICFLIP_PROFILE_END_KERNEL("Kernel_MemSetSurface<T>", grid_size.x * grid_size.y * grid_size.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
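// Copies the w channel of the weight grid at each particle's cell into a per-particle float
// buffer. Despite the name, this appears to be the accumulated particle weight/density from
// the transfer step rather than the solved pressure.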
__global__
void kernel_copyPressures(uint particle_count, float3* positions, hipTextureObject_t in_weightgrid, float* out_pressures)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
float4 weight = tex3D<float4>(in_weightgrid, cell_pos.x, cell_pos.y, cell_pos.z);
out_pressures[index] = weight.w;
}
void copyPressures(hipStream_t stream, uint particle_count, float3* positions, hipTextureObject_t in_weightgrid, float* out_pressures)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_copyPressures << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, in_weightgrid, out_pressures);
PICFLIP_PROFILE_END_KERNEL("kernel_copyPressures", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
} | 75a8289fad8f741fd3a021f814f06f46d884a92b.cu | #include "PicFlip_Kernel.cuh"
#include "Fluid_Kernel_Utils.cuh"
#include "radixsort.cuh"
using namespace utils;
#define PICFLIP_PROFILE_EACH_KERNEL FALSE
#define XFER_USE_TRPLE_CUDA_DIM FALSE
#define PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL FALSE
__constant__ PicFlip_Params dParams;
#if PICFLIP_PROFILE_EACH_KERNEL
cudaEvent_t pfprofile_start = NULL, pfprofile_stop = NULL;
#define PICFLIP_PROFILE_BEGIN_KERNEL cudaEventRecord(pfprofile_start);
#define PICFLIP_PROFILE_END_KERNEL(description, identifier) { cudaEventRecord(pfprofile_stop); \
cudaEventSynchronize(pfprofile_stop); \
float milliseconds = 0; \
cudaEventElapsedTime(&milliseconds, pfprofile_start, pfprofile_stop); \
printf("\tKernel Timing: %5.2fms (%s -> %d)\n", milliseconds, description, identifier); }
#else
#define PICFLIP_PROFILE_BEGIN_KERNEL
#define PICFLIP_PROFILE_END_KERNEL(description, identifier)
#endif
#if XFER_USE_TRPLE_CUDA_DIM
__device__
bool GetCellPos(int3& cell_pos)
{
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y,
blockIdx.z*blockDim.z + threadIdx.z
);
return (cell_pos.x < dParams.grid_resolution.x
&& cell_pos.y < dParams.grid_resolution.y
&& cell_pos.z < dParams.grid_resolution.z);
}
__device__
bool GetCellPosVel(int3& cell_pos)
{
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y,
blockIdx.z*blockDim.z + threadIdx.z
);
return (cell_pos.x <= dParams.grid_resolution.x
&& cell_pos.y <= dParams.grid_resolution.y
&& cell_pos.z <= dParams.grid_resolution.z);
}
#else
__device__
bool GetCellPos(int3& cell_pos)
{
int idx = blockIdx.y*blockDim.y + threadIdx.y;
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
idx % dParams.grid_resolution.y,
idx / dParams.grid_resolution.y
);
return (cell_pos.x < dParams.grid_resolution.x
&& cell_pos.y < dParams.grid_resolution.y
&& cell_pos.z < dParams.grid_resolution.z);
}
__device__
bool GetCellPosVel(int3& cell_pos)
{
int idx = blockIdx.y*blockDim.y + threadIdx.y;
cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
idx % (dParams.grid_resolution.y + 1),
idx / (dParams.grid_resolution.y + 1)
);
return (cell_pos.x <= dParams.grid_resolution.x
&& cell_pos.y <= dParams.grid_resolution.y
&& cell_pos.z <= dParams.grid_resolution.z);
}
#endif
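// Uploads the host-side simulation parameters into the device __constant__ block dParams.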
void picflip::set_parameters(PicFlip_Params *hParam)
{
#if PICFLIP_PROFILE_EACH_KERNEL
if (pfprofile_start == NULL)
{
cudaEventCreate(&pfprofile_start);
cudaEventCreate(&pfprofile_stop);
}
#endif
PicFlip_Params* dParamsArr;
//Copy Paramaters to device
gpuErrchk(cudaGetSymbolAddress((void **)&dParamsArr, dParams));
gpuErrchk(cudaMemcpy(dParamsArr, hParam, sizeof(PicFlip_Params), cudaMemcpyHostToDevice));
//gpuErrchk(cudaMemcpyToSymbol(dParamsArr, hParam, sizeof(SimulationParams)));
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template <class T>
__device__
void writeTex(cudaSurfaceObject_t surface, const T& data, int x, int y, int z)
{
surf3Dwrite(data, surface, (x)* sizeof(T), y, z);
}
template <class T>
__device__
void writeTexVel(cudaSurfaceObject_t surface, const T& data, int x, int y, int z)
{
surf3Dwrite(data, surface, (x)* sizeof(T), y, z);
}
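// Manual trilinear interpolation from a 3D texture using eight nearest-texel fetches.
// The +0.5 offsets address texel centres; values are lerped along x, then y, then z.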
template <class T>
__device__
T readTexInterpolate(cudaTextureObject_t texture, float xs, float ys, float zs)
{
/*return tex3D<T>(texture,
(xs + 0.5f),
(ys + 0.5f),
(zs + 0.5f));*/
//ys = dParams.grid_resolution.y - 1 - ys;
float x = floor(xs);
float y = floor(ys);
float z = floor(zs);
float fx = xs - x;
float fy = ys - y;
float fz = zs - z;
T ftl = tex3D<T>(texture, x + 0.5f, y + 0.5f, z + 0.5f);
T ftr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f, z + 0.5f);
T fbl = tex3D<T>(texture, x + 0.5f, y + 0.5f + 1.0f, z + 0.5f);
T fbr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f + 1.0f, z + 0.5f);
T btl = tex3D<T>(texture, x + 0.5f, y + 0.5f, z + 0.5f + 1.0f);
T btr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f, z + 0.5f + 1.0f);
T bbl = tex3D<T>(texture, x + 0.5f, y + 0.5f + 1.0f, z + 0.5f + 1.0f);
T bbr = tex3D<T>(texture, x + 0.5f + 1.0f, y + 0.5f + 1.0f, z + 0.5f + 1.0f);
ftl = ftl * (1.0f - fx) + ftr * fx;
fbl = fbl * (1.0f - fx) + fbr * fx;
btl = btl * (1.0f - fx) + btr * fx;
bbl = bbl * (1.0f - fx) + bbr * fx;
ftl = ftl * (1.0f - fy) + fbl * fy;
btl = btl * (1.0f - fy) + bbl * fy;
return ftl * (1.0f - fz) + btl * fz;
}
template <class T>
__device__
T readTexNearest(cudaTextureObject_t texture, float xs, float ys, float zs)
{
return tex3D<T>(texture,
(xs + 0.5f),
(ys + 0.5f),
(zs + 0.5f));
}
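// 1D hat (tent) kernel h and its 3D tensor products. kx/ky/kz weight a particle for the
// x/y/z face-centred velocity samples (offset by half a cell on the other two axes), while
// kw weights the cell-centred scalar sample.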
__device__
float h(const float& r) {
return fmaxf(1.0 - fabsf(r), 0.0);
}
__device__
float k(const float3& v) {
return h(v.x) * h(v.y) * h(v.z);
}
__device__
float kx(const float3& v) {
volatile float half = 0.5f;
return h(v.x) * h(v.y - half) * h(v.z - half);
}
__device__
float ky(const float3& v) {
volatile float half = 0.5f;
return h(v.x - 0.5f) * h(v.y) * h(v.z - 0.5f);
}
__device__
float kz(const float3& v) {
volatile float half = 0.5f;
return h(v.x - half) * h(v.y - half) * h(v.z);
}
__device__
float kw(const float3& v) {
volatile float half = 0.5f;
return h(v.x - half) * h(v.y - half) * h(v.z - half);
}
__device__ void clamp_float3(float3& v, float minv, float maxv)
{
v.x = min(max(v.x, minv), maxv);
v.y = min(max(v.y, minv), maxv);
v.z = min(max(v.z, minv), maxv);
}
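// World <-> grid coordinate transforms: cell space is
// (world_pos + world_to_grid_offset) * world_to_grid, and get_cell_hash linearises an
// integer cell coordinate into the flat cell index (x varying fastest) used as the sort key.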
__device__ float3 get_wrld_posf(const float3& pos)
{
float3 wp;
wp.x = pos.x / dParams.world_to_grid.x - dParams.world_to_grid_offset.x;
wp.y = pos.y / dParams.world_to_grid.y - dParams.world_to_grid_offset.y;
wp.z = pos.z / dParams.world_to_grid.z - dParams.world_to_grid_offset.z;
return wp;
}
__device__ float3 get_cell_posf(const float3& pos)
{
float3 cp;
cp.x = (pos.x + dParams.world_to_grid_offset.x) * dParams.world_to_grid.x;
cp.y = (pos.y + dParams.world_to_grid_offset.y) * dParams.world_to_grid.y;
cp.z = (pos.z + dParams.world_to_grid_offset.z) * dParams.world_to_grid.z;
return cp;
}
__device__ int3 get_cell_pos(const float3& pos)
{
int3 cp;
cp.x = floor((pos.x + dParams.world_to_grid_offset.x) * dParams.world_to_grid.x);
cp.y = floor((pos.y + dParams.world_to_grid_offset.y) * dParams.world_to_grid.y);
cp.z = floor((pos.z + dParams.world_to_grid_offset.z) * dParams.world_to_grid.z);
return cp;
}
__device__ uint get_cell_hash(const int3& cell_pos)
{
return (cell_pos.z * dParams.grid_resolution.y + cell_pos.y) * dParams.grid_resolution.x + cell_pos.x;
}
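// Builds the (cell hash, particle index) pairs that the radix sort orders by cell, so that
// particles belonging to the same cell become contiguous in memory.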
__global__
void pfkernel_sort_initialize_keyvalues(uint particle_count, KeyValuePair* particle_keyvalues, float3* particle_positions)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
{
return;
}
int3 grid_pos = get_cell_pos(particle_positions[index]);
uint hash = get_cell_hash(grid_pos);
particle_keyvalues[index].key = hash;
particle_keyvalues[index].value = index;
}
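// Scatters positions/velocities into sorted order and records, for each occupied cell, the
// first and one-past-last particle index in the particles_start / particles_end surfaces by
// comparing each sorted key against its neighbours.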
__global__
void pfkernel_sort_reorder_and_insert_boundary_offsets(uint particle_count,
cudaSurfaceObject_t particles_start, cudaSurfaceObject_t particles_end,
KeyValuePair* boundary_sort_pair,
float3* in_positions, float3* out_positions, float3* in_velocities, float3* out_velocities)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
{
return;
}
KeyValuePair sort_pair = boundary_sort_pair[index];
//Load src position/velocity
out_positions[index] = in_positions[sort_pair.value];
out_velocities[index] = in_velocities[sort_pair.value];
//Calculate Offset
uint grid_xy = dParams.grid_resolution.x * dParams.grid_resolution.y;
uint3 cell_pos = make_uint3(
sort_pair.key % dParams.grid_resolution.x,
(sort_pair.key % grid_xy) / dParams.grid_resolution.x,
sort_pair.key / grid_xy
);
// -> key != prev_key => cell_start
if (index == 0 || sort_pair.key != boundary_sort_pair[index - 1].key)
{
//cell_offsets[sort_pair.key].x = index;
writeTex<uint>(particles_start, index, cell_pos.x, cell_pos.y, cell_pos.z);
}
// -> key != next_key => cell_end
if (index == particle_count - 1 || sort_pair.key != boundary_sort_pair[index + 1].key)
{
//cell_offsets[sort_pair.key].y = index + 1;
writeTex<uint>(particles_end, index + 1, cell_pos.x, cell_pos.y, cell_pos.z);
}
}
void picflip::sortByGridIndex(cudaStream_t stream,
uint particle_count,
cudaSurfaceObject_t particles_start, cudaSurfaceObject_t particles_end,
KeyValuePair* keyvalues,
KeyValuePair* keyvalues_tmp,
float3* positions,
float3* positions_tmp,
float3* velocities,
float3* velocities_tmp)
{
if (particle_count == 0)
return;
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
pfkernel_sort_initialize_keyvalues << <num_blocks, num_threads, 0, stream >> >(particle_count, keyvalues, positions);
PICFLIP_PROFILE_END_KERNEL("kernel_sort_initialize_keyvalues", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
//Sort CellIndexes
PICFLIP_PROFILE_BEGIN_KERNEL
RadixSort(keyvalues, keyvalues_tmp, particle_count, 32, stream);
PICFLIP_PROFILE_END_KERNEL("RadixSort", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
//Reorder and insert boundary offsets
PICFLIP_PROFILE_BEGIN_KERNEL
pfkernel_sort_reorder_and_insert_boundary_offsets << <num_blocks, num_threads, 0, stream >> >(particle_count,
particles_start, particles_end,
keyvalues_tmp,
positions, positions_tmp, velocities, velocities_tmp);
PICFLIP_PROFILE_END_KERNEL("kernel_sort_reorder_and_insert_boundary_offsets", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_transferToGridProgram(
cudaTextureObject_t particles_start,
cudaTextureObject_t particles_end,
float3* positions, float3* velocities,
cudaSurfaceObject_t out_velgrid,
cudaSurfaceObject_t out_veloriggrid)
{
int3 cell_pos;
if (!GetCellPosVel(cell_pos))
return;
/*const float3 xPosition = make_float3(cell_pos.x, cell_pos.y + 0.5, cell_pos.z + 0.5);
const float3 yPosition = make_float3(cell_pos.x + 0.5, cell_pos.y, cell_pos.z + 0.5);
const float3 zPosition = make_float3(cell_pos.x + 0.5, cell_pos.y + 0.5, cell_pos.z);
const float3 scalarPosition = make_float3(cell_pos.x + 0.5, cell_pos.y + 0.5, cell_pos.z + 0.5);*/
float4 out_weight = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float3 out_vel = make_float3(0.0f, 0.0f, 0.0f);
/*uint3 search_min;
search_min.x = max(cell_pos.x - 1, 0);
search_min.y = max(cell_pos.y - 1, 0);
search_min.z = max(cell_pos.z - 1, 0);
uint3 search_range;
search_range.x = min(cell_pos.x + 1, dParams.grid_resolution.x - 1) - search_min.x + 1;
search_range.y = min(cell_pos.y + 1, dParams.grid_resolution.y - 1) - search_min.y + 1;
search_range.z = min(cell_pos.z + 1, dParams.grid_resolution.z - 1) - search_min.z + 1;
search_range.z = search_range.x * search_range.y * search_range.z;
search_range.y = search_range.x * search_range.y;
uint i, hash;
float3 fs_range = make_float3(search_range.x, search_range.y, search_range.z);
int3 cell_offset;
for (i = 0; i < search_range.z; i++)
{
//Get Cell Particle List
int3 cell_offset = make_int3(
search_min.x + (i % search_range.x),
search_min.y + ((i % search_range.y) / search_range.x),
search_min.z + (i / search_range.y)
);
//const float eps = 0.001f;
//float fi = float(i);
//float ix = fi / fs_range.x;
//float iz = fi / fs_range.y;
//cell_offset.x = search_min.x + (int)(ix - floorf(ix + eps) + eps);
//cell_offset.z = search_min.y + (int)(iz + eps);
//cell_offset.y = search_min.z + (int)((iz - floorf(iz + eps)) / fs_range.x + eps);
//hash = ((search_min.z + (i / search_range.y)) * dParams.grid_resolution.y + (search_min.y + ((i % search_range.y) / search_range.x))) * dParams.grid_resolution.x + (search_min.x + (i % search_range.x));
uint hash = get_cell_hash(cell_offset);
cell_desc = grid_offsets[hash];
//Iterate over each particle
for (; cell_desc.x < cell_desc.y; cell_desc.x++)
{
v_velocity = velocities[cell_desc.x];
g_position = get_cell_posf(positions[cell_desc.x]);
g_position.x -= float(cell_pos.x);
g_position.y -= float(cell_pos.y);
g_position.z -= float(cell_pos.z);
cur_weight.x = kx(g_position);
cur_weight.y = ky(g_position);
cur_weight.z = kz(g_position);
cur_weight.w = kw(g_position);
out_vel.x += cur_weight.x * v_velocity.x;
out_vel.y += cur_weight.y * v_velocity.y;
out_vel.z += cur_weight.z * v_velocity.z;
out_weight += cur_weight;
}
}*/
//Search all neighbours -1, +1 (x, y ,z)
int3 cell_max = make_int3(min(cell_pos.x + 1, dParams.grid_resolution.x - 1),
min(cell_pos.y + 1, dParams.grid_resolution.y - 1),
min(cell_pos.z + 1, dParams.grid_resolution.z - 1));
int3 cell_offset;
for (cell_offset.z = max(cell_pos.z - 1, 0); cell_offset.z <= cell_max.z; cell_offset.z++)
{
for (cell_offset.y = max(cell_pos.y - 1, 0); cell_offset.y <= cell_max.y; cell_offset.y++)
{
for (cell_offset.x = max(cell_pos.x - 1, 0); cell_offset.x <= cell_max.x; cell_offset.x++)
{
//Get Cell Particle List
//uint2 cell_desc = grid_offsets[get_cell_hash(cell_offset)];
uint cell_itr = readTexNearest<uint>(particles_start, cell_offset.x, cell_offset.y, cell_offset.z);
uint cell_end = readTexNearest<uint>(particles_end, cell_offset.x, cell_offset.y, cell_offset.z);
//Iterate over each particle
for (; cell_itr < cell_end; cell_itr++)
{
float3 v_velocity = velocities[cell_itr];
float3 g_position = get_cell_posf(positions[cell_itr]);
g_position.x -= float(cell_pos.x);
g_position.y -= float(cell_pos.y);
g_position.z -= float(cell_pos.z);
float4 cur_weight = make_float4(
kx(g_position),
ky(g_position),
kz(g_position),
kw(g_position));
out_vel.x += cur_weight.x * v_velocity.x;
out_vel.y += cur_weight.y * v_velocity.y;
out_vel.z += cur_weight.z * v_velocity.z;
out_weight.x += cur_weight.x;
out_weight.y += cur_weight.y;
out_weight.z += cur_weight.z;
out_weight.w += cur_weight.w;
}
}
}
}
//Store Output (out_weight = [normalized vel].xyz + out_weight.w)
out_weight.x = (out_weight.x > 0) ? out_vel.x / out_weight.x : 0.0;
out_weight.y = (out_weight.y > 0) ? out_vel.y / out_weight.y : 0.0;
out_weight.z = (out_weight.z > 0) ? out_vel.z / out_weight.z : 0.0;
writeTexVel<float4>(out_velgrid, out_weight, cell_pos.x, cell_pos.y, cell_pos.z);
writeTexVel<float4>(out_veloriggrid, out_weight, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::transferToGridProgram(
cudaStream_t stream,
uint3 grid_resolution,
uint particle_count,
cudaTextureObject_t particles_start,
cudaTextureObject_t particles_end,
cudaSurfaceObject_t out_velgrid,
cudaSurfaceObject_t out_veloriggrid,
float3* positions,
float3* velocities)
{
//Optimisations:
// - Reduce offset's list to ignore any empty cells
// - Run one warp per cell instead of one thread
if (particle_count == 0)
return;
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y + 1, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z + 1, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y + 1) * (grid_resolution.z + 1), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_transferToGridProgram << <num_blocks, num_threads, 0, stream >> >(particles_start, particles_end,
positions, velocities, out_velgrid, out_veloriggrid);
PICFLIP_PROFILE_END_KERNEL("kernel_transferToGridProgram", (grid_resolution.x + 1) * (grid_resolution.y + 1) * (grid_resolution.z + 1))
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_markProgram(uint particle_count, float3* positions, cudaSurfaceObject_t out_markergrid)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
{
return;
}
float3 pos = positions[index];
int3 cell_pos = get_cell_pos(pos);
//Thread write safety
if (index > 0)
{
float3 pos2 = positions[index - 1];
int3 cell_pos2 = get_cell_pos(pos2);
if (cell_pos2.x == cell_pos.x
&& cell_pos2.y == cell_pos.y
&& cell_pos2.z == cell_pos.z)
{
return;
}
}
writeTex<unsigned char>(out_markergrid, 1, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::markProgram(cudaStream_t stream,
uint particle_count,
float3* positions,
cudaSurfaceObject_t out_markergrid)
{
if (particle_count == 0)
return;
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_markProgram << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, out_markergrid);
PICFLIP_PROFILE_END_KERNEL("kernel_markProgram", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_addForceProgram(cudaTextureObject_t in_velgrid, cudaSurfaceObject_t out_velgrid)
{
int3 cell_pos;
if (!GetCellPosVel(cell_pos))
return;
float4 vel = readTexNearest<float4>(in_velgrid, cell_pos.x, cell_pos.y, cell_pos.z);
//Apply Gravity
vel.y -= 9.81f * dParams.dt;
//Enforce Tank Boundary Conditions
if (cell_pos.x == 0) {
vel.x = 0.0;
}
if (cell_pos.x == dParams.grid_resolution.x) {
vel.x = 0.0;
}
if (cell_pos.y == 0) {
vel.y = 0.0f;
}
if (cell_pos.y == dParams.grid_resolution.y) {
vel.y = min(vel.y, 0.0);
}
if (cell_pos.z == 0) {
vel.z = 0.0;
}
if (cell_pos.z == dParams.grid_resolution.z) {
vel.z = 0.0;
}
writeTexVel<float4>(out_velgrid, vel, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::addForceProgram(cudaStream_t stream,
uint3 grid_resolution,
cudaTextureObject_t in_velgrid,
cudaSurfaceObject_t out_velgrid)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y + 1, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z + 1, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y + 1) * (grid_resolution.z + 1), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_addForceProgram << <num_blocks, num_threads, 0, stream >> >(in_velgrid, out_velgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_addForceProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_divergenceProgram(
cudaTextureObject_t in_velgrid,
cudaTextureObject_t in_markergrid,
cudaSurfaceObject_t out_divgrid)
{
int3 cell_pos;
if (!GetCellPos(cell_pos))
return;
float out_div = 0.0;
char marker = readTexNearest<char>(in_markergrid, cell_pos.x, cell_pos.y, cell_pos.z);
//Only compute divergence for fluid
if (marker & 1)
{
float3 idx = make_float3(cell_pos.x, cell_pos.y, cell_pos.z);
float4 vel_min = readTexInterpolate<float4>(in_velgrid, idx.x, idx.y, idx.z);
float3 vel_max;
vel_max.x = readTexInterpolate<float4>(in_velgrid, idx.x + 1, idx.y, idx.z).x;
vel_max.y = readTexInterpolate<float4>(in_velgrid, idx.x, idx.y + 1, idx.z).y;
vel_max.z = readTexInterpolate<float4>(in_velgrid, idx.x, idx.y, idx.z + 1).z;
out_div = ((vel_max.x - vel_min.x) + (vel_max.y - vel_min.y) + (vel_max.z - vel_min.z));
//float density = readTexNearest<float4>(in_weightgrid, index_x, index_y, index_z).w;
out_div -= max((vel_min.w - dParams.particles_per_cell), 0.0f); //volume conservation
//out_div *= 2.0f;
}
writeTex<float>(out_divgrid, out_div, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::divergenceProgram(cudaStream_t stream,
uint3 grid_resolution,
cudaTextureObject_t in_velgrid,
cudaTextureObject_t in_markergrid,
cudaSurfaceObject_t out_divgrid)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y) * (grid_resolution.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_divergenceProgram << <num_blocks, num_threads, 0, stream >> >(in_velgrid, in_markergrid, out_divgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_divergenceProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_marksolidcells(
uint3 grid_start,
uint3 grid_size,
cudaTextureObject_t markergrid)
{
int idx = blockIdx.y*blockDim.y + threadIdx.y;
int3 cell_pos = make_int3(
blockIdx.x*blockDim.x + threadIdx.x,
idx % grid_size.y,
idx / grid_size.y
);
if (cell_pos.x >= grid_size.x
|| cell_pos.y >= grid_size.y
|| cell_pos.z >= grid_size.z)
{
return;
}
cell_pos.x += grid_start.x;
cell_pos.y += grid_start.y;
cell_pos.z += grid_start.z;
unsigned char marker = readTexNearest<unsigned char>(markergrid, cell_pos.x, cell_pos.y, cell_pos.z);
marker |= 2;
writeTex<unsigned char>(markergrid, marker, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::marksolidcells(cudaStream_t stream,
uint3 grid_start,
uint3 grid_size,
cudaTextureObject_t markergrid)
{
dim3 num_threads;
dim3 num_blocks;
utils::compute_grid_size(grid_size.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_size.y) * (grid_size.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_marksolidcells << <num_blocks, num_threads, 0, stream >> >(grid_start, grid_size, markergrid);
PICFLIP_PROFILE_END_KERNEL("kernel_marksolidcells", grid_size.x * grid_size.y * grid_size.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_initPressureGrid(
cudaSurfaceObject_t presgrid,
cudaSurfaceObject_t presgrid_old)
{
int3 cell_pos;
if (!GetCellPos(cell_pos))
return;
float op = 0.0;
//if (cell_pos.x >= 55 && cell_pos.x < 60)
//{
// op = 32.0f;
//}
surf3Dwrite(op, presgrid, cell_pos.x * sizeof(float), cell_pos.y, cell_pos.z);
surf3Dwrite(op, presgrid_old, cell_pos.x * sizeof(float), cell_pos.y, cell_pos.z);
}
void picflip::initPressureGrid(cudaStream_t stream, uint3 grid_resolution, cudaSurfaceObject_t presgrid, cudaSurfaceObject_t presgrid_old)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y) * (grid_resolution.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
//Create Cell Indexes
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_initPressureGrid << <num_blocks, num_threads, 0, stream >> >(presgrid, presgrid_old);
PICFLIP_PROFILE_END_KERNEL("kernel_initPressureGrid", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__device__
float GetAdjPressure(const float& mypres, cudaTextureObject_t in_markergrid, cudaTextureObject_t in_presgrid, const int& cx, const int& cy, const int& cz)
{
#if 1
return readTexNearest<float>(in_presgrid, cx, cy, cz);
#else
unsigned char marker = readTexNearest<unsigned char>(in_markergrid, cx, cy, cz);
float pres = readTexNearest<float>(in_presgrid, cx, cy, cz);
if (marker & 2)
{
pres = mypres;
//printf("moo");
}
return pres;
#endif
}
__global__
void kernel_jacobiProgram(
cudaTextureObject_t in_markergrid,
cudaTextureObject_t in_divgrid,
cudaTextureObject_t in_presgrid,
cudaSurfaceObject_t out_presgrid)
{
int3 cell_pos;
if (!GetCellPos(cell_pos))
return;
char marker = readTexNearest<char>(in_markergrid, cell_pos.x, cell_pos.y, cell_pos.z);
if ((marker & 1) == 0)
return;
//Only compute pressure for fluid cells
float out_pressure = 0.0;
//if (marker == 1)
{
float divergenceCenter = readTexNearest<float>(in_divgrid, cell_pos.x, cell_pos.y, cell_pos.z);
float mypres = readTexNearest<float>(in_presgrid, cell_pos.x, cell_pos.y, cell_pos.z);
float left = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x - 1, cell_pos.y, cell_pos.z);
float right = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x + 1, cell_pos.y, cell_pos.z);
float bottom = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y - 1, cell_pos.z);
float top = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y + 1, cell_pos.z);
float back = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y, cell_pos.z - 1);
float front = GetAdjPressure(mypres, in_markergrid, in_presgrid, cell_pos.x, cell_pos.y, cell_pos.z + 1);
out_pressure = (left + right + bottom + top + back + front - divergenceCenter) / 6.0;
}
writeTex<float>(out_presgrid, out_pressure, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::jacobiProgram(cudaStream_t stream,
uint jacobi_iterations,
uint3 grid_resolution,
cudaTextureObject_t in_markergrid,
cudaTextureObject_t in_divgrid,
cudaTextureObject_t presgridtex_ping,
cudaSurfaceObject_t presgridsur_ping,
cudaTextureObject_t presgridtex_pong,
cudaSurfaceObject_t presgridsur_pong)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y) * (grid_resolution.z), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
PICFLIP_PROFILE_BEGIN_KERNEL
for (uint i = 0; i < jacobi_iterations; ++i)
{
bool swap = i % 2 == 0;
kernel_jacobiProgram << <num_blocks, num_threads, 0, stream >> >(
in_markergrid,
in_divgrid,
swap ? presgridtex_ping : presgridtex_pong,
swap ? presgridsur_pong : presgridsur_ping);
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
PICFLIP_PROFILE_END_KERNEL("kernel_jacobiProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
}
__device__
bool readInterpolated(float& out_float, cudaTextureObject_t markergrid, cudaTextureObject_t pressuregrid, float xs1, float ys1, float zs1, float xs2, float ys2, float zs2, float factor)
{
unsigned char uftl = tex3D<unsigned char >(markergrid, xs1 + 0.5f, ys1 + 0.5f, zs1 + 0.5f);
unsigned char uftr = tex3D<unsigned char >(markergrid, xs2 + 0.5f, ys2 + 0.5f, zs2 + 0.5f);
float ftl = tex3D<float>(pressuregrid, xs1 + 0.5f, ys1 + 0.5f, zs1 + 0.5f);
float ftr = tex3D<float>(pressuregrid, xs2 + 0.5f, ys2 + 0.5f, zs2 + 0.5f);
if (uftl & uftr & 2)
{
out_float = 0.0f;
return false;
}
else if (uftl & 2)
ftl = ftr;
else if (uftr & 2)
{
ftr = ftl;
}
out_float = ftl * (1.0f - factor) + ftr * factor;
return true;
}
__device__
float readInterpolatedPressures(cudaTextureObject_t markergrid, cudaTextureObject_t pressuregrid, float xs, float ys, float zs)
{
float x = floor(xs);
float y = floor(ys);
float z = floor(zs);
float fx = xs - x;
float fy = ys - y;
float fz = zs - z;
float ftl, fbl, btl, bbl;
bool bftl = readInterpolated(ftl, markergrid, pressuregrid,
x , y, z,
x + 1.0f, y, z, fx);
bool bfbl = readInterpolated(fbl, markergrid, pressuregrid,
x , y + 1.0f, z ,
x + 1.0f, y + 1.0f, z , fx);
bool bbtl = readInterpolated(btl, markergrid, pressuregrid,
x , y, z + 1.0f,
x + 1.0f, y, z + 1.0f, fx);
bool bbbl = readInterpolated(bbl, markergrid, pressuregrid,
x, y + 1.0f, z + 1.0f,
x + 1.0f, y + 1.0f, z + 1.0f, fx);
bool by1 = true, by2 = true;
if (!bftl && !bfbl)
by1 = false;
else if (!bftl)
ftl = fbl;
else if (!bfbl)
{
fbl = ftl;
}
if (!bbtl && !bbbl)
by2 = false;
else if (!bbtl)
btl = bbl;
else if (!bbbl)
{
bbl = btl;
}
ftl = ftl * (1.0f - fy) + fbl * fy;
btl = btl * (1.0f - fy) + bbl * fy;
if (!by1 && !by2)
return 0.0f;
else if (!by1)
ftl = btl;
else if (!by2)
{
btl = ftl;
}
return ftl * (1.0f - fz) + btl * fz;
}
__global__
void kernel_subtractProgram(
cudaTextureObject_t in_markergrid,
cudaTextureObject_t in_velgrid,
cudaTextureObject_t in_presgrid,
cudaSurfaceObject_t out_velgrid)
{
int3 cell_pos;
if (!GetCellPosVel(cell_pos))
return;
float3 idx = make_float3(cell_pos.x, cell_pos.y, cell_pos.z);
#if 1
float pres_max = readTexInterpolate<float>(in_presgrid, idx.x, idx.y, idx.z);
float3 pres_min;
pres_min.x = readTexInterpolate<float>(in_presgrid, idx.x - 1, idx.y, idx.z);
pres_min.y = readTexInterpolate<float>(in_presgrid, idx.x, idx.y - 1, idx.z);
pres_min.z = readTexInterpolate<float>(in_presgrid, idx.x, idx.y, idx.z - 1);
#else
float pres_max = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x, idx.y, idx.z);
float3 pres_min;
pres_min.x = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x - 1, idx.y, idx.z);
pres_min.y = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x, idx.y - 1, idx.z);
pres_min.z = readInterpolatedPressures(in_markergrid, in_presgrid, idx.x, idx.y, idx.z - 1);
#endif
//compute gradient of pressure
float4 gradient;
gradient.x = (pres_max - pres_min.x);
gradient.y = (pres_max - pres_min.y);
gradient.z = (pres_max - pres_min.z);
gradient.w = 0.0f;
float4 velocity = readTexNearest<float4>(in_velgrid, cell_pos.x, cell_pos.y, cell_pos.z);
float4 newVelocity = velocity - gradient;
writeTexVel<float4>(out_velgrid, newVelocity, cell_pos.x, cell_pos.y, cell_pos.z);
}
void picflip::subtractProgram(cudaStream_t stream,
uint3 grid_resolution,
cudaTextureObject_t in_markergrid,
cudaTextureObject_t in_velgrid,
cudaTextureObject_t in_presgrid,
cudaSurfaceObject_t out_velgrid)
{
dim3 num_threads;
dim3 num_blocks;
#if XFER_USE_TRPLE_CUDA_DIM
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_resolution.y + 1, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_resolution.z + 1, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
#else
utils::compute_grid_size(grid_resolution.x + 1, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size((grid_resolution.y + 1) * (grid_resolution.z + 1), CUDA_BLOCK_SIZE_2D, num_blocks.y, num_threads.y);
num_threads.z = 1; num_blocks.z = 1;
#endif
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_subtractProgram << <num_blocks, num_threads, 0, stream >> >(
in_markergrid,
in_velgrid,
in_presgrid,
out_velgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_subtractProgram", grid_resolution.x * grid_resolution.y * grid_resolution.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__device__
float3 sample_velocity(float3 cell_pos, cudaTextureObject_t in_velgrid)
{
float3 vel;
vel.x = readTexInterpolate<float4>(in_velgrid, cell_pos.x, cell_pos.y - 0.5f, cell_pos.z - 0.5f).x;
vel.y = readTexInterpolate<float4>(in_velgrid, cell_pos.x - 0.5f, cell_pos.y, cell_pos.z - 0.5f).y;
vel.z = readTexInterpolate<float4>(in_velgrid, cell_pos.x - 0.5f, cell_pos.y - 0.5f, cell_pos.z).z;
return vel;
}
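// sample_velocity reads the three components at staggered (MAC-grid) positions:
// each component lives on the cell face perpendicular to it, so the other two
// coordinates are offset by -0.5 before the fetch, and only the matching
// component of each float4 fetch is kept. Schematically, for component c:
//
//   vel.c = tex(in_velgrid, cell_pos - 0.5f * (1 - e_c)).c   // e_c = unit axis c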
__global__
void kernel_transferToParticlesProgram(uint particle_count, float3* positions, float3* velocities, float3* out_velocities,
cudaTextureObject_t in_velgrid, cudaTextureObject_t in_veloriggrid)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
float3 particle_vel = velocities[index];
float3 grid_vel = sample_velocity(cell_pos, in_velgrid);
float3 grid_vel_orig = sample_velocity(cell_pos, in_veloriggrid);
float3 grid_change = grid_vel - grid_vel_orig;
float3 flip_vel = particle_vel + grid_change;
float3 pic_vel = grid_vel;
//pic_vel.y += ((index % 5) / 5.0f) * 0.05f;
float3 new_vel = pic_vel * (1.0 - dParams.flipness) + flip_vel * dParams.flipness;
//CFL Condition
float3 cfl = make_float3(1.0, 1.0, 1.0) / (dParams.world_to_grid * dParams.dt);
new_vel.x = max(min(new_vel.x, cfl.x), -cfl.x);
new_vel.y = max(min(new_vel.y, cfl.y), -cfl.y);
new_vel.z = max(min(new_vel.z, cfl.z), -cfl.z);
out_velocities[index] = new_vel;
}
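// The kernel above blends a pure PIC update (take the new grid velocity) with
// a FLIP update (add the grid velocity *change* to the particle's velocity):
//
//   v_flip = v_particle + (v_grid_new - v_grid_old)
//   v_pic  = v_grid_new
//   v_new  = (1 - flipness) * v_pic + flipness * v_flip
//
// flipness = 0 gives dissipative but stable PIC, flipness = 1 gives energetic
// but noisy FLIP; values close to 1 are the usual compromise. The final clamp
// caps each velocity component at one grid cell per time step (a CFL-style bound).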
void picflip::transferToParticlesProgram(cudaStream_t stream, uint particle_count, float3* positions, float3* velocities, float3* out_velocities,
cudaTextureObject_t in_velgrid, cudaTextureObject_t in_veloriggrid)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_transferToParticlesProgram << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, velocities, out_velocities,
in_velgrid, in_veloriggrid);
PICFLIP_PROFILE_END_KERNEL("kernel_transferToParticlesProgram", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_advectProgram(uint particle_count, float3* positions, float3* velocities, float3* out_positions, cudaTextureObject_t in_velgrid)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
float3 grid_vel = sample_velocity(cell_pos, in_velgrid);
float3 halfway_pos = pos + grid_vel * dParams.dt * 0.5f;
cell_pos = get_cell_posf(halfway_pos);
float3 halfway_vel = sample_velocity(cell_pos, in_velgrid);
//CFL Condition
float3 step = (halfway_vel * dParams.dt);
/* float3 cfl = make_float3(1.0f, 1.0f, 1.0f) / dParams.world_to_grid;
step.x = max(min(step.x, cfl.x), -cfl.x);
step.y = max(min(step.y, cfl.y), -cfl.y);
step.z = max(min(step.z, cfl.z), -cfl.z);*/
float3 new_pos = pos + step;
//Clamp positions inside grid
cell_pos = get_cell_posf(new_pos);
const float WALL_OFFSET = 0.01f;
cell_pos.x = min(max(cell_pos.x, WALL_OFFSET), dParams.grid_resolution.x - WALL_OFFSET);
cell_pos.y = min(max(cell_pos.y, WALL_OFFSET), dParams.grid_resolution.y - WALL_OFFSET);
cell_pos.z = min(max(cell_pos.z, WALL_OFFSET), dParams.grid_resolution.z - WALL_OFFSET);
new_pos = get_wrld_posf(cell_pos);
out_positions[index] = new_pos;
}
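// kernel_advectProgram uses a two-stage midpoint (RK2) step: sample the grid
// velocity at the particle, move half a step, re-sample there, and take the
// full step with the midpoint velocity. In sketch form:
//
//   v0 = sample(x);
//   xm = x + 0.5f * dt * v0;   // half step
//   vm = sample(xm);           // midpoint velocity
//   x  = x + dt * vm;          // full step
//
// The closing clamp keeps particles a small offset inside the grid walls,
// the same operation kernel_enforceBoundaries below performs as its own pass.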
void picflip::advectProgram(cudaStream_t stream, uint particle_count, float3* positions, float3* velocities, float3* out_positions, cudaTextureObject_t in_velgrid)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_advectProgram << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, velocities, out_positions, in_velgrid);
PICFLIP_PROFILE_END_KERNEL("kernel_advectProgram", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
__global__
void kernel_enforceBoundaries(uint particle_count, float3* positions)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
const float WALL_OFFSET = 0.01f;
cell_pos.x = min(max(cell_pos.x, WALL_OFFSET), dParams.grid_resolution.x - WALL_OFFSET);
cell_pos.y = min(max(cell_pos.y, WALL_OFFSET), dParams.grid_resolution.y - WALL_OFFSET);
cell_pos.z = min(max(cell_pos.z, WALL_OFFSET), dParams.grid_resolution.z - WALL_OFFSET);
cell_pos = get_wrld_posf(cell_pos);
positions[index] = cell_pos;
}
void picflip::enforceBoundaries(cudaStream_t stream, uint particle_count, float3* positions)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_enforceBoundaries << <num_blocks, num_threads, 0, stream >> >(particle_count, positions);
PICFLIP_PROFILE_END_KERNEL("kernel_enforceBoundaries", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template <class T>
__global__ void Kernel_MemSetSurface(cudaSurfaceObject_t out_grid, T val, uint step, uint3 grid_size)
{
/*int dimxy = grid_size.x * grid_size.y;
dim3 idx = thread3d(grid_size.x, dimxy);
if (idx.x >= grid_size.x || idx.y >= grid_size.y || idx.z >= grid_size.z)
return;*/
uint index_x = blockIdx.x*blockDim.x + threadIdx.x;
if (index_x >= grid_size.x)
return;
uint index_y = blockIdx.y*blockDim.y + threadIdx.y;
if (index_y >= grid_size.y)
return;
uint index_z = blockIdx.z*blockDim.z + threadIdx.z;
if (index_z >= grid_size.z)
return;
surf3Dwrite(val, out_grid, index_x * step, index_y, index_z);
}
template <class T>
void MemSetSurface(cudaStream_t stream, cudaSurfaceObject_t out_grid, T val, uint3 grid_size)
{
dim3 num_threads;
dim3 num_blocks;
utils::compute_grid_size(grid_size.x, CUDA_BLOCK_SIZE_3D, num_blocks.x, num_threads.x);
utils::compute_grid_size(grid_size.y, CUDA_BLOCK_SIZE_3D, num_blocks.y, num_threads.y);
utils::compute_grid_size(grid_size.z, CUDA_BLOCK_SIZE_3D, num_blocks.z, num_threads.z);
PICFLIP_PROFILE_BEGIN_KERNEL
Kernel_MemSetSurface<T> << <num_blocks, num_threads, 0, stream >> >(
out_grid,
val,
sizeof(val),
grid_size);
PICFLIP_PROFILE_END_KERNEL("Kernel_MemSetSurface<T>", grid_size.x * grid_size.y * grid_size.z)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
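// Note on Kernel_MemSetSurface: surface writes address the x coordinate in
// bytes, which is why index_x is multiplied by `step` (sizeof(val), forwarded
// by the host wrapper). For a float4 grid each voxel therefore lands at byte
// offset index_x * 16. A hypothetical call clearing a float4 velocity surface:
//
//   // MemSetSurface<float4>(stream, vel_surface, make_float4(0, 0, 0, 0), grid_resolution);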
__global__
void kernel_copyPressures(uint particle_count, float3* positions, cudaTextureObject_t in_weightgrid, float* out_pressures)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= particle_count)
return;
float3 pos = positions[index];
float3 cell_pos = get_cell_posf(pos);
float4 weight = tex3D<float4>(in_weightgrid, cell_pos.x, cell_pos.y, cell_pos.z);
out_pressures[index] = weight.w;
}
void copyPressures(cudaStream_t stream, uint particle_count, float3* positions, cudaTextureObject_t in_weightgrid, float* out_pressures)
{
uint num_threads;
uint num_blocks;
utils::compute_grid_size(particle_count, CUDA_BLOCK_SIZE, num_blocks, num_threads);
PICFLIP_PROFILE_BEGIN_KERNEL
kernel_copyPressures << <num_blocks, num_threads, 0, stream >> >(particle_count, positions, in_weightgrid, out_pressures);
PICFLIP_PROFILE_END_KERNEL("kernel_copyPressures", particle_count)
#if PICFLIP_FORCE_SYNC_AFTER_EACH_KERNEL
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
} |
4ee19e3330f3c60374faede6d13ec7267be348cc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include gesvdj_example.cpp
* g++ -o gesvdj_example gesvdj_example.o -L/usr/local/cuda/lib64 -lcudart -lcusolver
*/
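/*
 * The compile line above is the untouched CUDA recipe; for this hipified
 * version the build would go through ROCm instead, presumably something like
 * hipcc gesvdj_example.cpp -lhipsolver (exact headers, library names and
 * flags depend on the ROCm install, so treat that as an assumption). Note
 * that the #include <cusolverDn.h> below was left unchanged by hipify.
 */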
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <fstream>
#include <sys/time.h>
void printMatrix(int m, int n, const double*A, int lda, const char* name)
{
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
double Areg = A[row + col*lda];
printf("%s(%d,%d) = %20.16E\n", name, row+1, col+1, Areg);
}
}
}
int main(int argc, char*argv[])
{
hipsolverDnHandle_t cusolverH = NULL;
hipStream_t stream = NULL;
hipsolverGesvdjInfo_t gesvdj_params = NULL;
cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
hipError_t cudaStat1 = hipSuccess;
hipError_t cudaStat2 = hipSuccess;
hipError_t cudaStat3 = hipSuccess;
hipError_t cudaStat4 = hipSuccess;
hipError_t cudaStat5 = hipSuccess;
const int m = 240;
const int n = 240;
const int lda = m; /* A is m-by-n */
const int ldu = m; /* U is m-by-m */
const int ldv = n; /* V is n-by-n */
const int minmn = min(m,n);
/* | 1 2 |
* A = | 4 5 |
* | 2 1 |
*/
double A[lda*n]; //= { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
std::ifstream cov_file("dump_identity.txt");
// char toss;
// cov_file.read(&toss, 1);
for(uint32_t i = 0; i < lda; i += 1) {
for(uint32_t j = 0; j < n; j += 1) {
cov_file >> A[i * n + j];
}
}
std::ofstream file("dump_A.txt");
for(int i = 0; i < lda; i += 1) {
for(int j = 0; j < n; j += 1) {
file << A[i * m + j] << " ";
}
file << std::endl;
}
// for(int i=0;i<lda;i++)
// for(int j=0;j<n;j++)
// A[i*n+j] = i+j-1;
double U[ldu*m]; /* m-by-m unitary matrix, left singular vectors */
double V[ldv*n]; /* n-by-n unitary matrix, right singular vectors */
double S[minmn]; /* numerical singular value */
/* exact singular values */
double S_exact[minmn] = {7.065283497082729, 1.040081297712078};
double *d_A = NULL; /* device copy of A */
double *d_S = NULL; /* singular values */
double *d_U = NULL; /* left singular vectors */
double *d_V = NULL; /* right singular vectors */
int *d_info = NULL; /* error info */
int lwork = 0; /* size of workspace */
double *d_work = NULL; /* device workspace for gesvdj */
int info = 0; /* host copy of error info */
/* configuration of gesvdj */
const double tol = 1.e-7;
const int max_sweeps = 15;
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; // compute eigenvectors.
const int econ = 0 ; /* econ = 1 for economy size */
/* numerical results of gesvdj */
double residual = 0;
int executed_sweeps = 0;
printf("example of gesvdj \n");
printf("tol = %E, default value is machine zero \n", tol);
printf("max. sweeps = %d, default value is 100\n", max_sweeps);
printf("econ = %d \n", econ);
printf("A = (matlab base-1)\n");
// printMatrix(m, n, A, lda, "A");
printf("=====\n");
/* step 1: create cusolver handle, bind a stream */
status = hipsolverDnCreate(&cusolverH);
assert(CUSOLVER_STATUS_SUCCESS == status);
cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
assert(hipSuccess == cudaStat1);
status = hipsolverDnSetStream(cusolverH, stream);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* step 2: configuration of gesvdj */
status = hipsolverDnCreateGesvdjInfo(&gesvdj_params);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* default value of tolerance is machine zero */
status = hipsolverDnXgesvdjSetTolerance(
gesvdj_params,
tol);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* default value of max. sweeps is 100 */
status = hipsolverDnXgesvdjSetMaxSweeps(
gesvdj_params,
max_sweeps);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* step 3: copy A and B to device */
cudaStat1 = hipMalloc ((void**)&d_A , sizeof(double)*lda*n);
cudaStat2 = hipMalloc ((void**)&d_S , sizeof(double)*minmn);
cudaStat3 = hipMalloc ((void**)&d_U , sizeof(double)*ldu*m);
cudaStat4 = hipMalloc ((void**)&d_V , sizeof(double)*ldv*n);
cudaStat5 = hipMalloc ((void**)&d_info, sizeof(int));
assert(hipSuccess == cudaStat1);
assert(hipSuccess == cudaStat2);
assert(hipSuccess == cudaStat3);
assert(hipSuccess == cudaStat4);
assert(hipSuccess == cudaStat5);
cudaStat1 = hipMemcpy(d_A, A, sizeof(double)*lda*n, hipMemcpyHostToDevice);
assert(hipSuccess == cudaStat1);
/* step 4: query workspace of SVD */
status = hipsolverDnDgesvdj_bufferSize(
cusolverH,
jobz, /* HIPSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* HIPSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
econ, /* econ = 1 for economy size */
    m,    /* number of rows of A, 0 <= m */
n, /* number of columns of A, 0 <= n */
d_A, /* m-by-n */
lda, /* leading dimension of A */
d_S, /* min(m,n) */
/* the singular values in descending order */
d_U, /* m-by-m if econ = 0 */
/* m-by-min(m,n) if econ = 1 */
ldu, /* leading dimension of U, ldu >= max(1,m) */
d_V, /* n-by-n if econ = 0 */
/* n-by-min(m,n) if econ = 1 */
ldv, /* leading dimension of V, ldv >= max(1,n) */
&lwork,
gesvdj_params);
assert(CUSOLVER_STATUS_SUCCESS == status);
cudaStat1 = hipMalloc((void**)&d_work , sizeof(double)*lwork);
assert(hipSuccess == cudaStat1);
/* step 5: compute SVD */
struct timeval t1, t2;
gettimeofday(&t1, 0);
status = hipsolverDnDgesvdj(
cusolverH,
jobz, /* HIPSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* HIPSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
econ, /* econ = 1 for economy size */
    m,    /* number of rows of A, 0 <= m */
n, /* number of columns of A, 0 <= n */
d_A, /* m-by-n */
lda, /* leading dimension of A */
d_S, /* min(m,n) */
/* the singular values in descending order */
d_U, /* m-by-m if econ = 0 */
/* m-by-min(m,n) if econ = 1 */
ldu, /* leading dimension of U, ldu >= max(1,m) */
d_V, /* n-by-n if econ = 0 */
/* n-by-min(m,n) if econ = 1 */
ldv, /* leading dimension of V, ldv >= max(1,n) */
d_work,
lwork,
d_info,
gesvdj_params);
cudaStat1 = hipDeviceSynchronize();
assert(CUSOLVER_STATUS_SUCCESS == status);
assert(hipSuccess == cudaStat1);
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Time to generate: %3.1f ms \n", time);
cudaStat1 = hipMemcpy(U, d_U, sizeof(double)*ldu*m, hipMemcpyDeviceToHost);
cudaStat2 = hipMemcpy(V, d_V, sizeof(double)*ldv*n, hipMemcpyDeviceToHost);
cudaStat3 = hipMemcpy(S, d_S, sizeof(double)*minmn, hipMemcpyDeviceToHost);
cudaStat4 = hipMemcpy(&info, d_info, sizeof(int), hipMemcpyDeviceToHost);
cudaStat5 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
assert(hipSuccess == cudaStat2);
assert(hipSuccess == cudaStat3);
assert(hipSuccess == cudaStat4);
assert(hipSuccess == cudaStat5);
if ( 0 == info ){
printf("gesvdj converges \n");
}else if ( 0 > info ){
printf("%d-th parameter is wrong \n", -info);
exit(1);
}else{
printf("WARNING: info = %d : gesvdj does not converge \n", info );
}
printf("S = singular values (matlab base-1)\n");
std::ofstream file1("dump_S.txt");
for(int i = 0; i < minmn; i += 1) {
file1 << S[i] << " ";
file1 << std::endl;
}
// printMatrix(minmn, 1, S, minmn, "S");
printf("=====\n");
printf("U = left singular vectors (matlab base-1)\n");
std::ofstream file2("dump_U.txt");
for(int i = 0; i < ldu; i += 1) {
for(int j = 0; j < m; j += 1) {
file2 << U[i * m + j] << " ";
}
file2 << std::endl;
}
// printMatrix(m, m, U, ldu, "U");
printf("=====\n");
printf("V = right singular vectors (matlab base-1)\n");
std::ofstream file3("dump_V.txt");
for(int i = 0; i < ldv; i += 1) {
for(int j = 0; j < n; j += 1) {
file3 << V[i * n + j] << " ";
}
file3 << std::endl;
}
// printMatrix(n, n, V, ldv, "V");
printf("=====\n");
/* step 6: measure error of singular value */
double ds_sup = 0;
for(int j = 0; j < minmn; j++){
double err = fabs( S[j] - S_exact[j] );
ds_sup = (ds_sup > err)? ds_sup : err;
}
printf("|S - S_exact|_sup = %E \n", ds_sup);
status = hipsolverDnXgesvdjGetSweeps(
cusolverH,
gesvdj_params,
&executed_sweeps);
assert(CUSOLVER_STATUS_SUCCESS == status);
status = hipsolverDnXgesvdjGetResidual(
cusolverH,
gesvdj_params,
&residual);
assert(CUSOLVER_STATUS_SUCCESS == status);
printf("residual |A - U*S*V**H|_F = %E \n", residual );
printf("number of executed sweeps = %d \n", executed_sweeps );
/* free resources */
if (d_A ) hipFree(d_A);
if (d_S ) hipFree(d_S);
if (d_U ) hipFree(d_U);
if (d_V ) hipFree(d_V);
if (d_info) hipFree(d_info);
if (d_work ) hipFree(d_work);
if (cusolverH) hipsolverDnDestroy(cusolverH);
if (stream ) hipStreamDestroy(stream);
if (gesvdj_params) hipsolverDnDestroyGesvdjInfo(gesvdj_params);
hipDeviceReset();
return 0;
}
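/*
 * The program above follows the usual solver pattern: create the handle and
 * the gesvdj options, query the workspace size, allocate it, run the
 * factorization, then copy U, S, V and the info flag back:
 *
 *   hipsolverDnDgesvdj_bufferSize(handle, ..., &lwork, gesvdj_params);   // query
 *   hipMalloc((void**)&d_work, sizeof(double) * lwork);                  // allocate
 *   hipsolverDnDgesvdj(handle, ..., d_work, lwork, d_info, gesvdj_params); // compute
 *
 * One caveat in this listing: S_exact still holds the two singular values of
 * the original 3x2 example, so with m = n = 240 the |S - S_exact|_sup check
 * is not a meaningful error measure for the matrix read from dump_identity.txt.
 */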
| 4ee19e3330f3c60374faede6d13ec7267be348cc.cu | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include gesvdj_example.cpp
* g++ -o gesvdj_example gesvdj_example.o -L/usr/local/cuda/lib64 -lcudart -lcusolver
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <fstream>
#include <sys/time.h>
void printMatrix(int m, int n, const double*A, int lda, const char* name)
{
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
double Areg = A[row + col*lda];
printf("%s(%d,%d) = %20.16E\n", name, row+1, col+1, Areg);
}
}
}
int main(int argc, char*argv[])
{
cusolverDnHandle_t cusolverH = NULL;
cudaStream_t stream = NULL;
gesvdjInfo_t gesvdj_params = NULL;
cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
cudaError_t cudaStat1 = cudaSuccess;
cudaError_t cudaStat2 = cudaSuccess;
cudaError_t cudaStat3 = cudaSuccess;
cudaError_t cudaStat4 = cudaSuccess;
cudaError_t cudaStat5 = cudaSuccess;
const int m = 240;
const int n = 240;
const int lda = m; /* A is m-by-n */
const int ldu = m; /* U is m-by-m */
const int ldv = n; /* V is n-by-n */
const int minmn = min(m,n);
/* | 1 2 |
* A = | 4 5 |
* | 2 1 |
*/
double A[lda*n]; //= { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
std::ifstream cov_file("dump_identity.txt");
// char toss;
// cov_file.read(&toss, 1);
for(uint32_t i = 0; i < lda; i += 1) {
for(uint32_t j = 0; j < n; j += 1) {
cov_file >> A[i * n + j];
}
}
std::ofstream file("dump_A.txt");
for(int i = 0; i < lda; i += 1) {
for(int j = 0; j < n; j += 1) {
file << A[i * m + j] << " ";
}
file << std::endl;
}
// for(int i=0;i<lda;i++)
// for(int j=0;j<n;j++)
// A[i*n+j] = i+j-1;
double U[ldu*m]; /* m-by-m unitary matrix, left singular vectors */
double V[ldv*n]; /* n-by-n unitary matrix, right singular vectors */
double S[minmn]; /* numerical singular value */
/* exact singular values */
double S_exact[minmn] = {7.065283497082729, 1.040081297712078};
double *d_A = NULL; /* device copy of A */
double *d_S = NULL; /* singular values */
double *d_U = NULL; /* left singular vectors */
double *d_V = NULL; /* right singular vectors */
int *d_info = NULL; /* error info */
int lwork = 0; /* size of workspace */
double *d_work = NULL; /* device workspace for gesvdj */
int info = 0; /* host copy of error info */
/* configuration of gesvdj */
const double tol = 1.e-7;
const int max_sweeps = 15;
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; // compute eigenvectors.
const int econ = 0 ; /* econ = 1 for economy size */
/* numerical results of gesvdj */
double residual = 0;
int executed_sweeps = 0;
printf("example of gesvdj \n");
printf("tol = %E, default value is machine zero \n", tol);
printf("max. sweeps = %d, default value is 100\n", max_sweeps);
printf("econ = %d \n", econ);
printf("A = (matlab base-1)\n");
// printMatrix(m, n, A, lda, "A");
printf("=====\n");
/* step 1: create cusolver handle, bind a stream */
status = cusolverDnCreate(&cusolverH);
assert(CUSOLVER_STATUS_SUCCESS == status);
cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
assert(cudaSuccess == cudaStat1);
status = cusolverDnSetStream(cusolverH, stream);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* step 2: configuration of gesvdj */
status = cusolverDnCreateGesvdjInfo(&gesvdj_params);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* default value of tolerance is machine zero */
status = cusolverDnXgesvdjSetTolerance(
gesvdj_params,
tol);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* default value of max. sweeps is 100 */
status = cusolverDnXgesvdjSetMaxSweeps(
gesvdj_params,
max_sweeps);
assert(CUSOLVER_STATUS_SUCCESS == status);
/* step 3: copy A and B to device */
cudaStat1 = cudaMalloc ((void**)&d_A , sizeof(double)*lda*n);
cudaStat2 = cudaMalloc ((void**)&d_S , sizeof(double)*minmn);
cudaStat3 = cudaMalloc ((void**)&d_U , sizeof(double)*ldu*m);
cudaStat4 = cudaMalloc ((void**)&d_V , sizeof(double)*ldv*n);
cudaStat5 = cudaMalloc ((void**)&d_info, sizeof(int));
assert(cudaSuccess == cudaStat1);
assert(cudaSuccess == cudaStat2);
assert(cudaSuccess == cudaStat3);
assert(cudaSuccess == cudaStat4);
assert(cudaSuccess == cudaStat5);
cudaStat1 = cudaMemcpy(d_A, A, sizeof(double)*lda*n, cudaMemcpyHostToDevice);
assert(cudaSuccess == cudaStat1);
/* step 4: query workspace of SVD */
status = cusolverDnDgesvdj_bufferSize(
cusolverH,
jobz, /* CUSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* CUSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
econ, /* econ = 1 for economy size */
    m,    /* number of rows of A, 0 <= m */
n, /* number of columns of A, 0 <= n */
d_A, /* m-by-n */
lda, /* leading dimension of A */
d_S, /* min(m,n) */
/* the singular values in descending order */
d_U, /* m-by-m if econ = 0 */
/* m-by-min(m,n) if econ = 1 */
ldu, /* leading dimension of U, ldu >= max(1,m) */
d_V, /* n-by-n if econ = 0 */
/* n-by-min(m,n) if econ = 1 */
ldv, /* leading dimension of V, ldv >= max(1,n) */
&lwork,
gesvdj_params);
assert(CUSOLVER_STATUS_SUCCESS == status);
cudaStat1 = cudaMalloc((void**)&d_work , sizeof(double)*lwork);
assert(cudaSuccess == cudaStat1);
/* step 5: compute SVD */
struct timeval t1, t2;
gettimeofday(&t1, 0);
status = cusolverDnDgesvdj(
cusolverH,
jobz, /* CUSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* CUSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
econ, /* econ = 1 for economy size */
    m,    /* number of rows of A, 0 <= m */
n, /* number of columns of A, 0 <= n */
d_A, /* m-by-n */
lda, /* leading dimension of A */
d_S, /* min(m,n) */
/* the singular values in descending order */
d_U, /* m-by-m if econ = 0 */
/* m-by-min(m,n) if econ = 1 */
ldu, /* leading dimension of U, ldu >= max(1,m) */
d_V, /* n-by-n if econ = 0 */
/* n-by-min(m,n) if econ = 1 */
ldv, /* leading dimension of V, ldv >= max(1,n) */
d_work,
lwork,
d_info,
gesvdj_params);
cudaStat1 = cudaDeviceSynchronize();
assert(CUSOLVER_STATUS_SUCCESS == status);
assert(cudaSuccess == cudaStat1);
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Time to generate: %3.1f ms \n", time);
cudaStat1 = cudaMemcpy(U, d_U, sizeof(double)*ldu*m, cudaMemcpyDeviceToHost);
cudaStat2 = cudaMemcpy(V, d_V, sizeof(double)*ldv*n, cudaMemcpyDeviceToHost);
cudaStat3 = cudaMemcpy(S, d_S, sizeof(double)*minmn, cudaMemcpyDeviceToHost);
cudaStat4 = cudaMemcpy(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost);
cudaStat5 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
assert(cudaSuccess == cudaStat2);
assert(cudaSuccess == cudaStat3);
assert(cudaSuccess == cudaStat4);
assert(cudaSuccess == cudaStat5);
if ( 0 == info ){
printf("gesvdj converges \n");
}else if ( 0 > info ){
printf("%d-th parameter is wrong \n", -info);
exit(1);
}else{
printf("WARNING: info = %d : gesvdj does not converge \n", info );
}
printf("S = singular values (matlab base-1)\n");
std::ofstream file1("dump_S.txt");
for(int i = 0; i < minmn; i += 1) {
file1 << S[i] << " ";
file1 << std::endl;
}
// printMatrix(minmn, 1, S, minmn, "S");
printf("=====\n");
printf("U = left singular vectors (matlab base-1)\n");
std::ofstream file2("dump_U.txt");
for(int i = 0; i < ldu; i += 1) {
for(int j = 0; j < m; j += 1) {
file2 << U[i * m + j] << " ";
}
file2 << std::endl;
}
// printMatrix(m, m, U, ldu, "U");
printf("=====\n");
printf("V = right singular vectors (matlab base-1)\n");
std::ofstream file3("dump_V.txt");
for(int i = 0; i < ldv; i += 1) {
for(int j = 0; j < n; j += 1) {
file3 << V[i * n + j] << " ";
}
file3 << std::endl;
}
// printMatrix(n, n, V, ldv, "V");
printf("=====\n");
/* step 6: measure error of singular value */
double ds_sup = 0;
for(int j = 0; j < minmn; j++){
double err = fabs( S[j] - S_exact[j] );
ds_sup = (ds_sup > err)? ds_sup : err;
}
printf("|S - S_exact|_sup = %E \n", ds_sup);
status = cusolverDnXgesvdjGetSweeps(
cusolverH,
gesvdj_params,
&executed_sweeps);
assert(CUSOLVER_STATUS_SUCCESS == status);
status = cusolverDnXgesvdjGetResidual(
cusolverH,
gesvdj_params,
&residual);
assert(CUSOLVER_STATUS_SUCCESS == status);
printf("residual |A - U*S*V**H|_F = %E \n", residual );
printf("number of executed sweeps = %d \n", executed_sweeps );
/* free resources */
if (d_A ) cudaFree(d_A);
if (d_S ) cudaFree(d_S);
if (d_U ) cudaFree(d_U);
if (d_V ) cudaFree(d_V);
if (d_info) cudaFree(d_info);
if (d_work ) cudaFree(d_work);
if (cusolverH) cusolverDnDestroy(cusolverH);
if (stream ) cudaStreamDestroy(stream);
if (gesvdj_params) cusolverDnDestroyGesvdjInfo(gesvdj_params);
cudaDeviceReset();
return 0;
}
|
012cd1b060867c9f6fe5a81a8e2d35d56a40c84c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// The MIT License (MIT)
//
// Copyright (c) 2016 Northeastern University
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <iostream>
#include "include/kdac_gpu.h"
#include "include/gpu_util.h"
// Hack to cope with Clion
#include "../../include/gpu_util.h"
#include "../../../../../../../../usr/local/cuda/include/hip/driver_types.h"
#include "../../include/kdac_gpu.h"
#include "../../include/kernel_types.h"
namespace Nice {
unsigned int nextPow2(unsigned int x) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
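// nextPow2 rounds up to the next power of two by smearing the top set bit of
// (x - 1) into every lower position and then adding one, e.g.
// nextPow2(6) == 8, nextPow2(8) == 8, nextPow2(129) == 256. It is used below
// to derive a power-of-two block size from the data dimension d.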
bool isPow2(unsigned int x) {
return ((x & (x - 1)) == 0);
}
template <typename T>
__device__ void mv(T *mat_s,
T* vec_in_s,
const int num_rows,
const int num_cols,
T* vec_out_s) {
int tx = threadIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < num_rows; k += block_size) {
for (int col = 0; col < num_cols; col++)
vec_out_s[k] += mat_s[IDXC(k, col, num_rows)] * vec_in_s[col];
}
__syncthreads();
}
template <typename T>
__device__ T reduce_sum(T *data_s, int n) {
T sum = 0;
int block_size = blockDim.x * blockDim.y;
int tx = threadIdx.y * blockDim.x + threadIdx.x;
for (int k = tx; k < n; k += block_size)
sum += data_s[k];
data_s[tx] = sum;
__syncthreads();
if ((block_size >= 512) && (tx < 256))
data_s[tx] = sum = sum + data_s[tx + 256];
__syncthreads();
if ((block_size >= 256) && (tx < 128))
data_s[tx] = sum = sum + data_s[tx + 128];
__syncthreads();
if ((block_size >= 128) && (tx < 64))
data_s[tx] = sum = sum + data_s[tx + 64];
__syncthreads();
// if ((block_size >= 64) && (tx < 32))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 32];
// __syncthreads();
//
// if ((block_size >= 32) && (tx < 16))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 16];
// __syncthreads();
//
// if ((block_size >= 16) && (tx < 8))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 8];
// __syncthreads();
//
// if ((block_size >= 8) && (tx < 4))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 4];
// __syncthreads();
//
// if ((block_size >= 4) && (tx < 2))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 2];
// __syncthreads();
//
// if ((block_size >= 2) && (tx < 1))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 1];
// __syncthreads();
if (tx < 32) {
if (block_size >= 64)
sum += data_s[tx + 32];
for (int offset = warpSize / 2; offset >0; offset /=2)
sum += __shfl_down(sum, offset);
}
if (tx == 0)
data_s[tx] = sum;
__syncthreads();
return data_s[0];
}
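// reduce_sum is a standard block-level reduction: each thread accumulates a
// strided partial sum, the partials are halved through shared memory down to
// 64 values, and the final 32 are folded with warp shuffles:
//
//   for (int offset = warpSize / 2; offset > 0; offset /= 2)
//       sum += __shfl_down(sum, offset);
//
// The unmasked __shfl_down is the older intrinsic (newer CUDA expects
// __shfl_down_sync(0xffffffff, sum, offset); HIP keeps the unmasked form).
// The shared-memory stages assume block_size is a power of two, which the
// callers arrange via nextPow2 or fixed 16x16 blocks.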
template <typename T>
__device__ void GenAij(const T *x_matrix_d,
const int n,
const int d,
T *a_ij_d,
T *delta_ij_d) {
int tx = threadIdx.x;
int i = blockIdx.y;
int j = blockIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < d; k += block_size)
delta_ij_d[k] = x_matrix_d[IDXC(i, k, n)] - x_matrix_d[IDXC(j, k, n)];
__syncthreads();
for (int k = tx; k < d; k += block_size)
for (int col = 0; col < d; col++)
// thread tx calculates a whole row tx of the output matrix a_ij
a_ij_d[IDXC(k, col, d)] = delta_ij_d[col] * delta_ij_d[k];
__syncthreads();
}
template<typename T>
__global__ void UpdateGOfWKernel(const T *x_matrix_d,
const T *w_l_d,
const float constant,
const int n,
const int d,
T *g_of_w_d_) {
// Shared memory to store a_ij * w_l
T *delta_w_s = SharedMemory<T>();
int i = blockIdx.y;
int j = blockIdx.x;
int tx = threadIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < d; k += block_size)
delta_w_s[k] =
(x_matrix_d[IDXC(i,k,n)] - x_matrix_d[IDXC(j,k,n)]) * w_l_d[k];
__syncthreads();
T delta_w =reduce_sum(delta_w_s, d);
T exp_term = expf(-delta_w * delta_w / (2 * constant * constant));
if (tx == 0)
g_of_w_d_[IDXC(i,j,n)] *= exp_term;
}
template<typename T>
__global__ void GenPhiCoeffKernel(const T *x_matrix_d,
const T *w_l_d,
const T *gradient_d,
const int n,
const int d,
T *waw_matrix_d,
T *waf_matrix_d,
T *faf_matrix_d) {
T *delta_w_s = SharedMemory<T>();
T *delta_f_s = SharedMemory<T>() + d;
int i = blockIdx.y;
int j = blockIdx.x;
int tx = threadIdx.x;
int block_size = blockDim.x;
T delta_ij_k;
for (int k = tx; k < d; k += block_size) {
delta_ij_k = x_matrix_d[IDXC(i,k,n)] - x_matrix_d[IDXC(j,k,n)];
delta_w_s[k] = delta_ij_k * w_l_d[k];
delta_f_s[k] = delta_ij_k * gradient_d[k];
}
__syncthreads();
T delta_w = reduce_sum(delta_w_s, d);
T delta_f = reduce_sum(delta_f_s, d);
if (tx == 0) {
waw_matrix_d[IDXC(j, i, n)] = delta_w * delta_w;
waf_matrix_d[IDXC(j, i, n)] = delta_w * delta_f;
faf_matrix_d[IDXC(j, i, n)] = delta_f * delta_f;
}
// T *vec_s = SharedMemory<T>();
// T *waw_s = (T *) vec_s;
// T *waf_s = (T *) &vec_s[d];
// T *faf_s = (T *) &vec_s[2 * d];
// T *w_s = (T *) &vec_s[3 * d];
// T *grad_s = (T *) &vec_s[4 * d];
// T *a_ij_s = (T *) &vec_s[5 * d];
// T *delta_ij_s = (T *) &vec_s[5 * d + d * d];
//
// GenAij(x_matrix_d, n, d, a_ij_s, delta_ij_s);
//
// int i = blockIdx.y;
// int j = blockIdx.x;
// int tx = threadIdx.x;
// int block_size = blockDim.x;
//
// for (int k = tx; k < d; k += block_size) {
// waw_s[k] = 0.0;
// waf_s[k] = 0.0;
// faf_s[k] = 0.0;
// w_s[k] = w_l_d[k];
// grad_s[k] = gradient_d[k];
// }
// __syncthreads();
//
// mv(a_ij_s, w_s, d, d, waw_s);
// mv(a_ij_s, grad_s, d, d, waf_s);
// mv(a_ij_s, grad_s, d, d, faf_s);
//
// // Dot Product
// for (int k = tx; k < d; k += block_size) {
// waw_s[k] = waw_s[k] * w_s[k];
// waf_s[k] = waf_s[k] * w_s[k];
// faf_s[k] = faf_s[k] * grad_s[k];
// }
// __syncthreads();
// reduce_sum(waw_s, d);
// reduce_sum(waf_s, d);
// reduce_sum(faf_s, d);
//
// // Transposed access for better access pattern as waw_s matrix is column-major
// if (tx == 0) {
// waw_matrix_d[IDXC(j, i, n)] = waw_s[tx];
// waf_matrix_d[IDXC(j, i, n)] = waf_s[tx];
// faf_matrix_d[IDXC(j, i, n)] = faf_s[tx];
// }
}
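// GenPhiCoeffKernel exploits the rank-one structure A_ij = delta_ij * delta_ij^T;
// the dense d x d construction survives only in the commented-out block inside
// the kernel and the apparently unused GenAij helper above. For a rank-one matrix:
//
//   w^T A_ij w = (delta_ij . w)^2                  -> waw
//   w^T A_ij f = (delta_ij . w)(delta_ij . f)      -> waf
//   f^T A_ij f = (delta_ij . f)^2                  -> faf
//
// so only the two dot products delta_ij.w and delta_ij.f need to be reduced
// per (i, j) pair instead of forming the d x d matrix explicitly.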
template<typename T>
__global__ void GenPhiKernel(const T alpha,
const T sqrt_one_minus_alpha,
const T denom,
const T *waw_matrix_d,
const T *waf_matrix_d,
const T *faf_matrix_d,
const T *gamma_matrix_d,
const int n,
const int d,
bool w_l_changed,
T *phi_of_alphas_d,
T *phi_of_zeros_d,
T *phi_of_zero_primes_d) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x * blockDim.y;
int tid = IDXR(threadIdx.y, threadIdx.x, blockDim.x);
int bid = IDXR(blockIdx.y, blockIdx.x, gridDim.x);
T *phi_of_alphas_s = SharedMemory<T>();
T *phi_of_zeros_s = 0;
T *phi_of_zero_primes_s = 0;
phi_of_alphas_s[tid] = 0.0;
if (w_l_changed) {
phi_of_zeros_s = SharedMemory<T>() + block_size;
phi_of_zero_primes_s = SharedMemory<T>() + 2*block_size;
phi_of_zeros_s[tid] = 0.0;
phi_of_zero_primes_s[tid] = 0.0;
}
__syncthreads();
if ((i < n) && (j < n)) {
T waw = waw_matrix_d[IDXC(j, i, n)];
T waf = waf_matrix_d[IDXC(j, i, n)];
T faf = faf_matrix_d[IDXC(j, i, n)];
T gammaij = gamma_matrix_d[IDXC(j, i, n)];
T kij = expf(denom * ((faf - waw) * (alpha * alpha) +
2 * waf * sqrt_one_minus_alpha * alpha + waw));
phi_of_alphas_s[tid] = gammaij * kij;
if (w_l_changed) {
T kij = expf(denom * waw);
phi_of_zeros_s[tid] = gammaij * kij;
phi_of_zero_primes_s[tid] = gammaij * denom * 2 * waf * kij;
// phi_of_alphas_d[IDXC(j, i, n)] = gammaij * kij;
}
__syncthreads();
T phi_of_alpha = reduce_sum(phi_of_alphas_s, block_size);
T phi_of_zero = 0;
T phi_of_zero_prime = 0;
if (w_l_changed) {
phi_of_zero = reduce_sum(phi_of_zeros_s, block_size);
phi_of_zero_prime = reduce_sum(phi_of_zero_primes_s, block_size);
}
if (tid == 0) {
phi_of_alphas_d[bid] = phi_of_alpha;
if (w_l_changed) {
phi_of_zeros_d[bid] = phi_of_zero;
phi_of_zero_primes_d[bid] = phi_of_zero_prime;
}
}
// for (unsigned int s = (blockDim.x * blockDim.y / 2); s > 0; s >>= 1) {
// if (tid < s) {
// phi_of_alphas_s[tid] += phi_of_alphas_s[tid + s];
// if (w_l_changed) {
// phi_of_zeros_s[tid] += phi_of_zeros_s[tid + s];
// phi_of_zero_primes_s[tid] +=
// phi_of_zero_primes_s[tid + s];
// }
// }
// __syncthreads();
// }
// if (tid == 0) {
// phi_of_alphas_d[bid] = phi_of_alphas_s[tid];
// if (w_l_changed) {
// phi_of_zeros_d[bid] = phi_of_zeros_s[tid];
// phi_of_zero_primes_d[bid] = phi_of_zero_primes_s[tid];
// }
// }
}
}
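// GenPhiKernel reduces only within a block; each block writes one partial sum
// per quantity to phi_of_alphas_d / phi_of_zeros_d / phi_of_zero_primes_d at
// its block id, and the host (KDACGPU<T>::GenPhi below) finishes the sum:
//
//   for (int b = 0; b < num_blocks; b++)
//       this->phi_of_alpha_ += phi_of_alphas_h_[b];   // host-side final reduction
//
// A second device-side reduction would also work, but for a few hundred blocks
// the host loop is cheap. One thing to watch: the reductions (and their
// __syncthreads) sit inside the (i < n && j < n) guard, so boundary blocks
// synchronize divergently whenever n is not a multiple of the 16x16 block.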
template<typename T>
__global__ void GenWGradientKernel(const T *x_matrix_d,
const T *g_of_w_d,
const T *w_l_d,
const T *gamma_matrix_d,
const float constant,
const int n,
const int d,
T *gradient_fs_d) {
T *delta_ij_s = SharedMemory<T>();
T *delta_w_s = SharedMemory<T>() + d;
int i = blockIdx.y;
int j = blockIdx.x;
int tx = threadIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < d; k += block_size) {
delta_ij_s[k] = x_matrix_d[IDXC(i, k, n)] - x_matrix_d[IDXC(j, k, n)];
// Dot product for delta' * w
delta_w_s[k] = delta_ij_s[k] * w_l_d[k];
}
__syncthreads();
T delta_w = reduce_sum(delta_w_s, d);
T waw = delta_w * delta_w;
T sigma_sq = constant * constant;
int index_ij = IDXC(i, j, n);
T gamma_ij = gamma_matrix_d[index_ij];
T g_of_w_ij = g_of_w_d[index_ij];
T exp_term = expf(-waw / (2 * sigma_sq));
T coeff = -gamma_ij * g_of_w_ij * exp_term / sigma_sq;
T *gradient_f_ij = gradient_fs_d + IDXR(i, j, n) * d;
// delta * delta_w == Aij * w
for (int k = tx; k < d; k += block_size)
gradient_f_ij[k] = coeff * delta_ij_s[k] * delta_w;
}
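// For the Gaussian kernel k_ij(w) = exp(-w^T A_ij w / (2 sigma^2)) with
// A_ij = delta_ij * delta_ij^T, the gradient with respect to w is
//
//   d k_ij / d w = -(k_ij / sigma^2) * A_ij w
//                = -(k_ij / sigma^2) * (delta_ij . w) * delta_ij,
//
// which, scaled by gamma_ij * g_of_w_ij, is exactly the per-pair vector the
// kernel writes as coeff * delta_ij_s[k] * delta_w. The host side then sums
// the n*n per-pair gradients into one d-dimensional vector.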
template<typename T>
void KDACGPU<T>::GenPhiCoeff(const Vector <T> &w_l,
const Vector <T> &gradient) {
int n = this->n_;
int d = this->d_;
// Three terms used to calculate phi of alpha
// They only change if w_l or gradient change
CUDA_CALL(hipMemcpy(w_l_d_, &w_l(0), d * sizeof(T),
hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(gradient_d_, &gradient(0), d * sizeof(T),
hipMemcpyHostToDevice));
unsigned int block_size = (d < block_limit_ * 2) ?
nextPow2((d+1)/2) : block_limit_;
int shared_mem_size = 2 * d * sizeof(T);
dim3 dim_block(block_size, 1);
dim3 dim_grid(n, n);
hipLaunchKernelGGL(( GenPhiCoeffKernel) , dim3(dim_grid), dim3(dim_block), shared_mem_size, 0,
x_matrix_d_,
w_l_d_,
gradient_d_,
n,
d,
waw_matrix_d_,
waf_matrix_d_,
faf_matrix_d_);
CUDA_CALL(hipGetLastError());
}
template
void KDACGPU<float>::GenPhiCoeff(const Vector<float> &w_l,
const Vector<float> &gradient);
// Generate phi(alpha), phi(0) and phi'(0) for LineSearch
// If this is the first time to generate phi(), then w_l_changed is true
// Or if the w_l is negated because phi'(0) is negative,
// then w_l_changed is true
// If w_l_changed is true, generate phi(0) and phi'(0), otherwise
// when we are only computing phi(alpha) with a different alpha in the loop
// of the LineSearch, the w_l_changed is false and we do not generate
// new waw, waf and faf
template<typename T>
void KDACGPU<T>::GenPhi(const Vector <T> &w_l,
const Vector <T> &gradient,
bool w_l_changed) {
int n = this->n_;
int d = this->d_;
if (this->kernel_type_ == kGaussianKernel) {
this->profiler_.gen_phi.Start();
float alpha_square = pow(this->alpha_, 2);
float sqrt_one_minus_alpha = pow((1 - alpha_square), 0.5);
float denom = -1 / (2 * pow(this->constant_, 2));
this->phi_of_alpha_ = 0;
if (w_l_changed) {
GenPhiCoeff(w_l, gradient);
this->phi_of_zero_ = 0;
this->phi_of_zero_prime_ = 0;
}
int block_dim_x = 16;
int block_dim_y = 16;
dim3 dim_block(block_dim_x, block_dim_y);
// If matrix is n x m, then I need an m x n grid for contiguous
// memory access
dim3 dim_grid((n - 1) / block_dim_x + 1,
(n - 1) / block_dim_y + 1);
int block_size = block_dim_x * block_dim_y;
int num_blocks =
((n - 1) / block_dim_x + 1) * ((n - 1) / block_dim_y + 1);
int shared_mem_size;
if (w_l_changed)
shared_mem_size = 3 * block_size * sizeof(T);
else
shared_mem_size = block_size * sizeof(T);
GenPhiKernel << < dim_grid, dim_block, shared_mem_size >> >
(this->alpha_,
sqrt_one_minus_alpha,
denom,
waw_matrix_d_,
waf_matrix_d_,
faf_matrix_d_,
gamma_matrix_d_,
n,
d,
w_l_changed,
phi_of_alphas_d_,
phi_of_zeros_d_,
phi_of_zero_primes_d_);
// Check if error happens in kernel launch
CUDA_CALL(hipGetLastError());
CUDA_CALL(hipMemcpy(phi_of_alphas_h_, phi_of_alphas_d_,
num_blocks * sizeof(T), hipMemcpyDeviceToHost));
for (int i = 0; i < num_blocks; i++) {
this->phi_of_alpha_ += phi_of_alphas_h_[i];
}
if (w_l_changed) {
CUDA_CALL(hipMemcpy(phi_of_zeros_h_, phi_of_zeros_d_,
num_blocks * sizeof(T), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(phi_of_zero_primes_h_, phi_of_zero_primes_d_,
num_blocks * sizeof(T), hipMemcpyDeviceToHost));
for (int i = 0; i < num_blocks; i++) {
this->phi_of_zero_ += phi_of_zeros_h_[i];
this->phi_of_zero_prime_ += phi_of_zero_primes_h_[i];
}
}
this->profiler_.gen_phi.Record();
}
}
template
void KDACGPU<float>::GenPhi(const Vector<float> &w_l,
const Vector<float> &gradient,
bool w_l_changed);
template<typename T>
Vector <T> KDACGPU<T>::GenWGradient(const Vector <T> &w_l) {
this->profiler_.gen_grad.Start();
int n = this->n_;
int d = this->d_;
Vector <T> w_gradient = Vector<T>::Zero(d);
if (this->kernel_type_ == kGaussianKernel) {
CUDA_CALL(hipMemcpy(w_l_d_, &w_l(0), d * sizeof(T),
hipMemcpyHostToDevice));
// When block_limit is 512
// If d is 128, block_size is 64
// If d is 6, block_size is 4
// If d is 1025, block_size is 512
unsigned int block_size = (d < block_limit_ * 2) ?
nextPow2((d+1)/2) : block_limit_;
int shared_mem_size = 2 * d * sizeof(T);
dim3 dim_block(block_size, 1);
dim3 dim_grid(n, n);
GenWGradientKernel
<< < dim_grid, dim_block, shared_mem_size >> >
(x_matrix_d_,
g_of_w_d_,
w_l_d_,
gamma_matrix_d_,
this->constant_,
n,
d,
gradient_fs_d_);
CUDA_CALL(hipGetLastError());
CUDA_CALL(hipMemcpy(gradient_fs_h_, gradient_fs_d_,
n * n * d * sizeof(T),
hipMemcpyDeviceToHost));
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
T *gradient_f_ij = gradient_fs_h_ + IDXR(i, j, n) * d;
Vector<T> grad_temp = Eigen::Map < Vector < T >> (gradient_f_ij, d);
util::CheckFinite(grad_temp, "grad_temp_"+std::to_string(i));
w_gradient = w_gradient + grad_temp;
}
}
}
this->profiler_.gen_grad.Record();
util::CheckFinite(w_gradient, "w_gradient");
return w_gradient;
}
template
Vector<float> KDACGPU<float>::GenWGradient(const Vector<float> &w_l);
template<typename T>
void KDACGPU<T>::UpdateGOfW(const Vector<T> &w_l) {
this->profiler_.update_g_of_w.Start();
int n = this->n_;
int d = this->d_;
CUDA_CALL(hipMemcpy(w_l_d_, &w_l(0), d * sizeof(T),
hipMemcpyHostToDevice));
if (this->kernel_type_ == kGaussianKernel) {
unsigned int block_size = (d < block_limit_ * 2) ?
nextPow2((d+1)/2) : block_limit_;
int shared_mem_size = d * sizeof(T);
dim3 dim_block(block_size, 1);
dim3 dim_grid(n, n);
hipLaunchKernelGGL(( UpdateGOfWKernel) , dim3(dim_grid), dim3(dim_block), shared_mem_size, 0,
x_matrix_d_,
w_l_d_,
this->constant_,
n,
d,
g_of_w_d_);
CUDA_CALL(hipGetLastError());
}
this->profiler_.update_g_of_w.Record();
}
template
void KDACGPU<float>::UpdateGOfW(const Vector<float> &w_l);
} // Namespace NICE | 012cd1b060867c9f6fe5a81a8e2d35d56a40c84c.cu | // The MIT License (MIT)
//
// Copyright (c) 2016 Northeastern University
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <iostream>
#include "include/kdac_gpu.h"
#include "include/gpu_util.h"
// Hack to cope with Clion
#include "../../include/gpu_util.h"
#include "../../../../../../../../usr/local/cuda/include/driver_types.h"
#include "../../include/kdac_gpu.h"
#include "../../include/kernel_types.h"
namespace Nice {
unsigned int nextPow2(unsigned int x) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
bool isPow2(unsigned int x) {
return ((x & (x - 1)) == 0);
}
template <typename T>
__device__ void mv(T *mat_s,
T* vec_in_s,
const int num_rows,
const int num_cols,
T* vec_out_s) {
int tx = threadIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < num_rows; k += block_size) {
for (int col = 0; col < num_cols; col++)
vec_out_s[k] += mat_s[IDXC(k, col, num_rows)] * vec_in_s[col];
}
__syncthreads();
}
template <typename T>
__device__ T reduce_sum(T *data_s, int n) {
T sum = 0;
int block_size = blockDim.x * blockDim.y;
int tx = threadIdx.y * blockDim.x + threadIdx.x;
for (int k = tx; k < n; k += block_size)
sum += data_s[k];
data_s[tx] = sum;
__syncthreads();
if ((block_size >= 512) && (tx < 256))
data_s[tx] = sum = sum + data_s[tx + 256];
__syncthreads();
if ((block_size >= 256) && (tx < 128))
data_s[tx] = sum = sum + data_s[tx + 128];
__syncthreads();
if ((block_size >= 128) && (tx < 64))
data_s[tx] = sum = sum + data_s[tx + 64];
__syncthreads();
// if ((block_size >= 64) && (tx < 32))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 32];
// __syncthreads();
//
// if ((block_size >= 32) && (tx < 16))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 16];
// __syncthreads();
//
// if ((block_size >= 16) && (tx < 8))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 8];
// __syncthreads();
//
// if ((block_size >= 8) && (tx < 4))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 4];
// __syncthreads();
//
// if ((block_size >= 4) && (tx < 2))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 2];
// __syncthreads();
//
// if ((block_size >= 2) && (tx < 1))
// exp_term_s[tx] = sum = sum + exp_term_s[tx + 1];
// __syncthreads();
if (tx < 32) {
if (block_size >= 64)
sum += data_s[tx + 32];
for (int offset = warpSize / 2; offset >0; offset /=2)
sum += __shfl_down(sum, offset);
}
if (tx == 0)
data_s[tx] = sum;
__syncthreads();
return data_s[0];
}
template <typename T>
__device__ void GenAij(const T *x_matrix_d,
const int n,
const int d,
T *a_ij_d,
T *delta_ij_d) {
int tx = threadIdx.x;
int i = blockIdx.y;
int j = blockIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < d; k += block_size)
delta_ij_d[k] = x_matrix_d[IDXC(i, k, n)] - x_matrix_d[IDXC(j, k, n)];
__syncthreads();
for (int k = tx; k < d; k += block_size)
for (int col = 0; col < d; col++)
// thread tx calculates a whole row tx of the output matrix a_ij
a_ij_d[IDXC(k, col, d)] = delta_ij_d[col] * delta_ij_d[k];
__syncthreads();
}
template<typename T>
__global__ void UpdateGOfWKernel(const T *x_matrix_d,
const T *w_l_d,
const float constant,
const int n,
const int d,
T *g_of_w_d_) {
// Shared memory to store a_ij * w_l
T *delta_w_s = SharedMemory<T>();
int i = blockIdx.y;
int j = blockIdx.x;
int tx = threadIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < d; k += block_size)
delta_w_s[k] =
(x_matrix_d[IDXC(i,k,n)] - x_matrix_d[IDXC(j,k,n)]) * w_l_d[k];
__syncthreads();
T delta_w =reduce_sum(delta_w_s, d);
T exp_term = expf(-delta_w * delta_w / (2 * constant * constant));
if (tx == 0)
g_of_w_d_[IDXC(i,j,n)] *= exp_term;
}
template<typename T>
__global__ void GenPhiCoeffKernel(const T *x_matrix_d,
const T *w_l_d,
const T *gradient_d,
const int n,
const int d,
T *waw_matrix_d,
T *waf_matrix_d,
T *faf_matrix_d) {
T *delta_w_s = SharedMemory<T>();
T *delta_f_s = SharedMemory<T>() + d;
int i = blockIdx.y;
int j = blockIdx.x;
int tx = threadIdx.x;
int block_size = blockDim.x;
T delta_ij_k;
for (int k = tx; k < d; k += block_size) {
delta_ij_k = x_matrix_d[IDXC(i,k,n)] - x_matrix_d[IDXC(j,k,n)];
delta_w_s[k] = delta_ij_k * w_l_d[k];
delta_f_s[k] = delta_ij_k * gradient_d[k];
}
__syncthreads();
T delta_w = reduce_sum(delta_w_s, d);
T delta_f = reduce_sum(delta_f_s, d);
if (tx == 0) {
waw_matrix_d[IDXC(j, i, n)] = delta_w * delta_w;
waf_matrix_d[IDXC(j, i, n)] = delta_w * delta_f;
faf_matrix_d[IDXC(j, i, n)] = delta_f * delta_f;
}
// T *vec_s = SharedMemory<T>();
// T *waw_s = (T *) vec_s;
// T *waf_s = (T *) &vec_s[d];
// T *faf_s = (T *) &vec_s[2 * d];
// T *w_s = (T *) &vec_s[3 * d];
// T *grad_s = (T *) &vec_s[4 * d];
// T *a_ij_s = (T *) &vec_s[5 * d];
// T *delta_ij_s = (T *) &vec_s[5 * d + d * d];
//
// GenAij(x_matrix_d, n, d, a_ij_s, delta_ij_s);
//
// int i = blockIdx.y;
// int j = blockIdx.x;
// int tx = threadIdx.x;
// int block_size = blockDim.x;
//
// for (int k = tx; k < d; k += block_size) {
// waw_s[k] = 0.0;
// waf_s[k] = 0.0;
// faf_s[k] = 0.0;
// w_s[k] = w_l_d[k];
// grad_s[k] = gradient_d[k];
// }
// __syncthreads();
//
// mv(a_ij_s, w_s, d, d, waw_s);
// mv(a_ij_s, grad_s, d, d, waf_s);
// mv(a_ij_s, grad_s, d, d, faf_s);
//
// // Dot Product
// for (int k = tx; k < d; k += block_size) {
// waw_s[k] = waw_s[k] * w_s[k];
// waf_s[k] = waf_s[k] * w_s[k];
// faf_s[k] = faf_s[k] * grad_s[k];
// }
// __syncthreads();
// reduce_sum(waw_s, d);
// reduce_sum(waf_s, d);
// reduce_sum(faf_s, d);
//
// // Transposed access for better access pattern as waw_s matrix is column-major
// if (tx == 0) {
// waw_matrix_d[IDXC(j, i, n)] = waw_s[tx];
// waf_matrix_d[IDXC(j, i, n)] = waf_s[tx];
// faf_matrix_d[IDXC(j, i, n)] = faf_s[tx];
// }
}
template<typename T>
__global__ void GenPhiKernel(const T alpha,
const T sqrt_one_minus_alpha,
const T denom,
const T *waw_matrix_d,
const T *waf_matrix_d,
const T *faf_matrix_d,
const T *gamma_matrix_d,
const int n,
const int d,
bool w_l_changed,
T *phi_of_alphas_d,
T *phi_of_zeros_d,
T *phi_of_zero_primes_d) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x * blockDim.y;
int tid = IDXR(threadIdx.y, threadIdx.x, blockDim.x);
int bid = IDXR(blockIdx.y, blockIdx.x, gridDim.x);
T *phi_of_alphas_s = SharedMemory<T>();
T *phi_of_zeros_s = 0;
T *phi_of_zero_primes_s = 0;
phi_of_alphas_s[tid] = 0.0;
if (w_l_changed) {
phi_of_zeros_s = SharedMemory<T>() + block_size;
phi_of_zero_primes_s = SharedMemory<T>() + 2*block_size;
phi_of_zeros_s[tid] = 0.0;
phi_of_zero_primes_s[tid] = 0.0;
}
__syncthreads();
if ((i < n) && (j < n)) {
T waw = waw_matrix_d[IDXC(j, i, n)];
T waf = waf_matrix_d[IDXC(j, i, n)];
T faf = faf_matrix_d[IDXC(j, i, n)];
T gammaij = gamma_matrix_d[IDXC(j, i, n)];
T kij = expf(denom * ((faf - waw) * (alpha * alpha) +
2 * waf * sqrt_one_minus_alpha * alpha + waw));
phi_of_alphas_s[tid] = gammaij * kij;
if (w_l_changed) {
T kij = expf(denom * waw);
phi_of_zeros_s[tid] = gammaij * kij;
phi_of_zero_primes_s[tid] = gammaij * denom * 2 * waf * kij;
// phi_of_alphas_d[IDXC(j, i, n)] = gammaij * kij;
}
__syncthreads();
T phi_of_alpha = reduce_sum(phi_of_alphas_s, block_size);
T phi_of_zero = 0;
T phi_of_zero_prime = 0;
if (w_l_changed) {
phi_of_zero = reduce_sum(phi_of_zeros_s, block_size);
phi_of_zero_prime = reduce_sum(phi_of_zero_primes_s, block_size);
}
if (tid == 0) {
phi_of_alphas_d[bid] = phi_of_alpha;
if (w_l_changed) {
phi_of_zeros_d[bid] = phi_of_zero;
phi_of_zero_primes_d[bid] = phi_of_zero_prime;
}
}
// for (unsigned int s = (blockDim.x * blockDim.y / 2); s > 0; s >>= 1) {
// if (tid < s) {
// phi_of_alphas_s[tid] += phi_of_alphas_s[tid + s];
// if (w_l_changed) {
// phi_of_zeros_s[tid] += phi_of_zeros_s[tid + s];
// phi_of_zero_primes_s[tid] +=
// phi_of_zero_primes_s[tid + s];
// }
// }
// __syncthreads();
// }
// if (tid == 0) {
// phi_of_alphas_d[bid] = phi_of_alphas_s[tid];
// if (w_l_changed) {
// phi_of_zeros_d[bid] = phi_of_zeros_s[tid];
// phi_of_zero_primes_d[bid] = phi_of_zero_primes_s[tid];
// }
// }
}
}
template<typename T>
__global__ void GenWGradientKernel(const T *x_matrix_d,
const T *g_of_w_d,
const T *w_l_d,
const T *gamma_matrix_d,
const float constant,
const int n,
const int d,
T *gradient_fs_d) {
T *delta_ij_s = SharedMemory<T>();
T *delta_w_s = SharedMemory<T>() + d;
int i = blockIdx.y;
int j = blockIdx.x;
int tx = threadIdx.x;
int block_size = blockDim.x;
for (int k = tx; k < d; k += block_size) {
delta_ij_s[k] = x_matrix_d[IDXC(i, k, n)] - x_matrix_d[IDXC(j, k, n)];
// Dot product for delta' * w
delta_w_s[k] = delta_ij_s[k] * w_l_d[k];
}
__syncthreads();
T delta_w = reduce_sum(delta_w_s, d);
T waw = delta_w * delta_w;
T sigma_sq = constant * constant;
int index_ij = IDXC(i, j, n);
T gamma_ij = gamma_matrix_d[index_ij];
T g_of_w_ij = g_of_w_d[index_ij];
T exp_term = expf(-waw / (2 * sigma_sq));
T coeff = -gamma_ij * g_of_w_ij * exp_term / sigma_sq;
T *gradient_f_ij = gradient_fs_d + IDXR(i, j, n) * d;
// delta * delta_w == Aij * w
for (int k = tx; k < d; k += block_size)
gradient_f_ij[k] = coeff * delta_ij_s[k] * delta_w;
}
template<typename T>
void KDACGPU<T>::GenPhiCoeff(const Vector <T> &w_l,
const Vector <T> &gradient) {
int n = this->n_;
int d = this->d_;
// Three terms used to calculate phi of alpha
// They only change if w_l or gradient change
CUDA_CALL(cudaMemcpy(w_l_d_, &w_l(0), d * sizeof(T),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(gradient_d_, &gradient(0), d * sizeof(T),
cudaMemcpyHostToDevice));
unsigned int block_size = (d < block_limit_ * 2) ?
nextPow2((d+1)/2) : block_limit_;
int shared_mem_size = 2 * d * sizeof(T);
dim3 dim_block(block_size, 1);
dim3 dim_grid(n, n);
GenPhiCoeffKernel <<<dim_grid, dim_block, shared_mem_size>>> (
x_matrix_d_,
w_l_d_,
gradient_d_,
n,
d,
waw_matrix_d_,
waf_matrix_d_,
faf_matrix_d_);
CUDA_CALL(cudaGetLastError());
}
template
void KDACGPU<float>::GenPhiCoeff(const Vector<float> &w_l,
const Vector<float> &gradient);
// Generate phi(alpha), phi(0) and phi'(0) for LineSearch
// If this is the first time to generate phi(), then w_l_changed is true
// Or if the w_l is negated because phi'(0) is negative,
// then w_l_changed is true
// If w_l_changed is true, generate phi(0) and phi'(0), otherwise
// when we are only computing phi(alpha) with a different alpha in the loop
// of the LineSearch, the w_l_changed is false and we do not generate
// new waw, waf and faf
template<typename T>
void KDACGPU<T>::GenPhi(const Vector <T> &w_l,
const Vector <T> &gradient,
bool w_l_changed) {
int n = this->n_;
int d = this->d_;
if (this->kernel_type_ == kGaussianKernel) {
this->profiler_.gen_phi.Start();
float alpha_square = pow(this->alpha_, 2);
float sqrt_one_minus_alpha = pow((1 - alpha_square), 0.5);
float denom = -1 / (2 * pow(this->constant_, 2));
this->phi_of_alpha_ = 0;
if (w_l_changed) {
GenPhiCoeff(w_l, gradient);
this->phi_of_zero_ = 0;
this->phi_of_zero_prime_ = 0;
}
int block_dim_x = 16;
int block_dim_y = 16;
dim3 dim_block(block_dim_x, block_dim_y);
// If matrix is n x m, then I need an m x n grid for contiguous
// memory access
dim3 dim_grid((n - 1) / block_dim_x + 1,
(n - 1) / block_dim_y + 1);
int block_size = block_dim_x * block_dim_y;
int num_blocks =
((n - 1) / block_dim_x + 1) * ((n - 1) / block_dim_y + 1);
int shared_mem_size;
if (w_l_changed)
shared_mem_size = 3 * block_size * sizeof(T);
else
shared_mem_size = block_size * sizeof(T);
GenPhiKernel << < dim_grid, dim_block, shared_mem_size >> >
(this->alpha_,
sqrt_one_minus_alpha,
denom,
waw_matrix_d_,
waf_matrix_d_,
faf_matrix_d_,
gamma_matrix_d_,
n,
d,
w_l_changed,
phi_of_alphas_d_,
phi_of_zeros_d_,
phi_of_zero_primes_d_);
// Check if error happens in kernel launch
CUDA_CALL(cudaGetLastError());
CUDA_CALL(cudaMemcpy(phi_of_alphas_h_, phi_of_alphas_d_,
num_blocks * sizeof(T), cudaMemcpyDeviceToHost));
for (int i = 0; i < num_blocks; i++) {
this->phi_of_alpha_ += phi_of_alphas_h_[i];
}
if (w_l_changed) {
CUDA_CALL(cudaMemcpy(phi_of_zeros_h_, phi_of_zeros_d_,
num_blocks * sizeof(T), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(phi_of_zero_primes_h_, phi_of_zero_primes_d_,
num_blocks * sizeof(T), cudaMemcpyDeviceToHost));
for (int i = 0; i < num_blocks; i++) {
this->phi_of_zero_ += phi_of_zeros_h_[i];
this->phi_of_zero_prime_ += phi_of_zero_primes_h_[i];
}
}
this->profiler_.gen_phi.Record();
}
}
template
void KDACGPU<float>::GenPhi(const Vector<float> &w_l,
const Vector<float> &gradient,
bool w_l_changed);
template<typename T>
Vector <T> KDACGPU<T>::GenWGradient(const Vector <T> &w_l) {
this->profiler_.gen_grad.Start();
int n = this->n_;
int d = this->d_;
Vector <T> w_gradient = Vector<T>::Zero(d);
if (this->kernel_type_ == kGaussianKernel) {
CUDA_CALL(cudaMemcpy(w_l_d_, &w_l(0), d * sizeof(T),
cudaMemcpyHostToDevice));
// When block_limit is 512
// If d is 128, block_size is 64
// If d is 6, block_size is 4
// If d is 1025, block_size is 512
unsigned int block_size = (d < block_limit_ * 2) ?
nextPow2((d+1)/2) : block_limit_;
int shared_mem_size = 2 * d * sizeof(T);
dim3 dim_block(block_size, 1);
dim3 dim_grid(n, n);
    GenWGradientKernel <<<dim_grid, dim_block, shared_mem_size>>>
(x_matrix_d_,
g_of_w_d_,
w_l_d_,
gamma_matrix_d_,
this->constant_,
n,
d,
gradient_fs_d_);
CUDA_CALL(cudaGetLastError());
CUDA_CALL(cudaMemcpy(gradient_fs_h_, gradient_fs_d_,
n * n * d * sizeof(T),
cudaMemcpyDeviceToHost));
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
T *gradient_f_ij = gradient_fs_h_ + IDXR(i, j, n) * d;
Vector<T> grad_temp = Eigen::Map < Vector < T >> (gradient_f_ij, d);
util::CheckFinite(grad_temp, "grad_temp_"+std::to_string(i));
w_gradient = w_gradient + grad_temp;
}
}
}
this->profiler_.gen_grad.Record();
util::CheckFinite(w_gradient, "w_gradient");
return w_gradient;
}
template
Vector<float> KDACGPU<float>::GenWGradient(const Vector<float> &w_l);
template<typename T>
void KDACGPU<T>::UpdateGOfW(const Vector<T> &w_l) {
this->profiler_.update_g_of_w.Start();
int n = this->n_;
int d = this->d_;
CUDA_CALL(cudaMemcpy(w_l_d_, &w_l(0), d * sizeof(T),
cudaMemcpyHostToDevice));
if (this->kernel_type_ == kGaussianKernel) {
unsigned int block_size = (d < block_limit_ * 2) ?
nextPow2((d+1)/2) : block_limit_;
int shared_mem_size = d * sizeof(T);
dim3 dim_block(block_size, 1);
dim3 dim_grid(n, n);
UpdateGOfWKernel <<<dim_grid, dim_block, shared_mem_size>>>
(x_matrix_d_,
w_l_d_,
this->constant_,
n,
d,
g_of_w_d_);
CUDA_CALL(cudaGetLastError());
}
this->profiler_.update_g_of_w.Record();
}
template
void KDACGPU<float>::UpdateGOfW(const Vector<float> &w_l);
} // Namespace NICE |
b20aa1e642513e0315e82ed9a5e47a6f1cf7371f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
const int Nthreads = 1024, maxFR = 10000, NrankMax = 3, nt0max=81, NchanMax = 17;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
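// max1D: per-channel sliding-window maximum. Each block handles one row of
// length NT (Params[0]); for every sample t it takes the maximum of the next
// nt0 samples (Params[3]) and writes it to conv_sig only where that maximum
// exceeds the threshold spkTh (Params[5]); other output samples are left untouched.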
__global__ void max1D(const double *Params, const float *data, float *conv_sig){
volatile __shared__ float sdata[Nthreads+81];
float y, spkTh;
int tid, tid0, bid, i, NT, nt0;
NT = (int) Params[0];
nt0 = (int) Params[3];
spkTh = (float) Params[5];
tid = threadIdx.x;
bid = blockIdx.x;
tid0 = 0;
while (tid0<NT-Nthreads-nt0+1){
if (tid<nt0)
sdata[tid] = data[tid0 + tid + NT*bid];
sdata[tid + nt0] = data[nt0+tid0 + tid+ NT*bid];
__syncthreads();
y = 0.0f;
#pragma unroll 4
for(i=0;i<nt0;i++)
y = max(y, sdata[tid+i]);
if (y>spkTh)
conv_sig[tid0 + tid + NT*bid] = y;
tid0+=Nthreads;
__syncthreads();
}
} | b20aa1e642513e0315e82ed9a5e47a6f1cf7371f.cu | #include "includes.h"
const int Nthreads = 1024, maxFR = 10000, NrankMax = 3, nt0max=81, NchanMax = 17;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void max1D(const double *Params, const float *data, float *conv_sig){
volatile __shared__ float sdata[Nthreads+81];
float y, spkTh;
int tid, tid0, bid, i, NT, nt0;
NT = (int) Params[0];
nt0 = (int) Params[3];
spkTh = (float) Params[5];
tid = threadIdx.x;
bid = blockIdx.x;
tid0 = 0;
while (tid0<NT-Nthreads-nt0+1){
if (tid<nt0)
sdata[tid] = data[tid0 + tid + NT*bid];
sdata[tid + nt0] = data[nt0+tid0 + tid+ NT*bid];
__syncthreads();
y = 0.0f;
#pragma unroll 4
for(i=0;i<nt0;i++)
y = max(y, sdata[tid+i]);
if (y>spkTh)
conv_sig[tid0 + tid + NT*bid] = y;
tid0+=Nthreads;
__syncthreads();
}
} |
c06bc5792d5fe4d2d317e90f6f1d0271a1d26210.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_utils.h"
#include <hipcub/hipcub.hpp>
#include <memory>
namespace CudaUtils {
/** this function is used to modify the keys dense tensor, erasing keys that
 * do not belong to this gpu by setting them to 0. (0 represents an invalid key;
 * the original 0-key has been mapped to 1.)
 * @param keys_ptr, the pointer to the 2-D dense keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param dev_id, the device id this keys_ptr belongs to.
 * @param elem_size, the element size of keys_ptr.
 */
template <typename T>
__global__ void erase_distributed_embedding_keys(T* keys_ptr, const int dev_id,
const int gpu_count, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
if (keys_ptr[i] > 0 && ((keys_ptr[i] - 1) % gpu_count != dev_id)) {
keys_ptr[i] = 0;
}
}
}
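// Worked example for the kernel above: with gpu_count = 2,
//   dev_id = 0: keys {0, 1, 2, 3, 4, 5} -> {0, 1, 0, 3, 0, 5}
//   dev_id = 1: keys {0, 1, 2, 3, 4, 5} -> {0, 0, 2, 0, 4, 0}
// (0 is the invalid marker and is never assigned to any device.)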
/** this function is used to modify the keys dense tensor, erasing keys that
 * do not belong to this gpu by setting them to 0.
 * @param keys_ptr, the pointer to the 2-D dense keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param dev_id, the device id this keys_ptr belongs to.
 * @param slot_num, the number of slots per sample.
 * @param max_nnz, the maximum number of keys per slot.
 * @param gpu_count, how many gpus.
 * @param elem_size, the element size of keys_ptr.
 */
template <typename T>
__global__ void erase_localized_embedding_keys(T* keys_ptr, const int dev_id,
const size_t slot_num, const size_t max_nnz,
const int gpu_count, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
  for (size_t i = gid; i < elem_size; i += strid) {
    // derive the owning device from the element index itself so the grid-stride
    // loop also stays correct when the grid does not cover every element
    int row_idx = i / max_nnz;
    int slot_idx = row_idx % slot_num;
    int key_dev_id = slot_idx % gpu_count;
    if (key_dev_id != dev_id) {
keys_ptr[i] = 0;
}
}
}
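// Worked example for the kernel above: with slot_num = 3 and gpu_count = 2,
// slots 0 and 2 belong to dev_id 0 and slot 1 to dev_id 1, so on dev_id 0 every
// key in a slot-1 row is zeroed while slot-0 and slot-2 rows are kept unchanged.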
/** this function is used to modify the elements.
 * @param keys_ptr, the pointer to the 2-D keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param fn, how to modify each element: given an input element, it returns the modified value.
 * @param elem_size, how many elements.
 */
template <typename input_type, typename Func>
__global__ void modify_elements(input_type* keys_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
keys_ptr[i] = fn(keys_ptr[i]);
}
}
template <typename Func, typename input_type, typename output_type>
__global__ void modify_elements(const input_type* input_ptr, output_type* output_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
output_ptr[i] = fn(input_ptr[i]);
}
}
/** This function is used to generate a binary flag vector for csr_row_offset.
 * @param elem_size, how many elements are in input_ptr; it is equal to the elem_size of csr_row_offset.
 * @param dest_dev_id, decides which indices should reside on this destination device: matching
 * positions are set to 1, all other positions to 0.
 */
template <typename T>
__global__ void generate_binary_vec(T* input_ptr, const size_t elem_size, const int dest_dev_id,
const size_t slot_num, const int gpu_count) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
input_ptr[i] = 1; // initialize the value to 1.
if (i > 0) { // ignore the first value. it should be 1 anyway.
int row_idx = i - 1;
int slot_idx = row_idx % slot_num;
int dev_id = slot_idx % gpu_count;
input_ptr[i] = (dev_id == dest_dev_id) ? 1 : 0;
}
}
}
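// Worked example for the kernel above: with slot_num = 2, gpu_count = 2 and
// elem_size = 5, the flags are {1, 1, 0, 1, 0} for dest_dev_id = 0 and
// {1, 0, 1, 0, 1} for dest_dev_id = 1 (position 0 is always 1).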
/** This function fuses the value + 1 shift and erase_distributed_embedding_keys into one kernel.
 */
template <typename T>
__global__ void fuse_keys_plus_erase_distributed_embedding(T* keys_ptr, const int dev_id, const int gpu_count,
const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
T temp_key = keys_ptr[i];
temp_key += 1;
if (temp_key <= 0 || (temp_key - 1) % gpu_count != dev_id) {
temp_key = 0;
}
keys_ptr[i] = temp_key;
}
}
template <typename T>
void all_keys_plus_1(T* keys_ptr, const size_t elem_size, const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto fn = [] __device__(T key) -> T { return key + 1; };
hipLaunchKernelGGL(( modify_elements), dim3(grid_dim), dim3(block_dim), 0, stream, keys_ptr, fn, elem_size);
}
template <typename input_type, typename output_type>
void cast_elements(const input_type* input_ptr, output_type* output_ptr, const size_t elem_size,
const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto cast_fn = [] __device__ (input_type num) -> output_type { return static_cast<output_type>(num); };
hipLaunchKernelGGL(( modify_elements), dim3(grid_dim), dim3(block_dim), 0, stream, input_ptr, output_ptr, cast_fn, elem_size);
}
template <typename T>
void erase_distributed_embedding_keys(T* keys_ptr, const int dev_id, const int gpu_count,
const size_t elem_size, const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( erase_distributed_embedding_keys), dim3(grid_dim), dim3(block_dim), 0, stream, keys_ptr, dev_id, gpu_count, elem_size);
}
template <typename T>
void erase_localized_embedding_keys(T* keys_ptr, const int dev_id, const size_t slot_num, const size_t max_nnz,
const int gpu_count, const size_t elem_size, const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( erase_localized_embedding_keys), dim3(grid_dim), dim3(block_dim), 0, stream, keys_ptr, dev_id, slot_num,
max_nnz, gpu_count, elem_size);
}
template <typename T>
void fuse_keys_plus_erase_distributed_embedding(T* keys_ptr, const int dev_id, const int gpu_count,
const size_t elem_size, const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( fuse_keys_plus_erase_distributed_embedding), dim3(grid_dim), dim3(block_dim), 0, stream, keys_ptr, dev_id, gpu_count, elem_size);
}
template <typename T>
void convert_dense_to_csr(T* keys_ptr, int row, int col,
const hipsparseHandle_t& handle,
const hipblasHandle_t& cublas_handle,
T* csr_values, int* csr_row_offsets, int * csr_col_indices,
long long* total_nnz,
hipsparseMatDescr_t& desc, int* nnz_row,
T* keys_ptr_transpose) {
if (std::is_same<T, long long>::value) {
// transpose
const double alpha = 1.0;
const double beta = 0.0;
hipblasStatus_t cublas_error = hipblasDgeam(cublas_handle,
HIPBLAS_OP_T, /*transa*/
HIPBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<double*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<double*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<double*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != HIPBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
hipsparseStatus_t status = hipsparseDnnz(handle,
HIPSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<double*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = hipsparseDdense2csr(handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<double*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<double*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
} else if (std::is_same<T, unsigned int>::value) {
// transpose
const float alpha = 1.0;
const float beta = 0.0;
hipblasStatus_t cublas_error = hipblasSgeam(cublas_handle,
HIPBLAS_OP_T, /*transa*/
HIPBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<float*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<float*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<float*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != HIPBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
hipsparseStatus_t status = hipsparseSnnz(handle,
HIPSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<float*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = hipsparseSdense2csr(handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<float*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<float*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
}
}
template <typename T>
void value_tensors_subtract_1(T* values_ptr, const size_t elem_size, const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
auto fn = [] __device__ (T value) { if (value > 0) {return value - 1;} else {return value;} };
hipLaunchKernelGGL(( modify_elements), dim3(grid_dim), dim3(block_dim), 0, stream, values_ptr, fn, elem_size);
}
template <typename T>
void generate_binary_vec(T* input_ptr, const size_t elem_size, const int dest_dev_id,
const size_t slot_num, const int gpu_count, const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
hipLaunchKernelGGL(( generate_binary_vec), dim3(grid_dim), dim3(block_dim), 0, stream, input_ptr, elem_size, dest_dev_id,
slot_num, gpu_count);
}
template <typename flag_type>
hipError_t get_temp_storage_bytes(int* input_ptr, flag_type* binary_flag, int* output_ptr,
const size_t elem_size, size_t& temp_storage_bytes) {
int d_num_selected_out = 0;
hipError_t error = hipcub::DeviceSelect::Flagged(nullptr, /*temp storage*/
temp_storage_bytes, /*temp_storage_bytes*/
input_ptr, /*d_in*/
binary_flag, /*d_flags*/
output_ptr, /*d_out*/
&d_num_selected_out, /*d_num_selected_out*/
static_cast<int>(elem_size)/*num_items*/);
return error;
}
template <typename flag_type>
hipError_t select_slots(int* input_ptr, int* output_ptr, const size_t elem_size, int* d_temp_storage,
flag_type* binary_flag, size_t temp_storage_bytes, int* d_num_selected_out,
hipStream_t stream) {
hipError_t error = hipMemsetAsync(output_ptr, 0, sizeof(int) * elem_size, stream);
if (error != hipSuccess) return error;
error = hipcub::DeviceSelect::Flagged(d_temp_storage, /*temp storage*/
temp_storage_bytes, /*temp_storage_bytes*/
input_ptr, /*d_in*/
binary_flag, /*d_flags*/
output_ptr, /*d_out*/
d_num_selected_out, /*d_num_selected_out*/
static_cast<int>(elem_size), /*num_items*/
stream, /*hipStream_t*/
false/*debug_synchronous*/);
if (error != hipSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipGetErrorString(error) << std::endl;
return error;
}
return hipSuccess;
}
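// Typical (hypothetical) two-phase use of the two helpers above, following the
// standard cub/hipcub pattern: query the scratch size first, then run the
// selection. All variable names below are illustrative:
//
//   size_t temp_bytes = 0;
//   get_temp_storage_bytes(d_row_offsets, d_flags, d_selected, elem_size, temp_bytes);
//   int* d_temp = nullptr;
//   hipMalloc(reinterpret_cast<void**>(&d_temp), temp_bytes);
//   select_slots(d_row_offsets, d_selected, elem_size, d_temp, d_flags,
//                temp_bytes, d_num_selected, stream);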
size_t num_roof(const size_t number, const size_t base) {
return ((number + base - 1) / base) * base;
}
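// e.g. num_roof(10, 4) == 12 and num_roof(8, 4) == 8 (round up to a multiple of base).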
template <typename T>
void EraseDistributedEmbeddingKeysFunctor<T>::operator()(void* keys_ptr, const int dev_id, const size_t slot_num,
const size_t max_nnz, const int gpu_count, const size_t elem_size,
const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( erase_distributed_embedding_keys), dim3(grid_dim), dim3(block_dim), 0, stream, reinterpret_cast<T*>(keys_ptr),
dev_id, gpu_count, elem_size);
}
template <typename T>
void EraseLocalizedEmbeddingKeysFunctor<T>::operator()(void* keys_ptr, const int dev_id, const size_t slot_num,
const size_t max_nnz, const int gpu_count, const size_t elem_size,
const size_t sm_count, hipStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( erase_localized_embedding_keys), dim3(grid_dim), dim3(block_dim), 0, stream, reinterpret_cast<T*>(keys_ptr),
dev_id, slot_num,
max_nnz, gpu_count, elem_size);
}
void ConvertDenseToCSRDoubleFunctor::operator()(void* keys_ptr, int row, int col,
const hipsparseHandle_t& cusparse_handle,
const hipblasHandle_t& cublas_handle, void* csr_values,
int* csr_row_offsets, int* csr_col_indices,
long long* total_nnz, hipsparseMatDescr_t& desc, int* nnz_row,
void* keys_ptr_transpose) {
// transpose
const double alpha = 1.0;
const double beta = 0.0;
hipblasStatus_t cublas_error = hipblasDgeam(cublas_handle,
HIPBLAS_OP_T, /*transa*/
HIPBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<double*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<double*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<double*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != HIPBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
hipsparseStatus_t status = hipsparseDnnz(cusparse_handle,
HIPSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<double*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = hipsparseDdense2csr(cusparse_handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<double*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<double*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
}
void ConvertDenseToCSRFloatFunctor::operator()(void* keys_ptr, int row, int col,
const hipsparseHandle_t& cusparse_handle,
const hipblasHandle_t& cublas_handle, void* csr_values,
int* csr_row_offsets, int* csr_col_indices,
long long* total_nnz, hipsparseMatDescr_t& desc, int* nnz_row,
void* keys_ptr_transpose) {
// transpose
const float alpha = 1.0;
const float beta = 0.0;
hipblasStatus_t cublas_error = hipblasSgeam(cublas_handle,
HIPBLAS_OP_T, /*transa*/
HIPBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<float*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<float*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<float*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != HIPBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
hipsparseStatus_t status = hipsparseSnnz(cusparse_handle,
HIPSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<float*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = hipsparseSdense2csr(cusparse_handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<float*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<float*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != HIPSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipsparseGetErrorString(status) << std::endl;
exit(-1);
}
}
template <typename T>
T* CudaAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
T* CudaHostAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
void CudaAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
hipFree(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void CudaHostAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
hipHostFree(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void print_cuda_ptr(T* dev_ptr, const size_t elem_size) {
hipError_t error = hipDeviceSynchronize();
if (error != hipSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipGetErrorString(error) << std::endl;
exit(-1);
}
std::unique_ptr<T []> host_vector(new T[elem_size]());
error = hipMemcpy(host_vector.get(), dev_ptr, sizeof(T) * elem_size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << hipGetErrorString(error) << std::endl;
exit(-1);
}
for (size_t i = 0; i < elem_size; ++i) {
std::cout << host_vector[i] << ", " << std::flush;
}
std::cout << std::endl;
return;
}
template class CudaAllocator<int*>;
template class CudaAllocator<long long*>;
template class CudaAllocator<char*>;
template class CudaAllocator<unsigned int*>;
template struct EraseDistributedEmbeddingKeysFunctor<long long>;
template struct EraseLocalizedEmbeddingKeysFunctor<long long>;
template void all_keys_plus_1(long long*, const size_t, const size_t, hipStream_t);
template void all_keys_plus_1(unsigned int*, const size_t, const size_t, hipStream_t);
template void erase_distributed_embedding_keys(long long*, const int, const int, const size_t, const size_t, hipStream_t);
template void erase_distributed_embedding_keys(unsigned int*, const int, const int, const size_t, const size_t, hipStream_t);
template void erase_localized_embedding_keys(long long*, const int, const size_t, const size_t, const int, const size_t,
const size_t, hipStream_t);
template void erase_localized_embedding_keys(unsigned int*, const int, const size_t, const size_t, const int, const size_t,
const size_t, hipStream_t);
template void convert_dense_to_csr(long long*, int, int, const hipsparseHandle_t&, const hipblasHandle_t&, long long*, int*, int*,
long long*, hipsparseMatDescr_t&, int*, long long*);
template void convert_dense_to_csr(unsigned int*, int, int, const hipsparseHandle_t&, const hipblasHandle_t&, unsigned int*, int*,
int*, long long*, hipsparseMatDescr_t&, int*, unsigned int*);
template void value_tensors_subtract_1(long long*, const size_t, const size_t, hipStream_t);
template void value_tensors_subtract_1(unsigned int*, const size_t, const size_t, hipStream_t);
template void cast_elements(const int*, long long*, const size_t, const size_t, hipStream_t);
template void generate_binary_vec(char*, const size_t, const int, const size_t, const int, const size_t, hipStream_t);
template void generate_binary_vec(int*, const size_t, const int, const size_t, const int, const size_t, hipStream_t);
template hipError_t select_slots(int*, int*, const size_t, int*, int*, size_t, int*, hipStream_t);
template hipError_t get_temp_storage_bytes(int*, int*, int*, const size_t, size_t&);
template void fuse_keys_plus_erase_distributed_embedding(long long*, const int, const int, const size_t, const size_t, hipStream_t);
template void fuse_keys_plus_erase_distributed_embedding(unsigned int*, const int, const int, const size_t, const size_t, hipStream_t);
template void print_cuda_ptr(long long*, const size_t);
template void print_cuda_ptr(unsigned int*, const size_t);
template void print_cuda_ptr(int*, const size_t);
} // namespace CudaUtils
| c06bc5792d5fe4d2d317e90f6f1d0271a1d26210.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_utils.h"
#include <cub/cub/cub.cuh>
#include <memory>
namespace CudaUtils {
/** this function is used to modify the keys dense tensor, erasing keys that
 * do not belong to this gpu by setting them to 0. (0 represents an invalid key;
 * the original 0-key has been mapped to 1.)
 * @param keys_ptr, the pointer to the 2-D dense keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param dev_id, the device id this keys_ptr belongs to.
 * @param elem_size, the element size of keys_ptr.
 */
template <typename T>
__global__ void erase_distributed_embedding_keys(T* keys_ptr, const int dev_id,
const int gpu_count, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
if (keys_ptr[i] > 0 && ((keys_ptr[i] - 1) % gpu_count != dev_id)) {
keys_ptr[i] = 0;
}
}
}
/** this function is used to modify the keys dense tensor, erasing keys that
 * do not belong to this gpu by setting them to 0.
 * @param keys_ptr, the pointer to the 2-D dense keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param dev_id, the device id this keys_ptr belongs to.
 * @param slot_num, the number of slots per sample.
 * @param max_nnz, the maximum number of keys per slot.
 * @param gpu_count, how many gpus.
 * @param elem_size, the element size of keys_ptr.
 */
template <typename T>
__global__ void erase_localized_embedding_keys(T* keys_ptr, const int dev_id,
const size_t slot_num, const size_t max_nnz,
const int gpu_count, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
  for (size_t i = gid; i < elem_size; i += strid) {
    // derive the owning device from the element index itself so the grid-stride
    // loop also stays correct when the grid does not cover every element
    int row_idx = i / max_nnz;
    int slot_idx = row_idx % slot_num;
    int key_dev_id = slot_idx % gpu_count;
    if (key_dev_id != dev_id) {
keys_ptr[i] = 0;
}
}
}
/** this function is used to modify the elements.
 * @param keys_ptr, the pointer to the 2-D keys tensor, whose shape is
 * [batchsize * slot_num, max_nnz]
 * @param fn, how to modify each element: given an input element, it returns the modified value.
 * @param elem_size, how many elements.
 */
template <typename input_type, typename Func>
__global__ void modify_elements(input_type* keys_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
keys_ptr[i] = fn(keys_ptr[i]);
}
}
template <typename Func, typename input_type, typename output_type>
__global__ void modify_elements(const input_type* input_ptr, output_type* output_ptr, Func fn, const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
output_ptr[i] = fn(input_ptr[i]);
}
}
/** This function is used to generate a binary flag vector for csr_row_offset.
 * @param elem_size, how many elements are in input_ptr; it is equal to the elem_size of csr_row_offset.
 * @param dest_dev_id, decides which indices should reside on this destination device: matching
 * positions are set to 1, all other positions to 0.
 */
template <typename T>
__global__ void generate_binary_vec(T* input_ptr, const size_t elem_size, const int dest_dev_id,
const size_t slot_num, const int gpu_count) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
input_ptr[i] = 1; // initialize the value to 1.
if (i > 0) { // ignore the first value. it should be 1 anyway.
int row_idx = i - 1;
int slot_idx = row_idx % slot_num;
int dev_id = slot_idx % gpu_count;
input_ptr[i] = (dev_id == dest_dev_id) ? 1 : 0;
}
}
}
/** This function fuses the value + 1 shift and erase_distributed_embedding_keys into one kernel.
 */
template <typename T>
__global__ void fuse_keys_plus_erase_distributed_embedding(T* keys_ptr, const int dev_id, const int gpu_count,
const size_t elem_size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int strid = blockDim.x * gridDim.x;
for (size_t i = gid; i < elem_size; i += strid) {
T temp_key = keys_ptr[i];
temp_key += 1;
if (temp_key <= 0 || (temp_key - 1) % gpu_count != dev_id) {
temp_key = 0;
}
keys_ptr[i] = temp_key;
}
}
template <typename T>
void all_keys_plus_1(T* keys_ptr, const size_t elem_size, const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto fn = [] __device__(T key) -> T { return key + 1; };
modify_elements<<<grid_dim, block_dim, 0, stream>>>(keys_ptr, fn, elem_size);
}
template <typename input_type, typename output_type>
void cast_elements(const input_type* input_ptr, output_type* output_ptr, const size_t elem_size,
const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
auto cast_fn = [] __device__ (input_type num) -> output_type { return static_cast<output_type>(num); };
modify_elements<<<grid_dim, block_dim, 0, stream>>>(input_ptr, output_ptr, cast_fn, elem_size);
}
template <typename T>
void erase_distributed_embedding_keys(T* keys_ptr, const int dev_id, const int gpu_count,
const size_t elem_size, const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
erase_distributed_embedding_keys<<<grid_dim, block_dim, 0, stream>>>(keys_ptr, dev_id, gpu_count, elem_size);
}
template <typename T>
void erase_localized_embedding_keys(T* keys_ptr, const int dev_id, const size_t slot_num, const size_t max_nnz,
const int gpu_count, const size_t elem_size, const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
erase_localized_embedding_keys<<<grid_dim, block_dim, 0, stream>>>(keys_ptr, dev_id, slot_num,
max_nnz, gpu_count, elem_size);
}
template <typename T>
void fuse_keys_plus_erase_distributed_embedding(T* keys_ptr, const int dev_id, const int gpu_count,
const size_t elem_size, const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
fuse_keys_plus_erase_distributed_embedding<<<grid_dim, block_dim, 0, stream>>>(keys_ptr, dev_id, gpu_count, elem_size);
}
template <typename T>
void convert_dense_to_csr(T* keys_ptr, int row, int col,
const cusparseHandle_t& handle,
const cublasHandle_t& cublas_handle,
T* csr_values, int* csr_row_offsets, int * csr_col_indices,
long long* total_nnz,
cusparseMatDescr_t& desc, int* nnz_row,
T* keys_ptr_transpose) {
if (std::is_same<T, long long>::value) {
// transpose
const double alpha = 1.0;
const double beta = 0.0;
cublasStatus_t cublas_error = cublasDgeam(cublas_handle,
CUBLAS_OP_T, /*transa*/
CUBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<double*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<double*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<double*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != CUBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
cusparseStatus_t status = cusparseDnnz(handle,
CUSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<double*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = cusparseDdense2csr(handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<double*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<double*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
} else if (std::is_same<T, unsigned int>::value) {
// transpose
const float alpha = 1.0;
const float beta = 0.0;
cublasStatus_t cublas_error = cublasSgeam(cublas_handle,
CUBLAS_OP_T, /*transa*/
CUBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<float*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<float*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<float*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != CUBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
cusparseStatus_t status = cusparseSnnz(handle,
CUSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<float*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = cusparseSdense2csr(handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<float*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<float*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
}
}
template <typename T>
void value_tensors_subtract_1(T* values_ptr, const size_t elem_size, const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
auto fn = [] __device__ (T value) { if (value > 0) {return value - 1;} else {return value;} };
modify_elements<<<grid_dim, block_dim, 0, stream>>>(values_ptr, fn, elem_size);
}
template <typename T>
void generate_binary_vec(T* input_ptr, const size_t elem_size, const int dest_dev_id,
const size_t slot_num, const int gpu_count, const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
generate_binary_vec<<<grid_dim, block_dim, 0, stream>>>(input_ptr, elem_size, dest_dev_id,
slot_num, gpu_count);
}
template <typename flag_type>
cudaError_t get_temp_storage_bytes(int* input_ptr, flag_type* binary_flag, int* output_ptr,
const size_t elem_size, size_t& temp_storage_bytes) {
int d_num_selected_out = 0;
cudaError_t error = cub::DeviceSelect::Flagged(nullptr, /*temp storage*/
temp_storage_bytes, /*temp_storage_bytes*/
input_ptr, /*d_in*/
binary_flag, /*d_flags*/
output_ptr, /*d_out*/
&d_num_selected_out, /*d_num_selected_out*/
static_cast<int>(elem_size)/*num_items*/);
return error;
}
template <typename flag_type>
cudaError_t select_slots(int* input_ptr, int* output_ptr, const size_t elem_size, int* d_temp_storage,
flag_type* binary_flag, size_t temp_storage_bytes, int* d_num_selected_out,
cudaStream_t stream) {
cudaError_t error = cudaMemsetAsync(output_ptr, 0, sizeof(int) * elem_size, stream);
if (error != cudaSuccess) return error;
error = cub::DeviceSelect::Flagged(d_temp_storage, /*temp storage*/
temp_storage_bytes, /*temp_storage_bytes*/
input_ptr, /*d_in*/
binary_flag, /*d_flags*/
output_ptr, /*d_out*/
d_num_selected_out, /*d_num_selected_out*/
static_cast<int>(elem_size), /*num_items*/
stream, /*cudaStream_t*/
false/*debug_synchronous*/);
if (error != cudaSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cudaGetErrorString(error) << std::endl;
return error;
}
return cudaSuccess;
}
size_t num_roof(const size_t number, const size_t base) {
return ((number + base - 1) / base) * base;
}
template <typename T>
void EraseDistributedEmbeddingKeysFunctor<T>::operator()(void* keys_ptr, const int dev_id, const size_t slot_num,
const size_t max_nnz, const int gpu_count, const size_t elem_size,
const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
erase_distributed_embedding_keys<<<grid_dim, block_dim, 0, stream>>>(reinterpret_cast<T*>(keys_ptr),
dev_id, gpu_count, elem_size);
}
template <typename T>
void EraseLocalizedEmbeddingKeysFunctor<T>::operator()(void* keys_ptr, const int dev_id, const size_t slot_num,
const size_t max_nnz, const int gpu_count, const size_t elem_size,
const size_t sm_count, cudaStream_t stream) {
int block_dim = 128;
// size_t grid_dim = num_roof((elem_size + block_dim - 1) / block_dim, sm_count);
int grid_dim = (elem_size + block_dim - 1) / block_dim;
erase_localized_embedding_keys<<<grid_dim, block_dim, 0, stream>>>(reinterpret_cast<T*>(keys_ptr),
dev_id, slot_num,
max_nnz, gpu_count, elem_size);
}
void ConvertDenseToCSRDoubleFunctor::operator()(void* keys_ptr, int row, int col,
const cusparseHandle_t& cusparse_handle,
const cublasHandle_t& cublas_handle, void* csr_values,
int* csr_row_offsets, int* csr_col_indices,
long long* total_nnz, cusparseMatDescr_t& desc, int* nnz_row,
void* keys_ptr_transpose) {
// transpose
const double alpha = 1.0;
const double beta = 0.0;
cublasStatus_t cublas_error = cublasDgeam(cublas_handle,
CUBLAS_OP_T, /*transa*/
CUBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<double*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<double*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<double*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != CUBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
cusparseStatus_t status = cusparseDnnz(cusparse_handle,
CUSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<double*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = cusparseDdense2csr(cusparse_handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<double*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<double*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
}
void ConvertDenseToCSRFloatFunctor::operator()(void* keys_ptr, int row, int col,
const cusparseHandle_t& cusparse_handle,
const cublasHandle_t& cublas_handle, void* csr_values,
int* csr_row_offsets, int* csr_col_indices,
long long* total_nnz, cusparseMatDescr_t& desc, int* nnz_row,
void* keys_ptr_transpose) {
// transpose
const float alpha = 1.0;
const float beta = 0.0;
cublasStatus_t cublas_error = cublasSgeam(cublas_handle,
CUBLAS_OP_T, /*transa*/
CUBLAS_OP_N, /*transb*/
row, /*number of rows*/
col, /*number of cols*/
&alpha, /*alpha*/
reinterpret_cast<float*>(keys_ptr), /*A*/
col, /*leading dimension*/
&beta, /*beta*/
reinterpret_cast<float*>(keys_ptr), /*B*/
row, /*leading dimension*/
reinterpret_cast<float*>(keys_ptr_transpose), /*C*/
row /*leading dimension*/);
if (cublas_error != CUBLAS_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << "cublas error: " << cublas_error << std::endl;
exit(-1);
}
int m = row /*row number*/, n = col /*column number*/;
int temp_total_nnz = 0;
cusparseStatus_t status = cusparseSnnz(cusparse_handle,
CUSPARSE_DIRECTION_ROW, /*count nnz direction*/
m, /*number of rows*/
n, /*number of columns*/
desc, /*descriptor of matrix*/
reinterpret_cast<float*>(keys_ptr_transpose),
m, /*leading dimension*/
nnz_row, /*output*/
&temp_total_nnz);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
*total_nnz = static_cast<long long>(temp_total_nnz);
status = cusparseSdense2csr(cusparse_handle,
m, /*number of rows of matrix A*/
n, /*number of columns of matrix A*/
desc, /*the descriptor of matrix A*/
reinterpret_cast<float*>(keys_ptr_transpose), /*array of dimensions (lda, n)*/
m, /*leading dimension*/
nnz_row, /*nnz array*/
reinterpret_cast<float*>(csr_values), /*csr values*/
csr_row_offsets, /*csr row_offset*/
csr_col_indices/*csr column indices*/);
if (status != CUSPARSE_STATUS_SUCCESS) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cusparseGetErrorString(status) << std::endl;
exit(-1);
}
}
template <typename T>
T* CudaAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
T* CudaHostAllocator<T>::allocate(size_t n) {
T* result = nullptr;
result = static_cast<T*>(malloc(n * sizeof(T)));
if (!result) throw std::bad_alloc();
return result;
}
template <typename T>
void CudaAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
cudaFree(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void CudaHostAllocator<T>::deallocate(T* ptr, size_t n) {
if (ptr) {
for (size_t i = 0; i < n; ++i) {
if (ptr[i]) {
cudaFreeHost(ptr[i]);
ptr[i] = nullptr;
}
}
free(ptr);
ptr = nullptr;
}
}
template <typename T>
void print_cuda_ptr(T* dev_ptr, const size_t elem_size) {
cudaError_t error = cudaDeviceSynchronize();
if (error != cudaSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cudaGetErrorString(error) << std::endl;
exit(-1);
}
std::unique_ptr<T []> host_vector(new T[elem_size]());
error = cudaMemcpy(host_vector.get(), dev_ptr, sizeof(T) * elem_size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
std::cout << __FILE__ << ":" << __LINE__ << " " << cudaGetErrorString(error) << std::endl;
exit(-1);
}
for (size_t i = 0; i < elem_size; ++i) {
std::cout << host_vector[i] << ", " << std::flush;
}
std::cout << std::endl;
return;
}
template class CudaAllocator<int*>;
template class CudaAllocator<long long*>;
template class CudaAllocator<char*>;
template class CudaAllocator<unsigned int*>;
template struct EraseDistributedEmbeddingKeysFunctor<long long>;
template struct EraseLocalizedEmbeddingKeysFunctor<long long>;
template void all_keys_plus_1(long long*, const size_t, const size_t, cudaStream_t);
template void all_keys_plus_1(unsigned int*, const size_t, const size_t, cudaStream_t);
template void erase_distributed_embedding_keys(long long*, const int, const int, const size_t, const size_t, cudaStream_t);
template void erase_distributed_embedding_keys(unsigned int*, const int, const int, const size_t, const size_t, cudaStream_t);
template void erase_localized_embedding_keys(long long*, const int, const size_t, const size_t, const int, const size_t,
const size_t, cudaStream_t);
template void erase_localized_embedding_keys(unsigned int*, const int, const size_t, const size_t, const int, const size_t,
const size_t, cudaStream_t);
template void convert_dense_to_csr(long long*, int, int, const cusparseHandle_t&, const cublasHandle_t&, long long*, int*, int*,
long long*, cusparseMatDescr_t&, int*, long long*);
template void convert_dense_to_csr(unsigned int*, int, int, const cusparseHandle_t&, const cublasHandle_t&, unsigned int*, int*,
int*, long long*, cusparseMatDescr_t&, int*, unsigned int*);
template void value_tensors_subtract_1(long long*, const size_t, const size_t, cudaStream_t);
template void value_tensors_subtract_1(unsigned int*, const size_t, const size_t, cudaStream_t);
template void cast_elements(const int*, long long*, const size_t, const size_t, cudaStream_t);
template void generate_binary_vec(char*, const size_t, const int, const size_t, const int, const size_t, cudaStream_t);
template void generate_binary_vec(int*, const size_t, const int, const size_t, const int, const size_t, cudaStream_t);
template cudaError_t select_slots(int*, int*, const size_t, int*, int*, size_t, int*, cudaStream_t);
template cudaError_t get_temp_storage_bytes(int*, int*, int*, const size_t, size_t&);
template void fuse_keys_plus_erase_distributed_embedding(long long*, const int, const int, const size_t, const size_t, cudaStream_t);
template void fuse_keys_plus_erase_distributed_embedding(unsigned int*, const int, const int, const size_t, const size_t, cudaStream_t);
template void print_cuda_ptr(long long*, const size_t);
template void print_cuda_ptr(unsigned int*, const size_t);
template void print_cuda_ptr(int*, const size_t);
} // namespace CudaUtils
|
80e16cef9827774faa4a42909c2f3417d55eeaec.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "Physika_Core/Utilities/cuda_utilities.h"
#include "Physika_Core/Utilities/template_functions.h"
#include "ParticlePrediction.h"
namespace Physika
{
// struct PP_STATE
// {
// float3 bodyForce;
// };
//
// __constant__ PP_STATE const_pp_state;
template<typename Coord>
__global__ void PP_Predict(
DeviceArray<Coord> posArr,
DeviceArray<Coord> velArr,
Coord bodyForce,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= posArr.Size()) return;
Coord pos_i = posArr[pId];
Coord vel_i = velArr[pId];
pos_i += vel_i*dt;
vel_i += bodyForce*dt;
// vel_i += 20.0f*(make_float3(0.5f, 0.2f, 0.5f) - posArr[pId])*dt;
posArr[pId] = pos_i;
velArr[pId] = vel_i;
}
template<typename Coord>
__global__ void PP_PredictPosition(
DeviceArray<Coord> posArr,
DeviceArray<Coord> velArr,
DeviceArray<Attribute> attriArr,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= posArr.Size()) return;
if (!attriArr[pId].IsFixed())
{
Coord pos_i = posArr[pId];
Coord vel_i = velArr[pId];
pos_i += vel_i*dt;
// vel_i += 20.0f*(make_float3(0.5f) - posArr[pId])*dt;
#ifdef SIMULATION2D
pos_i.z = 0.5f;
#endif
posArr[pId] = pos_i;
velArr[pId] = vel_i;
// if (attriArr[pId].IsPassive() && pos_i.y < 0.85f)
// {
// attriArr[pId].SetDynamic();
// }
}
}
template<typename Coord>
__global__ void PP_PredictVelocity(
DeviceArray<Coord> velArr,
DeviceArray<Attribute> attriArr,
Coord bodyForce,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= velArr.Size()) return;
if (attriArr[pId].IsDynamic())
{
Coord vel_i = velArr[pId];
vel_i += bodyForce*dt;
velArr[pId] = vel_i;
}
}
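// Trapezoidal position correction: newPos = oldPos + 0.5 * dt * (oldVel + newVel);
// fixed particles are left untouched.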
template<typename Coord>
__global__ void PP_CorrectPosition(
DeviceArray<Coord> newPos,
DeviceArray<Coord> oldPos,
DeviceArray<Coord> newVel,
DeviceArray<Coord> oldVel,
DeviceArray<Attribute> attriArr,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= newPos.Size()) return;
if (!attriArr[pId].IsFixed())
{
newPos[pId] = oldPos[pId] + 0.5f * dt * (oldVel[pId] + newVel[pId]);
#ifdef SIMULATION2D
newPos[pId].z = 0.5f;
#endif
}
}
template<typename TDataType>
ParticlePrediction<TDataType>::ParticlePrediction(ParticleSystem<TDataType>* parent)
:Module()
,m_parent(parent)
{
assert(m_parent != NULL);
setInputSize(1);
setOutputSize(1);
updateStates();
}
template<typename TDataType>
bool ParticlePrediction<TDataType>::execute()
{
DeviceArray<Coord>* posArr = m_parent->GetNewPositionBuffer()->getDataPtr();
DeviceArray<Coord>* velArr = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
Coord gravity = m_parent->GetGravity();
float dt = m_parent->getDt();
uint pDims = cudaGridSize(posArr->Size(), BLOCK_SIZE);
PP_Predict <Coord> << <pDims, BLOCK_SIZE >> > (*posArr, *velArr, gravity, dt);
return true;
}
template<typename TDataType>
void ParticlePrediction<TDataType>::PredictPosition(float dt)
{
DeviceArray<Coord>* posArr = m_parent->GetNewPositionBuffer()->getDataPtr();
DeviceArray<Coord>* velArr = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
uint pDims = cudaGridSize(posArr->Size(), BLOCK_SIZE);
PP_PredictPosition <Coord> << <pDims, BLOCK_SIZE >> > (*posArr, *velArr, *attriArr, dt);
}
template<typename TDataType>
void ParticlePrediction<TDataType>::PredictVelocity(float dt)
{
DeviceArray<Coord>* velArr = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
uint pDims = cudaGridSize(velArr->Size(), BLOCK_SIZE);
Coord gravity = Make<Coord>(0);
PP_PredictVelocity <Coord> << <pDims, BLOCK_SIZE >> > (*velArr, *attriArr, gravity, dt);
}
template<typename TDataType>
void ParticlePrediction<TDataType>::CorrectPosition(float dt)
{
DeviceArray<Coord>* oldPos = m_parent->GetOldPositionBuffer()->getDataPtr();
DeviceArray<Coord>* newPos = m_parent->GetNewPositionBuffer()->getDataPtr();
DeviceArray<Coord>* oldVel = m_parent->GetOldVelocityBuffer()->getDataPtr();
DeviceArray<Coord>* newVel = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
uint pDims = cudaGridSize(oldPos->Size(), BLOCK_SIZE);
PP_CorrectPosition << <pDims, BLOCK_SIZE >> > (*newPos, *oldPos, *newVel, *oldVel, *attriArr, dt);
}
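// A plausible (hypothetical) per-step sequence for this module; the actual
// ordering is decided by the owning ParticleSystem and is not shown here:
//
//   prediction->PredictVelocity(dt); // add the body-force term to dynamic particles
//   prediction->PredictPosition(dt); // advance non-fixed particles by v * dt
//   /* ... solver updates the new velocity buffer ... */
//   prediction->CorrectPosition(dt); // x = x_old + 0.5 * dt * (v_old + v_new)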
template<typename TDataType>
bool ParticlePrediction<TDataType>::updateStates()
{
// PP_STATE cm;
// cm.bodyForce = m_parent->GetBodyForce();
//
// hipMemcpyToSymbol(const_pp_state, &cm, sizeof(PP_STATE));
return true;
}
} | 80e16cef9827774faa4a42909c2f3417d55eeaec.cu | #include <cuda_runtime.h>
#include "Physika_Core/Utilities/cuda_utilities.h"
#include "Physika_Core/Utilities/template_functions.h"
#include "ParticlePrediction.h"
namespace Physika
{
// struct PP_STATE
// {
// float3 bodyForce;
// };
//
// __constant__ PP_STATE const_pp_state;
template<typename Coord>
__global__ void PP_Predict(
DeviceArray<Coord> posArr,
DeviceArray<Coord> velArr,
Coord bodyForce,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= posArr.Size()) return;
Coord pos_i = posArr[pId];
Coord vel_i = velArr[pId];
pos_i += vel_i*dt;
vel_i += bodyForce*dt;
// vel_i += 20.0f*(make_float3(0.5f, 0.2f, 0.5f) - posArr[pId])*dt;
posArr[pId] = pos_i;
velArr[pId] = vel_i;
}
template<typename Coord>
__global__ void PP_PredictPosition(
DeviceArray<Coord> posArr,
DeviceArray<Coord> velArr,
DeviceArray<Attribute> attriArr,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= posArr.Size()) return;
if (!attriArr[pId].IsFixed())
{
Coord pos_i = posArr[pId];
Coord vel_i = velArr[pId];
pos_i += vel_i*dt;
// vel_i += 20.0f*(make_float3(0.5f) - posArr[pId])*dt;
#ifdef SIMULATION2D
pos_i.z = 0.5f;
#endif
posArr[pId] = pos_i;
velArr[pId] = vel_i;
// if (attriArr[pId].IsPassive() && pos_i.y < 0.85f)
// {
// attriArr[pId].SetDynamic();
// }
}
}
template<typename Coord>
__global__ void PP_PredictVelocity(
DeviceArray<Coord> velArr,
DeviceArray<Attribute> attriArr,
Coord bodyForce,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= velArr.Size()) return;
if (attriArr[pId].IsDynamic())
{
Coord vel_i = velArr[pId];
vel_i += bodyForce*dt;
velArr[pId] = vel_i;
}
}
template<typename Coord>
__global__ void PP_CorrectPosition(
DeviceArray<Coord> newPos,
DeviceArray<Coord> oldPos,
DeviceArray<Coord> newVel,
DeviceArray<Coord> oldVel,
DeviceArray<Attribute> attriArr,
float dt)
{
int pId = threadIdx.x + (blockIdx.x * blockDim.x);
if (pId >= newPos.Size()) return;
if (!attriArr[pId].IsFixed())
{
newPos[pId] = oldPos[pId] + 0.5f * dt * (oldVel[pId] + newVel[pId]);
#ifdef SIMULATION2D
newPos[pId].z = 0.5f;
#endif
}
}
template<typename TDataType>
ParticlePrediction<TDataType>::ParticlePrediction(ParticleSystem<TDataType>* parent)
:Module()
,m_parent(parent)
{
assert(m_parent != NULL);
setInputSize(1);
setOutputSize(1);
updateStates();
}
template<typename TDataType>
bool ParticlePrediction<TDataType>::execute()
{
DeviceArray<Coord>* posArr = m_parent->GetNewPositionBuffer()->getDataPtr();
DeviceArray<Coord>* velArr = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
Coord gravity = m_parent->GetGravity();
float dt = m_parent->getDt();
uint pDims = cudaGridSize(posArr->Size(), BLOCK_SIZE);
PP_Predict <Coord> << <pDims, BLOCK_SIZE >> > (*posArr, *velArr, gravity, dt);
return true;
}
template<typename TDataType>
void ParticlePrediction<TDataType>::PredictPosition(float dt)
{
DeviceArray<Coord>* posArr = m_parent->GetNewPositionBuffer()->getDataPtr();
DeviceArray<Coord>* velArr = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
uint pDims = cudaGridSize(posArr->Size(), BLOCK_SIZE);
PP_PredictPosition <Coord> << <pDims, BLOCK_SIZE >> > (*posArr, *velArr, *attriArr, dt);
}
template<typename TDataType>
void ParticlePrediction<TDataType>::PredictVelocity(float dt)
{
DeviceArray<Coord>* velArr = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
uint pDims = cudaGridSize(velArr->Size(), BLOCK_SIZE);
Coord gravity = Make<Coord>(0);
PP_PredictVelocity <Coord> << <pDims, BLOCK_SIZE >> > (*velArr, *attriArr, gravity, dt);
}
template<typename TDataType>
void ParticlePrediction<TDataType>::CorrectPosition(float dt)
{
DeviceArray<Coord>* oldPos = m_parent->GetOldPositionBuffer()->getDataPtr();
DeviceArray<Coord>* newPos = m_parent->GetNewPositionBuffer()->getDataPtr();
DeviceArray<Coord>* oldVel = m_parent->GetOldVelocityBuffer()->getDataPtr();
DeviceArray<Coord>* newVel = m_parent->GetNewVelocityBuffer()->getDataPtr();
DeviceArray<Attribute>* attriArr = m_parent->GetAttributeBuffer()->getDataPtr();
uint pDims = cudaGridSize(oldPos->Size(), BLOCK_SIZE);
PP_CorrectPosition << <pDims, BLOCK_SIZE >> > (*newPos, *oldPos, *newVel, *oldVel, *attriArr, dt);
}
template<typename TDataType>
bool ParticlePrediction<TDataType>::updateStates()
{
// PP_STATE cm;
// cm.bodyForce = m_parent->GetBodyForce();
//
// cudaMemcpyToSymbol(const_pp_state, &cm, sizeof(PP_STATE));
return true;
}
} |
060025ae3a37867a7b144cad6e2664302d700989.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gSLIC_seg_engine_GPU.h"
#include "gSLIC_seg_engine_shared.h"
using namespace std;
using namespace gSLIC;
using namespace gSLIC::objects;
using namespace gSLIC::engines;
// ----------------------------------------------------
//
// kernel function defines
//
// ----------------------------------------------------
__global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space);
__global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size);
__global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size);
__global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist);
__global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line);
__global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel);
__global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size);
__global__ void Enforce_Connectivity_device1_2(const int* in_idx_img, int* out_idx_img, Vector2i img_size);
// ----------------------------------------------------
//
// host function implementations
//
// ----------------------------------------------------
seg_engine_GPU::seg_engine_GPU(const settings& in_settings) : seg_engine(in_settings)
{
source_img = new UChar4Image(in_settings.img_size,true,true);
cvt_img = new Float4Image(in_settings.img_size, true, true);
idx_img = new IntImage(in_settings.img_size, true, true);
tmp_idx_img = new IntImage(in_settings.img_size, true, true);
if (in_settings.seg_method == DEFAULT_SIZE)
{
spixel_size = 8;
}
else
{
spixel_size = in_settings.spixel_size;
}
int spixel_per_col = (int)ceil((float)in_settings.img_size.x / (float)spixel_size);
int spixel_per_row = (int)ceil((float)in_settings.img_size.y / (float)spixel_size);
map_size = Vector2i(spixel_per_col, spixel_per_row);
spixel_map = new SpixelMap(map_size, true, true);
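// number of BLOCK_DIM x BLOCK_DIM tiles needed to cover the (3*spixel_size)^2 search window of each cluster centre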
no_grid_per_center = (int)ceil(spixel_size*3.0f / BLOCK_DIM)*((int)ceil(spixel_size*3.0f / BLOCK_DIM));
Vector2i accum_size(map_size.x*no_grid_per_center, map_size.y);
accum_map = new SpixelMap(accum_size, true, true);
// normalizing factors
max_xy_dist = 1.0f / (1.4242f * spixel_size); // sqrt(2) * spixel_size
switch (in_settings.color_space)
{
case RGB:
max_color_dist = 5.0f / (1.7321f * 255);
break;
case XYZ:
max_color_dist = 5.0f / 1.7321f;
break;
case CIELAB:
max_color_dist = 15.0f / (1.7321f * 128);
break;
}
max_color_dist *= max_color_dist;
max_xy_dist *= max_xy_dist;
}
gSLIC::engines::seg_engine_GPU::~seg_engine_GPU()
{
delete accum_map;
delete tmp_idx_img;
}
void gSLIC::engines::seg_engine_GPU::Cvt_Img_Space(UChar4Image* inimg, Float4Image* outimg, COLOR_SPACE color_space)
{
Vector4u* inimg_ptr = inimg->GetData(MEMORYDEVICE_CUDA);
Vector4f* outimg_ptr = outimg->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Cvt_Img_Space_device << <gridSize, blockSize >> >(inimg_ptr, outimg_ptr, img_size, color_space);
}
void gSLIC::engines::seg_engine_GPU::Init_Cluster_Centers()
{
spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA);
Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(map_size, blockSize);
Init_Cluster_Centers_device << <gridSize, blockSize >> >(img_ptr, spixel_list, map_size, img_size, spixel_size);
}
void gSLIC::engines::seg_engine_GPU::Find_Center_Association()
{
spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA);
Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA);
int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Find_Center_Association_device << <gridSize, blockSize >> >(img_ptr, spixel_list, idx_ptr, map_size, img_size, spixel_size, gslic_settings.coh_weight,max_xy_dist,max_color_dist);
}
void gSLIC::engines::seg_engine_GPU::Update_Cluster_Center()
{
spixel_info* accum_map_ptr = accum_map->GetData(MEMORYDEVICE_CUDA);
spixel_info* spixel_list_ptr = spixel_map->GetData(MEMORYDEVICE_CUDA);
Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA);
int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
int no_blocks_per_line = (int)ceil(spixel_size * 3.0f / BLOCK_DIM);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize(map_size.x, map_size.y, no_grid_per_center);
hipLaunchKernelGGL(( Update_Cluster_Center_device), dim3(gridSize),dim3(blockSize), 0, 0, img_ptr, idx_ptr, accum_map_ptr, map_size, img_size, spixel_size, no_blocks_per_line);
dim3 gridSize2(map_size.x, map_size.y);
hipLaunchKernelGGL(( Finalize_Reduction_Result_device), dim3(gridSize2),dim3(blockSize), 0, 0, accum_map_ptr, spixel_list_ptr, map_size, no_grid_per_center);
}
void gSLIC::engines::seg_engine_GPU::Enforce_Connectivity()
{
int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
int* tmp_idx_ptr = tmp_idx_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Enforce_Connectivity_device << <gridSize, blockSize >> >(idx_ptr, tmp_idx_ptr, img_size);
Enforce_Connectivity_device << <gridSize, blockSize >> >(tmp_idx_ptr, idx_ptr, img_size);
Enforce_Connectivity_device1_2 << <gridSize, blockSize >> >(idx_ptr, tmp_idx_ptr, img_size);
Enforce_Connectivity_device1_2 << <gridSize, blockSize >> >(tmp_idx_ptr, idx_ptr, img_size);
}
void gSLIC::engines::seg_engine_GPU::Draw_Segmentation_Result(UChar4Image* out_img)
{
Vector4u* inimg_ptr = source_img->GetData(MEMORYDEVICE_CUDA);
Vector4u* outimg_ptr = out_img->GetData(MEMORYDEVICE_CUDA);
int* idx_img_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Draw_Segmentation_Result_device << <gridSize, blockSize >> >(idx_img_ptr, inimg_ptr, outimg_ptr, img_size);
out_img->UpdateHostFromDevice();
}
// ----------------------------------------------------
//
// device function implementations
//
// ----------------------------------------------------
__global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
cvt_img_space_shared(inimg, outimg, img_size, x, y, color_space);
}
__global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x == 0 || y == 0 || x > img_size.x - 2 || y > img_size.y - 2) return;
draw_superpixel_boundry_shared(idx_img, sourceimg, outimg, img_size, x, y);
}
__global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= map_size.x || y >= map_size.y) return;
init_cluster_centers_shared(inimg, out_spixel, map_size, img_size, spixel_size, x, y);
}
__global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
find_center_association_shared(inimg, in_spixel_map, out_idx_img, map_size, img_size, spixel_size, weight, x, y,max_xy_dist,max_color_dist);
}
__global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line)
{
int local_id = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ float4 color_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ float2 xy_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ int count_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ bool should_add;
color_shared[local_id] = make_float4(0, 0, 0, 0);
xy_shared[local_id] = make_float2(0, 0);
count_shared[local_id] = 0;
should_add = false;
__syncthreads();
int no_blocks_per_spixel = gridDim.z;
int spixel_id = blockIdx.y * map_size.x + blockIdx.x;
// compute the relative position in the search window
int block_x = blockIdx.z % no_blocks_per_line;
int block_y = blockIdx.z / no_blocks_per_line;
int x_offset = block_x * BLOCK_DIM + threadIdx.x;
int y_offset = block_y * BLOCK_DIM + threadIdx.y;
if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3)
{
// compute the start of the search window
int x_start = blockIdx.x * spixel_size - spixel_size;
int y_start = blockIdx.y * spixel_size - spixel_size;
int x_img = x_start + x_offset;
int y_img = y_start + y_offset;
if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y)
{
int img_idx = y_img * img_size.x + x_img;
if (in_idx_img[img_idx] == spixel_id)
{
color_shared[local_id] = make_float4(inimg[img_idx].x, inimg[img_idx].y, inimg[img_idx].z, inimg[img_idx].w);
xy_shared[local_id] = make_float2(x_img, y_img);
count_shared[local_id] = 1;
should_add = true;
}
}
}
__syncthreads();
if (should_add)
{
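// block-wide tree reduction of the accumulated colour, position and count
// (assumes a 256-thread block; the final <32 stage relies on implicit warp-synchronous execution, hence no __syncthreads() between its steps)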
if (local_id < 128)
{
color_shared[local_id] += color_shared[local_id + 128];
xy_shared[local_id] += xy_shared[local_id + 128];
count_shared[local_id] += count_shared[local_id + 128];
}
__syncthreads();
if (local_id < 64)
{
color_shared[local_id] += color_shared[local_id + 64];
xy_shared[local_id] += xy_shared[local_id + 64];
count_shared[local_id] += count_shared[local_id + 64];
}
__syncthreads();
if (local_id < 32)
{
color_shared[local_id] += color_shared[local_id + 32];
color_shared[local_id] += color_shared[local_id + 16];
color_shared[local_id] += color_shared[local_id + 8];
color_shared[local_id] += color_shared[local_id + 4];
color_shared[local_id] += color_shared[local_id + 2];
color_shared[local_id] += color_shared[local_id + 1];
xy_shared[local_id] += xy_shared[local_id + 32];
xy_shared[local_id] += xy_shared[local_id + 16];
xy_shared[local_id] += xy_shared[local_id + 8];
xy_shared[local_id] += xy_shared[local_id + 4];
xy_shared[local_id] += xy_shared[local_id + 2];
xy_shared[local_id] += xy_shared[local_id + 1];
count_shared[local_id] += count_shared[local_id + 32];
count_shared[local_id] += count_shared[local_id + 16];
count_shared[local_id] += count_shared[local_id + 8];
count_shared[local_id] += count_shared[local_id + 4];
count_shared[local_id] += count_shared[local_id + 2];
count_shared[local_id] += count_shared[local_id + 1];
}
}
__syncthreads();
if (local_id == 0)
{
int accum_map_idx = spixel_id * no_blocks_per_spixel + blockIdx.z;
accum_map[accum_map_idx].center = Vector2f(xy_shared[0].x, xy_shared[0].y);
accum_map[accum_map_idx].color_info = Vector4f(color_shared[0].x, color_shared[0].y, color_shared[0].z, color_shared[0].w);
accum_map[accum_map_idx].no_pixels = count_shared[0];
}
}
__global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= map_size.x || y >= map_size.y) return;
finalize_reduction_result_shared(accum_map, spixel_list, map_size, no_blocks_per_spixel, x, y);
}
__global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
supress_local_lable(in_idx_img, out_idx_img, img_size, x, y);
}
__global__ void Enforce_Connectivity_device1_2(const int* in_idx_img, int* out_idx_img, Vector2i img_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
supress_local_lable_2(in_idx_img, out_idx_img, img_size, x, y);
}
| 060025ae3a37867a7b144cad6e2664302d700989.cu | #include "gSLIC_seg_engine_GPU.h"
#include "gSLIC_seg_engine_shared.h"
using namespace std;
using namespace gSLIC;
using namespace gSLIC::objects;
using namespace gSLIC::engines;
// ----------------------------------------------------
//
// kernel function defines
//
// ----------------------------------------------------
__global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space);
__global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size);
__global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size);
__global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist);
__global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line);
__global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel);
__global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size);
__global__ void Enforce_Connectivity_device1_2(const int* in_idx_img, int* out_idx_img, Vector2i img_size);
// ----------------------------------------------------
//
// host function implementations
//
// ----------------------------------------------------
seg_engine_GPU::seg_engine_GPU(const settings& in_settings) : seg_engine(in_settings)
{
source_img = new UChar4Image(in_settings.img_size,true,true);
cvt_img = new Float4Image(in_settings.img_size, true, true);
idx_img = new IntImage(in_settings.img_size, true, true);
tmp_idx_img = new IntImage(in_settings.img_size, true, true);
if (in_settings.seg_method == DEFAULT_SIZE)
{
spixel_size = 8;
}
else
{
spixel_size = in_settings.spixel_size;
}
int spixel_per_col = (int)ceil((float)in_settings.img_size.x / (float)spixel_size);
int spixel_per_row = (int)ceil((float)in_settings.img_size.y / (float)spixel_size);
map_size = Vector2i(spixel_per_col, spixel_per_row);
spixel_map = new SpixelMap(map_size, true, true);
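// number of BLOCK_DIM x BLOCK_DIM tiles needed to cover the (3*spixel_size)^2 search window of each cluster centre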
no_grid_per_center = (int)ceil(spixel_size*3.0f / BLOCK_DIM)*((int)ceil(spixel_size*3.0f / BLOCK_DIM));
Vector2i accum_size(map_size.x*no_grid_per_center, map_size.y);
accum_map = new SpixelMap(accum_size, true, true);
// normalizing factors
max_xy_dist = 1.0f / (1.4242f * spixel_size); // sqrt(2) * spixel_size
switch (in_settings.color_space)
{
case RGB:
max_color_dist = 5.0f / (1.7321f * 255);
break;
case XYZ:
max_color_dist = 5.0f / 1.7321f;
break;
case CIELAB:
max_color_dist = 15.0f / (1.7321f * 128);
break;
}
max_color_dist *= max_color_dist;
max_xy_dist *= max_xy_dist;
}
gSLIC::engines::seg_engine_GPU::~seg_engine_GPU()
{
delete accum_map;
delete tmp_idx_img;
}
void gSLIC::engines::seg_engine_GPU::Cvt_Img_Space(UChar4Image* inimg, Float4Image* outimg, COLOR_SPACE color_space)
{
Vector4u* inimg_ptr = inimg->GetData(MEMORYDEVICE_CUDA);
Vector4f* outimg_ptr = outimg->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Cvt_Img_Space_device << <gridSize, blockSize >> >(inimg_ptr, outimg_ptr, img_size, color_space);
}
void gSLIC::engines::seg_engine_GPU::Init_Cluster_Centers()
{
spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA);
Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(map_size, blockSize);
Init_Cluster_Centers_device << <gridSize, blockSize >> >(img_ptr, spixel_list, map_size, img_size, spixel_size);
}
void gSLIC::engines::seg_engine_GPU::Find_Center_Association()
{
spixel_info* spixel_list = spixel_map->GetData(MEMORYDEVICE_CUDA);
Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA);
int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Find_Center_Association_device << <gridSize, blockSize >> >(img_ptr, spixel_list, idx_ptr, map_size, img_size, spixel_size, gslic_settings.coh_weight,max_xy_dist,max_color_dist);
}
void gSLIC::engines::seg_engine_GPU::Update_Cluster_Center()
{
spixel_info* accum_map_ptr = accum_map->GetData(MEMORYDEVICE_CUDA);
spixel_info* spixel_list_ptr = spixel_map->GetData(MEMORYDEVICE_CUDA);
Vector4f* img_ptr = cvt_img->GetData(MEMORYDEVICE_CUDA);
int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
int no_blocks_per_line = (int)ceil(spixel_size * 3.0f / BLOCK_DIM);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize(map_size.x, map_size.y, no_grid_per_center);
Update_Cluster_Center_device<<<gridSize,blockSize>>>(img_ptr, idx_ptr, accum_map_ptr, map_size, img_size, spixel_size, no_blocks_per_line);
dim3 gridSize2(map_size.x, map_size.y);
Finalize_Reduction_Result_device<<<gridSize2,blockSize>>>(accum_map_ptr, spixel_list_ptr, map_size, no_grid_per_center);
}
void gSLIC::engines::seg_engine_GPU::Enforce_Connectivity()
{
int* idx_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
int* tmp_idx_ptr = tmp_idx_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Enforce_Connectivity_device << <gridSize, blockSize >> >(idx_ptr, tmp_idx_ptr, img_size);
Enforce_Connectivity_device << <gridSize, blockSize >> >(tmp_idx_ptr, idx_ptr, img_size);
Enforce_Connectivity_device1_2 << <gridSize, blockSize >> >(idx_ptr, tmp_idx_ptr, img_size);
Enforce_Connectivity_device1_2 << <gridSize, blockSize >> >(tmp_idx_ptr, idx_ptr, img_size);
}
void gSLIC::engines::seg_engine_GPU::Draw_Segmentation_Result(UChar4Image* out_img)
{
Vector4u* inimg_ptr = source_img->GetData(MEMORYDEVICE_CUDA);
Vector4u* outimg_ptr = out_img->GetData(MEMORYDEVICE_CUDA);
int* idx_img_ptr = idx_img->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
Draw_Segmentation_Result_device << <gridSize, blockSize >> >(idx_img_ptr, inimg_ptr, outimg_ptr, img_size);
out_img->UpdateHostFromDevice();
}
// ----------------------------------------------------
//
// device function implementations
//
// ----------------------------------------------------
__global__ void Cvt_Img_Space_device(const Vector4u* inimg, Vector4f* outimg, Vector2i img_size, COLOR_SPACE color_space)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
cvt_img_space_shared(inimg, outimg, img_size, x, y, color_space);
}
__global__ void Draw_Segmentation_Result_device(const int* idx_img, Vector4u* sourceimg, Vector4u* outimg, Vector2i img_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x == 0 || y == 0 || x > img_size.x - 2 || y > img_size.y - 2) return;
draw_superpixel_boundry_shared(idx_img, sourceimg, outimg, img_size, x, y);
}
__global__ void Init_Cluster_Centers_device(const Vector4f* inimg, spixel_info* out_spixel, Vector2i map_size, Vector2i img_size, int spixel_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= map_size.x || y >= map_size.y) return;
init_cluster_centers_shared(inimg, out_spixel, map_size, img_size, spixel_size, x, y);
}
__global__ void Find_Center_Association_device(const Vector4f* inimg, const spixel_info* in_spixel_map, int* out_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
find_center_association_shared(inimg, in_spixel_map, out_idx_img, map_size, img_size, spixel_size, weight, x, y,max_xy_dist,max_color_dist);
}
__global__ void Update_Cluster_Center_device(const Vector4f* inimg, const int* in_idx_img, spixel_info* accum_map, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line)
{
int local_id = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ float4 color_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ float2 xy_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ int count_shared[BLOCK_DIM*BLOCK_DIM];
__shared__ bool should_add;
color_shared[local_id] = make_float4(0, 0, 0, 0);
xy_shared[local_id] = make_float2(0, 0);
count_shared[local_id] = 0;
should_add = false;
__syncthreads();
int no_blocks_per_spixel = gridDim.z;
int spixel_id = blockIdx.y * map_size.x + blockIdx.x;
// compute the relative position in the search window
int block_x = blockIdx.z % no_blocks_per_line;
int block_y = blockIdx.z / no_blocks_per_line;
int x_offset = block_x * BLOCK_DIM + threadIdx.x;
int y_offset = block_y * BLOCK_DIM + threadIdx.y;
if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3)
{
// compute the start of the search window
int x_start = blockIdx.x * spixel_size - spixel_size;
int y_start = blockIdx.y * spixel_size - spixel_size;
int x_img = x_start + x_offset;
int y_img = y_start + y_offset;
if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y)
{
int img_idx = y_img * img_size.x + x_img;
if (in_idx_img[img_idx] == spixel_id)
{
color_shared[local_id] = make_float4(inimg[img_idx].x, inimg[img_idx].y, inimg[img_idx].z, inimg[img_idx].w);
xy_shared[local_id] = make_float2(x_img, y_img);
count_shared[local_id] = 1;
should_add = true;
}
}
}
__syncthreads();
if (should_add)
{
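// block-wide tree reduction of the accumulated colour, position and count
// (assumes a 256-thread block; the final <32 stage relies on implicit warp-synchronous execution, hence no __syncthreads() between its steps)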
if (local_id < 128)
{
color_shared[local_id] += color_shared[local_id + 128];
xy_shared[local_id] += xy_shared[local_id + 128];
count_shared[local_id] += count_shared[local_id + 128];
}
__syncthreads();
if (local_id < 64)
{
color_shared[local_id] += color_shared[local_id + 64];
xy_shared[local_id] += xy_shared[local_id + 64];
count_shared[local_id] += count_shared[local_id + 64];
}
__syncthreads();
if (local_id < 32)
{
color_shared[local_id] += color_shared[local_id + 32];
color_shared[local_id] += color_shared[local_id + 16];
color_shared[local_id] += color_shared[local_id + 8];
color_shared[local_id] += color_shared[local_id + 4];
color_shared[local_id] += color_shared[local_id + 2];
color_shared[local_id] += color_shared[local_id + 1];
xy_shared[local_id] += xy_shared[local_id + 32];
xy_shared[local_id] += xy_shared[local_id + 16];
xy_shared[local_id] += xy_shared[local_id + 8];
xy_shared[local_id] += xy_shared[local_id + 4];
xy_shared[local_id] += xy_shared[local_id + 2];
xy_shared[local_id] += xy_shared[local_id + 1];
count_shared[local_id] += count_shared[local_id + 32];
count_shared[local_id] += count_shared[local_id + 16];
count_shared[local_id] += count_shared[local_id + 8];
count_shared[local_id] += count_shared[local_id + 4];
count_shared[local_id] += count_shared[local_id + 2];
count_shared[local_id] += count_shared[local_id + 1];
}
}
__syncthreads();
if (local_id == 0)
{
int accum_map_idx = spixel_id * no_blocks_per_spixel + blockIdx.z;
accum_map[accum_map_idx].center = Vector2f(xy_shared[0].x, xy_shared[0].y);
accum_map[accum_map_idx].color_info = Vector4f(color_shared[0].x, color_shared[0].y, color_shared[0].z, color_shared[0].w);
accum_map[accum_map_idx].no_pixels = count_shared[0];
}
}
__global__ void Finalize_Reduction_Result_device(const spixel_info* accum_map, spixel_info* spixel_list, Vector2i map_size, int no_blocks_per_spixel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= map_size.x || y >= map_size.y) return;
finalize_reduction_result_shared(accum_map, spixel_list, map_size, no_blocks_per_spixel, x, y);
}
__global__ void Enforce_Connectivity_device(const int* in_idx_img, int* out_idx_img, Vector2i img_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
supress_local_lable(in_idx_img, out_idx_img, img_size, x, y);
}
__global__ void Enforce_Connectivity_device1_2(const int* in_idx_img, int* out_idx_img, Vector2i img_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_size.x || y >= img_size.y) return;
supress_local_lable_2(in_idx_img, out_idx_img, img_size, x, y);
}
|
e2a8315e2b964bf0a8d817e3cb43ce08a9929984.hip | // !!! This is a file automatically generated by hipify!!!
#include "FVL/FVLib.h"
using namespace std;
#ifdef NO_CUDA
#include "kernels_cpu.h"
#else
#include <hip/hip_runtime.h>
#include "kernels_hip.cuh"
#endif
#define BLOCK_SIZE_FLUX 512
#define BLOCK_SIZE_UPDATE 512
#define BLOCK_SIZE 512
#define GRID_SIZE(elems, threads) ((int) ::ceil((double)elems/threads))
typedef struct _parameters {
string mesh_file;
string velocity_file;
string initial_file;
string output_file;
double final_time;
double anim_time;
int anim_jump;
double dirichlet;
double CFL;
} Parameters;
#define _USE_MATH_DEFINES
#include <math.h>
#include <limits>
#include <set>
void prepare_mesh_test_data(CFVMesh2D &mesh, CFVArray<double> &polution) {
double min_x = std::numeric_limits<double>::max();
double max_x = std::numeric_limits<double>::lowest();
/* find min and max x coords of the mesh edges */
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
double current = mesh.edge_centroids.x[i];
if (current < min_x) min_x = current;
if (current > max_x) max_x = current;
}
cout << endl << "Linking mesh ends" << endl;
/* This assumes the mesh is rectangular and that its left side should be connected to its right side:
* for every boundary edge with x = min_x that has no right cell, the right cell is set to the left cell
* of the corresponding edge at x = max_x, and vice-versa, making the domain periodic in x.
**/
set<unsigned int> left_cells;
set<unsigned int> right_cells;
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
if (mesh.edge_types[i] == FV_EDGE_DIRICHLET) {
if (mesh.edge_centroids.x[i] == min_x)
left_cells.insert(i);
if (mesh.edge_centroids.x[i] == max_x)
right_cells.insert(i);
}
}
set<unsigned int>::iterator left_it, right_it;
for(left_it = left_cells.begin(), right_it = right_cells.begin();
left_it != left_cells.end();
++left_it, ++right_it) {
unsigned int l = *left_it;
unsigned int r = *right_it;
/* set edges type to regular */
mesh.edge_types[l] = FV_EDGE_FAKE;
mesh.edge_types[r] = FV_EDGE_FAKE;
/* link both edges */
cout << "linking " << l << " with " << r << endl;
mesh.edge_right_cells[l] = mesh.edge_left_cells[l];
mesh.edge_left_cells[l] = mesh.edge_left_cells[r];
mesh.edge_right_cells[r] = mesh.edge_right_cells[l];
cout << "linking edge " << l << " with " << r << endl;
}
cout << "Linked " << left_cells.size() << " pairs of edges " << endl << endl;
}
Parameters read_parameters (string parameters_filename) {
Parameters data;
FVParameters para(parameters_filename);
data.mesh_file = para.getString("MeshName");
data.velocity_file = para.getString("VelocityFile");
data.initial_file = para.getString("PoluInitFile");
data.output_file = para.getString("OutputFile");
data.final_time = para.getDouble("FinalTime");
data.anim_time = para.getDouble("AnimTimeStep");
data.anim_jump = para.getInteger("NbJump");
data.dirichlet = para.getDouble("DirichletCondition");
data.CFL = para.getDouble("CFL");
return data;
}
int main(int argc, char **argv) {
#ifdef NO_CUDA
cout << "Running in NO_CUDA mode" << endl;
#endif
// var declaration
int i = 0;
double h, t, dt, v_max = 0;
string name;
// read params
Parameters data;
if (argc != 2) {
cerr << "Arg warning: no xml param filename specified. Defaulting to param.xml" << endl;
data = read_parameters("param.xml");
} else
data = read_parameters(argv[1]);
// read mesh
FVL::CFVMesh2D mesh(data.mesh_file);
FVL::CFVRecons2D recons(mesh);
FVL::CFVPoints2D<double> velocities(mesh.num_cells);
FVL::CFVArray<double> polution(mesh.num_cells);
FVL::CFVArray<double> vs(mesh.num_edges);
#if defined(_SECOND_ORDER)
FVL::CFVArray<double> vecA(mesh.num_cells);
#elif defined(_MUSCL)
FVL::CFVArray<double> p(mesh.num_cells);
#elif defined (_MOOD)
FVL::CFVArray<double> p(mesh.num_cells);
FVL::CFVArray<double> candidate(mesh.num_cells);
#endif
// read other input files
FVL::FVXMLReader velocity_reader(data.velocity_file);
FVL::FVXMLReader polu_ini_reader(data.initial_file);
polu_ini_reader.getVec(polution, t, name);
#ifdef _MOOD
for(uint i = 0; i < polution.size(); ++i)
candidate[i] = polution[i];
#endif
velocity_reader.getPoints2D(velocities, t, name);
polu_ini_reader.close();
velocity_reader.close();
/* assign test value for polution */
prepare_mesh_test_data(mesh, polution);
FVL::FVXMLWriter polution_writer(data.output_file);
polution_writer.append(polution, t, "polution");
// compute velocity vector
// TODO: Convert to CUDA
cpu_compute_edge_velocities(mesh, velocities, vs, v_max);
h = cpu_compute_mesh_parameter(mesh);
cout << "h" << h << endl;
dt = data.CFL / v_max * h;
#ifndef NO_CUDA
// saves whole mesh to CUDA memory
mesh.cuda_malloc();
recons.cuda_malloc();
polution.cuda_malloc();
vs.cuda_malloc();
vecA.cuda_malloc();
// data copy
hipStream_t stream;
hipStreamCreate(&stream);
mesh.cuda_save(stream);
polution.cuda_save(stream);
vs.cuda_save(stream);
vecA.cuda_save(stream);
// sizes of each kernel
// TODO: mudar BLOCK_SIZE_FLUX para MAT_A
dim3 block_s(BLOCK_SIZE, 1, 1);
dim3 grid_cells(GRID_SIZE(mesh.num_cells, BLOCK_SIZE));
dim3 grid_edges(GRID_SIZE(mesh.num_edges, BLOCK_SIZE));
#endif
bool finished = false;
double anim_next_step = data.anim_time;
cout << "dt= " << dt << endl;
while (!finished) {
cout << "time: " << t << " iteration: " << i << '\r';
if (t + dt > data.final_time) {
cout << endl << "Final iteration, adjusting dt" << endl;
dt = data.final_time - t;
finished = true;
}
#if defined(_SECOND_ORDER)
// Cpu version
#ifdef NO_CUDA
cpu_compute_a(mesh, polution, vecA);
cpu_compute_u(mesh, recons, polution, vecA, data.CFL);
cpu_compute_flux(mesh, vs, recons);
cpu_update(mesh, recons, polution, dt);
#else
// TODO
#endif
#elif defined(_MUSCL)
#ifdef NO_CUDA
cpu_compute_p(mesh, polution, p);
cpu_compute_u(mesh, recons, polution, p, data.CFL);
cpu_compute_flux(mesh, vs, recons);
cpu_update(mesh, recons, polution, dt);
#else
// TODO
#endif
#elif defined(_MOOD)
#ifdef NO_CUDA
// reset degree
for(uint c = 0; c < polution.size(); ++c)
recons.degree[c] = 1;
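// this flag controls only the inner MOOD fixed-point loop and shadows the outer time-loop 'finished'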
bool finished = false;
while(!finished) {
cpu_compute_p(mesh, polution, p);
cpu_compute_u(mesh, recons, polution, p, data.CFL);
cpu_compute_flux(mesh, vs, recons);
cpu_update(mesh, recons, candidate, dt);
if (cpu_mood_detector(mesh, recons, polution, candidate))
finished = true;
}
for(uint c = 0; c < polution.size(); ++c)
polution[c] = candidate[c];
#else
// TODO
#endif
#endif
t += dt;
if (t >= anim_next_step) {
#ifndef NO_CUDA
polution.cuda_get();
#endif
polution_writer.append(polution, t, "polution");
anim_next_step += data.anim_time;
}
++i;
}
polution_writer.save();
polution_writer.close();
#ifndef NO_CUDA
polution.cuda_free();
vs.cuda_free();
vecA.cuda_free();
recons.cuda_free();
mesh.cuda_free();
#endif
cout << endl << "exiting" << endl;
}
| e2a8315e2b964bf0a8d817e3cb43ce08a9929984.cu | #include "FVL/FVLib.h"
using namespace std;
#ifdef NO_CUDA
#include "kernels_cpu.h"
#else
#include <cuda.h>
#include "kernels_cuda.cuh"
#endif
#define BLOCK_SIZE_FLUX 512
#define BLOCK_SIZE_UPDATE 512
#define BLOCK_SIZE 512
#define GRID_SIZE(elems, threads) ((int) std::ceil((double)elems/threads))
typedef struct _parameters {
string mesh_file;
string velocity_file;
string initial_file;
string output_file;
double final_time;
double anim_time;
int anim_jump;
double dirichlet;
double CFL;
} Parameters;
#define _USE_MATH_DEFINES
#include <math.h>
#include <limits>
#include <set>
void prepare_mesh_test_data(CFVMesh2D &mesh, CFVArray<double> &polution) {
double min_x = std::numeric_limits<double>::max();
double max_x = std::numeric_limits<double>::lowest();
/* find min and max x coords of the mesh edges */
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
double current = mesh.edge_centroids.x[i];
if (current < min_x) min_x = current;
if (current > max_x) max_x = current;
}
cout << endl << "Linking mesh ends" << endl;
/* This assumes the mesh is rectangular and that its left side should be connected to its right side:
* for every boundary edge with x = min_x that has no right cell, the right cell is set to the left cell
* of the corresponding edge at x = max_x, and vice-versa, making the domain periodic in x.
**/
set<unsigned int> left_cells;
set<unsigned int> right_cells;
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
if (mesh.edge_types[i] == FV_EDGE_DIRICHLET) {
if (mesh.edge_centroids.x[i] == min_x)
left_cells.insert(i);
if (mesh.edge_centroids.x[i] == max_x)
right_cells.insert(i);
}
}
set<unsigned int>::iterator left_it, right_it;
for(left_it = left_cells.begin(), right_it = right_cells.begin();
left_it != left_cells.end();
++left_it, ++right_it) {
unsigned int l = *left_it;
unsigned int r = *right_it;
/* set edges type to regular */
mesh.edge_types[l] = FV_EDGE_FAKE;
mesh.edge_types[r] = FV_EDGE_FAKE;
/* link both edges */
cout << "linking " << l << " with " << r << endl;
mesh.edge_right_cells[l] = mesh.edge_left_cells[l];
mesh.edge_left_cells[l] = mesh.edge_left_cells[r];
mesh.edge_right_cells[r] = mesh.edge_right_cells[l];
cout << "linking edge " << l << " with " << r << endl;
}
cout << "Linked " << left_cells.size() << " pairs of edges " << endl << endl;
}
Parameters read_parameters (string parameters_filename) {
Parameters data;
FVParameters para(parameters_filename);
data.mesh_file = para.getString("MeshName");
data.velocity_file = para.getString("VelocityFile");
data.initial_file = para.getString("PoluInitFile");
data.output_file = para.getString("OutputFile");
data.final_time = para.getDouble("FinalTime");
data.anim_time = para.getDouble("AnimTimeStep");
data.anim_jump = para.getInteger("NbJump");
data.dirichlet = para.getDouble("DirichletCondition");
data.CFL = para.getDouble("CFL");
return data;
}
int main(int argc, char **argv) {
#ifdef NO_CUDA
cout << "Running in NO_CUDA mode" << endl;
#endif
// var declaration
int i = 0;
double h, t, dt, v_max = 0;
string name;
// read params
Parameters data;
if (argc != 2) {
cerr << "Arg warning: no xml param filename specified. Defaulting to param.xml" << endl;
data = read_parameters("param.xml");
} else
data = read_parameters(argv[1]);
// read mesh
FVL::CFVMesh2D mesh(data.mesh_file);
FVL::CFVRecons2D recons(mesh);
FVL::CFVPoints2D<double> velocities(mesh.num_cells);
FVL::CFVArray<double> polution(mesh.num_cells);
FVL::CFVArray<double> vs(mesh.num_edges);
#if defined(_SECOND_ORDER)
FVL::CFVArray<double> vecA(mesh.num_cells);
#elif defined(_MUSCL)
FVL::CFVArray<double> p(mesh.num_cells);
#elif defined (_MOOD)
FVL::CFVArray<double> p(mesh.num_cells);
FVL::CFVArray<double> candidate(mesh.num_cells);
#endif
// read other input files
FVL::FVXMLReader velocity_reader(data.velocity_file);
FVL::FVXMLReader polu_ini_reader(data.initial_file);
polu_ini_reader.getVec(polution, t, name);
#ifdef _MOOD
for(uint i = 0; i < polution.size(); ++i)
candidate[i] = polution[i];
#endif
velocity_reader.getPoints2D(velocities, t, name);
polu_ini_reader.close();
velocity_reader.close();
/* assign test value for polution */
prepare_mesh_test_data(mesh, polution);
FVL::FVXMLWriter polution_writer(data.output_file);
polution_writer.append(polution, t, "polution");
// compute velocity vector
// TODO: Convert to CUDA
cpu_compute_edge_velocities(mesh, velocities, vs, v_max);
h = cpu_compute_mesh_parameter(mesh);
cout << "h" << h << endl;
dt = data.CFL / v_max * h;
#ifndef NO_CUDA
// saves whole mesh to CUDA memory
mesh.cuda_malloc();
recons.cuda_malloc();
polution.cuda_malloc();
vs.cuda_malloc();
vecA.cuda_malloc();
// data copy
cudaStream_t stream;
cudaStreamCreate(&stream);
mesh.cuda_save(stream);
polution.cuda_save(stream);
vs.cuda_save(stream);
vecA.cuda_save(stream);
// sizes of each kernel
// TODO: mudar BLOCK_SIZE_FLUX para MAT_A
dim3 block_s(BLOCK_SIZE, 1, 1);
dim3 grid_cells(GRID_SIZE(mesh.num_cells, BLOCK_SIZE));
dim3 grid_edges(GRID_SIZE(mesh.num_edges, BLOCK_SIZE));
#endif
bool finished = false;
double anim_next_step = data.anim_time;
cout << "dt= " << dt << endl;
while (!finished) {
cout << "time: " << t << " iteration: " << i << '\r';
if (t + dt > data.final_time) {
cout << endl << "Final iteration, adjusting dt" << endl;
dt = data.final_time - t;
finished = true;
}
#if defined(_SECOND_ORDER)
// Cpu version
#ifdef NO_CUDA
cpu_compute_a(mesh, polution, vecA);
cpu_compute_u(mesh, recons, polution, vecA, data.CFL);
cpu_compute_flux(mesh, vs, recons);
cpu_update(mesh, recons, polution, dt);
#else
// TODO
#endif
#elif defined(_MUSCL)
#ifdef NO_CUDA
cpu_compute_p(mesh, polution, p);
cpu_compute_u(mesh, recons, polution, p, data.CFL);
cpu_compute_flux(mesh, vs, recons);
cpu_update(mesh, recons, polution, dt);
#else
// TODO
#endif
#elif defined(_MOOD)
#ifdef NO_CUDA
// reset degree
for(uint c = 0; c < polution.size(); ++c)
recons.degree[c] = 1;
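// this flag controls only the inner MOOD fixed-point loop and shadows the outer time-loop 'finished'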
bool finished = false;
while(!finished) {
cpu_compute_p(mesh, polution, p);
cpu_compute_u(mesh, recons, polution, p, data.CFL);
cpu_compute_flux(mesh, vs, recons);
cpu_update(mesh, recons, candidate, dt);
if (cpu_mood_detector(mesh, recons, polution, candidate))
finished = true;
}
for(uint c = 0; c < polution.size(); ++c)
polution[c] = candidate[c];
#else
// TODO
#endif
#endif
t += dt;
if (t >= anim_next_step) {
#ifndef NO_CUDA
polution.cuda_get();
#endif
polution_writer.append(polution, t, "polution");
anim_next_step += data.anim_time;
}
++i;
}
polution_writer.save();
polution_writer.close();
#ifndef NO_CUDA
polution.cuda_free();
vs.cuda_free();
vecA.cuda_free();
recons.cuda_free();
mesh.cuda_free();
#endif
cout << endl << "exiting" << endl;
}
|
13ef45283797a308eca856c4d940ec20a967e63c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kern_gpu.cuh"
__device__ inline float4 operator*(const float& a, const float4& b) {
return make_float4(
a*b.x,
a*b.y,
a*b.z,
a*b.w
);
}
__device__ inline float4 operator+(const float4& a, const float4& b) {
return make_float4(
a.x + b.x,
a.y + b.y,
a.z + b.z,
a.w + b.w
);
}
__global__ void kern::gpu::compute_gpu(
kern::RawState st,
kern::Params p,
unsigned int iters,
bool first_call
) {
unsigned int rowIdx = blockIdx.y;
unsigned int colIdx = blockIdx.x*blockDim.x + threadIdx.x;
if(colIdx >= p.resolution) {
return;
}
float rowA = ((float) rowIdx + 0.5) / (float) p.resolution;
float colA = ((float) colIdx + 0.5) / (float) p.resolution;
float4 state = first_call
? make_float4(
(1-colA)*p.top_corner.x + colA*p.bot_corner.x,
(1-rowA)*p.top_corner.y + rowA*p.bot_corner.y,
0.0, 0.0)
: st.data[rowIdx * st.pitch/sizeof(float4) + colIdx];
const float h = p.step_size;
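// classic fourth-order Runge-Kutta integration with fixed step size h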
for(unsigned int i = 0; i < iters; i++) {
float4 k1 = kern::gpu::state_dt(state);
float4 k2 = kern::gpu::state_dt(state + h/2 * k1);
float4 k3 = kern::gpu::state_dt(state + h/2 * k2);
float4 k4 = kern::gpu::state_dt(state + h * k3);
state = state + h/6 * (k1 + 2*k2 + 2*k3 + k4);
}
st.data[rowIdx * st.pitch/sizeof(float4) + colIdx] = state;
}
__forceinline__ __device__ float4 kern::gpu::state_dt(float4 state) {
float2 top_inv_sq = kern::gpu::dts::inv_sq(1.5, 0.15, make_float2(0.0, 0.5), state);
float2 bot_inv_sq = kern::gpu::dts::inv_sq(1.5, 0.15, make_float2(0.0,-0.5), state);
float2 mid_spring = kern::gpu::dts::spring(0.5, make_float2(0.0, 0.0), state);
float2 frict_force = kern::gpu::dts::frict(0.1, state);
return make_float4(
state.z,
state.w,
top_inv_sq.x + bot_inv_sq.x + mid_spring.x + frict_force.x,
top_inv_sq.y + bot_inv_sq.y + mid_spring.y + frict_force.y
);
}
__forceinline__ __device__ float2 kern::gpu::dts::inv_sq(float g, float off, float2 center, float4 state) {
float2 d = make_float2(state.x-center.x, state.y-center.y);
float mag = powf(d.x*d.x + d.y*d.y + off*off, -1.5);
return make_float2(-g*mag*d.x, -g*mag*d.y);
}
__forceinline__ __device__ float2 kern::gpu::dts::spring(float k, float2 center, float4 state) {
float2 d = make_float2(state.x-center.x, state.y-center.y);
return make_float2(-k*d.x, -k*d.y);
}
__forceinline__ __device__ float2 kern::gpu::dts::frict(float m, float4 state) {
return make_float2(-m*state.z, -m*state.w);
}
| 13ef45283797a308eca856c4d940ec20a967e63c.cu | #include "kern_gpu.cuh"
__device__ inline float4 operator*(const float& a, const float4& b) {
return make_float4(
a*b.x,
a*b.y,
a*b.z,
a*b.w
);
}
__device__ inline float4 operator+(const float4& a, const float4& b) {
return make_float4(
a.x + b.x,
a.y + b.y,
a.z + b.z,
a.w + b.w
);
}
__global__ void kern::gpu::compute_gpu(
kern::RawState st,
kern::Params p,
unsigned int iters,
bool first_call
) {
unsigned int rowIdx = blockIdx.y;
unsigned int colIdx = blockIdx.x*blockDim.x + threadIdx.x;
if(colIdx >= p.resolution) {
return;
}
float rowA = ((float) rowIdx + 0.5) / (float) p.resolution;
float colA = ((float) colIdx + 0.5) / (float) p.resolution;
float4 state = first_call
? make_float4(
(1-colA)*p.top_corner.x + colA*p.bot_corner.x,
(1-rowA)*p.top_corner.y + rowA*p.bot_corner.y,
0.0, 0.0)
: st.data[rowIdx * st.pitch/sizeof(float4) + colIdx];
const float h = p.step_size;
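// classic fourth-order Runge-Kutta integration with fixed step size h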
for(unsigned int i = 0; i < iters; i++) {
float4 k1 = kern::gpu::state_dt(state);
float4 k2 = kern::gpu::state_dt(state + h/2 * k1);
float4 k3 = kern::gpu::state_dt(state + h/2 * k2);
float4 k4 = kern::gpu::state_dt(state + h * k3);
state = state + h/6 * (k1 + 2*k2 + 2*k3 + k4);
}
st.data[rowIdx * st.pitch/sizeof(float4) + colIdx] = state;
}
__forceinline__ __device__ float4 kern::gpu::state_dt(float4 state) {
float2 top_inv_sq = kern::gpu::dts::inv_sq(1.5, 0.15, make_float2(0.0, 0.5), state);
float2 bot_inv_sq = kern::gpu::dts::inv_sq(1.5, 0.15, make_float2(0.0,-0.5), state);
float2 mid_spring = kern::gpu::dts::spring(0.5, make_float2(0.0, 0.0), state);
float2 frict_force = kern::gpu::dts::frict(0.1, state);
return make_float4(
state.z,
state.w,
top_inv_sq.x + bot_inv_sq.x + mid_spring.x + frict_force.x,
top_inv_sq.y + bot_inv_sq.y + mid_spring.y + frict_force.y
);
}
__forceinline__ __device__ float2 kern::gpu::dts::inv_sq(float g, float off, float2 center, float4 state) {
float2 d = make_float2(state.x-center.x, state.y-center.y);
float mag = powf(d.x*d.x + d.y*d.y + off*off, -1.5);
return make_float2(-g*mag*d.x, -g*mag*d.y);
}
__forceinline__ __device__ float2 kern::gpu::dts::spring(float k, float2 center, float4 state) {
float2 d = make_float2(state.x-center.x, state.y-center.y);
return make_float2(-k*d.x, -k*d.y);
}
__forceinline__ __device__ float2 kern::gpu::dts::frict(float m, float4 state) {
return make_float2(-m*state.z, -m*state.w);
}
|
63ef9d121bd916ad85d85b31757566b1c2318efb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void xcorr(float *d_i1, float *d_i2, float *d_icorr, int m1, int n1, int m2, int n2)
{
//grab theadID location
int tx = threadIdx.x;
int ty = threadIdx.y;
// get output values based on block and thread locations
int row_out = blockIdx.y*blockDim.y + ty;
int col_out = blockIdx.x*blockDim.x + tx;
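// output is the full cross-correlation, of size (m1+m2-1) x (n1+n2-1), stored column-major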
// Get starting value for the convolution as dictated by m2 and n2
// we'll use i1 indicies as the coord syst.
int row_st = row_out - (m2 - 1);
int col_st = col_out - (n2 - 1);
// correlation variable
float corr=0; // initialize correlation variable
if (row_out >= 0 && row_out < m1 + m2 - 1 && col_out >= 0 && col_out < n1 + n2 - 1) // ensure output is within bounds of correlation image
{
// Buffering into memory would be 1 call to a global variable, From there we need 1 call for each multiplication, however we only need to make 1 call to a global
// variable for the multiplication and move on, as such it doesn't make sense to buffer these images into local memory
for (int i = 0; i < m2; i++) { //
for (int j = 0; j < n2; j++)
{
if (row_st + i >= 0 && row_st + i < m1 && col_st + j >= 0 && col_st + j < n1) { // accumulate only when the shifted indices (row_st + i, col_st + j) fall inside im1's bounds
corr += d_i1[row_st + i + (col_st + j) * m1] * d_i2[i + j * m2];
}
else {} // if else is invoked it's because row_st and col_st are outside of im1 bounds and the convolution should be left alone
}
}
d_icorr[row_out + col_out*(m1 + m2 - 1)] = corr; // assign correlation variable to proper location in final image
}
else{}
} | 63ef9d121bd916ad85d85b31757566b1c2318efb.cu | #include "includes.h"
__global__ void xcorr(float *d_i1, float *d_i2, float *d_icorr, int m1, int n1, int m2, int n2)
{
//grab theadID location
int tx = threadIdx.x;
int ty = threadIdx.y;
// get output values based on block and thread locations
int row_out = blockIdx.y*blockDim.y + ty;
int col_out = blockIdx.x*blockDim.x + tx;
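// output is the full cross-correlation, of size (m1+m2-1) x (n1+n2-1), stored column-major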
// Get starting value for the convolution as dictated by m2 and n2
// we'll use i1 indicies as the coord syst.
int row_st = row_out - (m2 - 1);
int col_st = col_out - (n2 - 1);
// correlation variable
float corr=0; // initialize correlation variable
if (row_out >= 0 && row_out < m1 + m2 - 1 && col_out >= 0 && col_out < n1 + n2 - 1) // ensure output is within bounds of correlation image
{
// Buffering into memory would be 1 call to a global variable, From there we need 1 call for each multiplication, however we only need to make 1 call to a global
// variable for the multiplication and move on, as such it doesn't make sense to buffer these images into local memory
for (int i = 0; i < m2; i++) { //
for (int j = 0; j < n2; j++)
{
if (row_st + i >= 0 && row_st + i < m1 && col_st + j >= 0 && col_st + j < n1) { // accumulate only when the shifted indices (row_st + i, col_st + j) fall inside im1's bounds
corr += d_i1[row_st + i + (col_st + j) * m1] * d_i2[i + j * m2];
}
else {} // if else is invoked it's because row_st and col_st are outside of im1 bounds and the convolution should be left alone
}
}
d_icorr[row_out + col_out*(m1 + m2 - 1)] = corr; // assign correlation variable to proper location in final image
}
else{}
} |
da29a3d7016403491748504a43f9911437a116fa.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include <ginkgo/core/base/exception_helpers.hpp>
#include <hipblas.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hiprand/hiprand.h>
#include <hipsparse.h>
#include <gtest/gtest.h>
namespace {
TEST(AssertNoCudaErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUDA_ERRORS(1), gko::CudaError);
}
TEST(AssertNoCudaErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUDA_ERRORS(hipSuccess));
}
TEST(AssertNoCublasErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUBLAS_ERRORS(1), gko::CublasError);
}
TEST(AssertNoCublasErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUBLAS_ERRORS(HIPBLAS_STATUS_SUCCESS));
}
TEST(AssertNoCurandErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CURAND_ERRORS(1), gko::CurandError);
}
TEST(AssertNoCurandErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CURAND_ERRORS(HIPRAND_STATUS_SUCCESS));
}
TEST(AssertNoCusparseErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUSPARSE_ERRORS(1), gko::CusparseError);
}
TEST(AssertNoCusparseErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUSPARSE_ERRORS(HIPSPARSE_STATUS_SUCCESS));
}
TEST(AssertNoCufftErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUFFT_ERRORS(1), gko::CufftError);
}
TEST(AssertNoCufftErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUFFT_ERRORS(HIPFFT_SUCCESS));
}
} // namespace
| da29a3d7016403491748504a43f9911437a116fa.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2022, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include <ginkgo/core/base/exception_helpers.hpp>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <curand.h>
#include <cusparse.h>
#include <gtest/gtest.h>
namespace {
TEST(AssertNoCudaErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUDA_ERRORS(1), gko::CudaError);
}
TEST(AssertNoCudaErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUDA_ERRORS(cudaSuccess));
}
TEST(AssertNoCublasErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUBLAS_ERRORS(1), gko::CublasError);
}
TEST(AssertNoCublasErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUBLAS_ERRORS(CUBLAS_STATUS_SUCCESS));
}
TEST(AssertNoCurandErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CURAND_ERRORS(1), gko::CurandError);
}
TEST(AssertNoCurandErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CURAND_ERRORS(CURAND_STATUS_SUCCESS));
}
TEST(AssertNoCusparseErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUSPARSE_ERRORS(1), gko::CusparseError);
}
TEST(AssertNoCusparseErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUSPARSE_ERRORS(CUSPARSE_STATUS_SUCCESS));
}
TEST(AssertNoCufftErrors, ThrowsOnError)
{
ASSERT_THROW(GKO_ASSERT_NO_CUFFT_ERRORS(1), gko::CufftError);
}
TEST(AssertNoCufftErrors, DoesNotThrowOnSuccess)
{
ASSERT_NO_THROW(GKO_ASSERT_NO_CUFFT_ERRORS(CUFFT_SUCCESS));
}
} // namespace
93fa78e665e4228bf20b636dab8f91a541405e19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace default_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto _pp_var_a0m __attribute__((unused)) = params_.globals[0];\
auto _pp_var_zetam __attribute__((unused)) = params_.globals[1];\
auto _pp_var_gmm __attribute__((unused)) = params_.globals[2];\
auto _pp_var_alpm __attribute__((unused)) = params_.globals[3];\
auto _pp_var_betm __attribute__((unused)) = params_.globals[4];\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_v __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_minf __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_mtau __attribute__((unused)) = params_.state_vars[3];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto* _pp_var_vhalfm __attribute__((unused)) = params_.parameters[1];\
auto* _pp_var_q10 __attribute__((unused)) = params_.parameters[2];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
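// Computes the steady-state activation (minf) and the activation time
// constant (mtau) for the kdrmt gate, including Q10 temperature scaling.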
__device__
void trates(arb_mechanism_ppack params_, int tid_, arb_value_type v, arb_value_type celsius) {
PPACK_IFACE_BLOCK;
arb_value_type tmp, betm_t, qt, alpm_t;
qt = pow(_pp_var_q10[tid_], (celsius- 24.0)* 0.10000000000000001);
_pp_var_minf[tid_] = 1.0/( 1.0+exp( -(v- 21.0)* 0.10000000000000001));
tmp = _pp_var_zetam*(v-_pp_var_vhalfm[tid_]);
alpm_t = exp(tmp);
betm_t = exp(_pp_var_gmm*tmp);
_pp_var_mtau[tid_] = betm_t/(qt*_pp_var_a0m*( 1.0+alpm_t));
}
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
trates(params_, tid_, v, celsius);
_pp_var_m[tid_] = _pp_var_minf[tid_];
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type a_0_, b_0_, ll0_, ll1_;
ll1_ = 0.;
ll0_ = 0.;
trates(params_, tid_, v, celsius);
a_0_ = _pp_var_mtau[tid_];
b_0_ = _pp_var_minf[tid_];
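// Implicit (cnexp-style) update: (1 + x/2)/(1 - x/2) is the (1,1) Pade
// approximant of exp(x), so m relaxes toward minf over one step of length dt.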
ll0_ = -dt/a_0_;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = b_0_+(_pp_var_m[tid_]-b_0_)*ll1_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
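// Ohmic K+ current: conductance gbar*m times the driving force (v - ek).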
ik = _pp_var_gbar[tid_]*_pp_var_m[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gbar[tid_]*_pp_var_m[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_kdrmt_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
hipLaunchKernelGGL(( multiply), dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p);
}
void mechanism_kdrmt_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_kdrmt_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_kdrmt_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_kdrmt_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_kdrmt_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace default_catalogue
} // namespace arb
| 93fa78e665e4228bf20b636dab8f91a541405e19.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace default_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto _pp_var_a0m __attribute__((unused)) = params_.globals[0];\
auto _pp_var_zetam __attribute__((unused)) = params_.globals[1];\
auto _pp_var_gmm __attribute__((unused)) = params_.globals[2];\
auto _pp_var_alpm __attribute__((unused)) = params_.globals[3];\
auto _pp_var_betm __attribute__((unused)) = params_.globals[4];\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_v __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_minf __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_mtau __attribute__((unused)) = params_.state_vars[3];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto* _pp_var_vhalfm __attribute__((unused)) = params_.parameters[1];\
auto* _pp_var_q10 __attribute__((unused)) = params_.parameters[2];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__device__
void trates(arb_mechanism_ppack params_, int tid_, arb_value_type v, arb_value_type celsius) {
PPACK_IFACE_BLOCK;
arb_value_type tmp, betm_t, qt, alpm_t;
qt = pow(_pp_var_q10[tid_], (celsius- 24.0)* 0.10000000000000001);
_pp_var_minf[tid_] = 1.0/( 1.0+exp( -(v- 21.0)* 0.10000000000000001));
tmp = _pp_var_zetam*(v-_pp_var_vhalfm[tid_]);
alpm_t = exp(tmp);
betm_t = exp(_pp_var_gmm*tmp);
_pp_var_mtau[tid_] = betm_t/(qt*_pp_var_a0m*( 1.0+alpm_t));
}
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
trates(params_, tid_, v, celsius);
_pp_var_m[tid_] = _pp_var_minf[tid_];
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type a_0_, b_0_, ll0_, ll1_;
ll1_ = 0.;
ll0_ = 0.;
trates(params_, tid_, v, celsius);
a_0_ = _pp_var_mtau[tid_];
b_0_ = _pp_var_minf[tid_];
ll0_ = -dt/a_0_;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = b_0_+(_pp_var_m[tid_]-b_0_)*ll1_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gbar[tid_]*_pp_var_m[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gbar[tid_]*_pp_var_m[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_kdrmt_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p);
}
void mechanism_kdrmt_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
compute_currents<<<grid_dim, block_dim>>>(*p);
}
void mechanism_kdrmt_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_kdrmt_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_kdrmt_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_kdrmt_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace default_catalogue
} // namespace arb
|
8de0f226a364b82e48be94d344c23ed1d9e3b31f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrix_add_matrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat1 = NULL;
hipMalloc(&mat1, XSIZE*YSIZE*sizeof(float));
float *mat2 = NULL;
hipMalloc(&mat2, XSIZE*YSIZE*sizeof(float));
float *mat3 = NULL;
hipMalloc(&mat3, XSIZE*YSIZE*sizeof(float));
int row = 1;
int col = 1;
int sign = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
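// Round the launch extents up to the next multiple of the block size so the
// grid fully covers the matrix.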
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( matrix_add_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,mat3,row,col,sign);
hipDeviceSynchronize();
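// Warm up with 10 untimed launches, then time 1000 launches below.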
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( matrix_add_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,mat3,row,col,sign);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( matrix_add_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, mat1,mat2,mat3,row,col,sign);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8de0f226a364b82e48be94d344c23ed1d9e3b31f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrix_add_matrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat1 = NULL;
cudaMalloc(&mat1, XSIZE*YSIZE*sizeof(float));
float *mat2 = NULL;
cudaMalloc(&mat2, XSIZE*YSIZE*sizeof(float));
float *mat3 = NULL;
cudaMalloc(&mat3, XSIZE*YSIZE*sizeof(float));
int row = 1;
int col = 1;
int sign = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrix_add_matrix<<<gridBlock,threadBlock>>>(mat1,mat2,mat3,row,col,sign);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrix_add_matrix<<<gridBlock,threadBlock>>>(mat1,mat2,mat3,row,col,sign);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrix_add_matrix<<<gridBlock,threadBlock>>>(mat1,mat2,mat3,row,col,sign);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
836336b1aeadcce734ecc0a28842e3d54c5624e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "conv.h"
#include <stdio.h>
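// Direct 2-D convolution for layer 3: each thread computes one (oh, ow, ff)
// output element, accumulating over input channels and the filter window,
// then applies a ReLU.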
__global__ void conv_2d_2(
float data[IN_HEIGHT_3 * IN_WIDTH_3 * N_CHAN_3],
float res[OUT_HEIGHT_3 * OUT_WIDTH_3 * N_FILT_3],
float weights[FILT_HEIGHT * FILT_WIDTH * N_CHAN_3 * N_FILT_3],
float biases[N_FILT_3])
{
int oh= blockIdx.y * blockDim.y + threadIdx.y;
int ow= blockIdx.x * blockDim.x + threadIdx.x;
if (oh>=IN_HEIGHT_3-FILT_HEIGHT+1 || ow>=IN_WIDTH_3-FILT_WIDTH+1)
return;
int ff = blockIdx.z * blockDim.z + threadIdx.z;
if (ff >= N_FILT_3)
return;
int offset = (oh * OUT_WIDTH_3 + ow)*N_FILT_3;
float temp = biases[ff];
for (int cc = 0; cc < N_CHAN_3; cc++)
{
for (int fh = 0; fh < FILT_HEIGHT; fh++)
{
for (int fw = 0; fw < FILT_WIDTH; fw++)
{
int index_weight = fh * FILT_WIDTH * N_CHAN_3 * N_FILT_3 + fw * N_CHAN_3 * N_FILT_3 + cc * N_FILT_3 + ff;
// assuming there is no padding
temp += data[((oh + fh) * IN_WIDTH_3 + (ow + fw)) * N_CHAN_3 + cc] * weights[index_weight];
} //end mult loop
} //end channel loop
} //end filter width loop
res[offset + ff] = (temp > 0)?temp:0;
} //end conv2d
| 836336b1aeadcce734ecc0a28842e3d54c5624e9.cu |
#include "conv.h"
#include <stdio.h>
__global__ void conv_2d_2(
float data[IN_HEIGHT_3 * IN_WIDTH_3 * N_CHAN_3],
float res[OUT_HEIGHT_3 * OUT_WIDTH_3 * N_FILT_3],
float weights[FILT_HEIGHT * FILT_WIDTH * N_CHAN_3 * N_FILT_3],
float biases[N_FILT_3])
{
int oh= blockIdx.y * blockDim.y + threadIdx.y;
int ow= blockIdx.x * blockDim.x + threadIdx.x;
if (oh>=IN_HEIGHT_3-FILT_HEIGHT+1 || ow>=IN_WIDTH_3-FILT_WIDTH+1)
return;
int ff = blockIdx.z * blockDim.z + threadIdx.z;
if (ff >= N_FILT_3)
return;
int offset = (oh * OUT_WIDTH_3 + ow)*N_FILT_3;
float temp = biases[ff];
for (int cc = 0; cc < N_CHAN_3; cc++)
{
for (int fh = 0; fh < FILT_HEIGHT; fh++)
{
for (int fw = 0; fw < FILT_WIDTH; fw++)
{
int index_weight = fh * FILT_WIDTH * N_CHAN_3 * N_FILT_3 + fw * N_CHAN_3 * N_FILT_3 + cc * N_FILT_3 + ff;
// assuming there is no padding
temp += data[((oh + fh) * IN_WIDTH_3 + (ow + fw)) * N_CHAN_3 + cc] * weights[index_weight];
} //end mult loop
} //end channel loop
} //end filter width loop
res[offset + ff] = (temp > 0)?temp:0;
} //end conv2d
|
8871cb4a3528037c3319bb46cc63bd293de580af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_uniform), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_normal), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generateLogNormal<real>), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_exponential), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_cauchy), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
THAssert(THCTensor_(nDimension)(state, t) == 2);
long rows = THCTensor_(size)(state, t, 0);
long cols = THCTensor_(size)(state, t, 1);
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
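// renormRowsL1 rescales each row to unit L1 norm; the reduction uses
// block.x reals of shared memory per block.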
hipLaunchKernelGGL(
(renormRowsL1<real>),
grid, block, block.x * sizeof(real),
THCState_getCurrentStream(state), THCTensor_(data)(state, t),
rows, cols);
}
THC_API void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist));
Generator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimension)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
long numDist =
inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0);
long numCategoriesLong =
inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) :
THCTensor_(size)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
// It is possible that prob_dist is non-contiguous
THCTensor* probDistContig =
THCTensor_(newContiguous)(state, prob_dist);
// Restructure data for 2d
if (inputSize == 1) {
THCTensor_(resize2d)(state, probDistContig, 1, numCategories);
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
// get current device properties
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(real) * sizeof(accreal));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
hipLaunchKernelGGL(
(sampleMultinomialOnce<real, accreal>),
grid, block,
requiredShared,
THCState_getCurrentStream(state),
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, probDistContig));
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, probDistContig);
THCTensor_(copy)(state, origDist, probDistContig);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, probDistContig);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
hipLaunchKernelGGL(
(sampleMultinomialWithReplacement),
grid, block, 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, 4L);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
hipLaunchKernelGGL(
(sampleMultinomialWithoutReplacement),
grid, block, 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
// Unfortunately, if prob_dist is contiguous already,
// newContiguous is not a private copy, so we have to restructure
// this too, so as to not affect prob_dist
THCTensor_(resize1d)(state, probDistContig, numCategories);
}
THCTensor_(free)(state, probDistContig);
}
THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THAssert(THCTensor_(isContiguous)(state, _probs));
long inputsize = THCTensor_(nElement)(state, _probs);
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
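// Walker/Vose alias-method setup: partition entries into those smaller and
// larger than the uniform average, then build the alias table _J and the
// acceptance probabilities _q.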
real one = ScalarConvert<long, real>::to(1);
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
hipLaunchKernelGGL(
(aliasMultinomialFilter),
inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, _q),
THCTensor_(data)(state, _probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
hipLaunchKernelGGL(
(aliasMultinomialSetup),
1, 1, 0, THCState_getCurrentStream(state),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
real q_max = THCTensor_(maxall)(state, _q);
hipLaunchKernelGGL(
(condDiv),
inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
}
THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
Generator* gen = THCRandom_getGenerator(state);
long K = THCudaLongTensor_nElement(state, _J);
long output_nelem = THCudaLongTensor_nElement(state, self);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor_(uniform)(state, uniform, 0, K);
THCTensor_(uniform)(state, bernoulli, 0, 1);
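// Alias-method draw: pick a column uniformly in [0, K), accept it with
// probability _q, otherwise return its alias from _J.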
hipLaunchKernelGGL(
(multinomialAliasDrawKernel),
THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state),
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
}
THC_API void THCTensor_(rand)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(uniform)(state, r_, 0, 1);
}
void THCTensor_(randn)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(normal)(state, r_, 0, 1);
}
#endif
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_bernoulli, double, double p, double, hiprand_uniform_double, x <= p)
#else
GENERATE_KERNEL1(generate_bernoulli, real, double p, float, hiprand_uniform, (ScalarConvert<bool, real>::to(x <= p)))
#endif
THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_bernoulli), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \
THC_API void THCTensor_(NAME)(THCState* state, \
THCTensor *self_, PROB_TYPE *probs_) \
{ \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, probs_)); \
Generator* gen = THCRandom_getGenerator(state); \
THCTensor *self = THCTensor_(newContiguous)(state, self_); \
PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \
ptrdiff_t size = THCTensor_(nElement)(state, self); \
ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \
real *result_data = THCTensor_(data)(state, self); \
PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \
\
THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \
\
hipLaunchKernelGGL( \
(generate_bernoulli_tensor), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state), \
gen->gen_states, size, result_data, probs_data); \
\
PROB_TYPE##_free(state, probs); \
THCTensor_(freeCopyTo)(state, self, self_); \
}
DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float)
DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double)
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, real, double p, float, hiprand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p)))))
#endif
THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_geometric), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef NUM_BLOCKS
#endif
| 8871cb4a3528037c3319bb46cc63bd293de580af.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_uniform), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_normal), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generateLogNormal<real>), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_exponential), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_cauchy), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
THAssert(THCTensor_(nDimension)(state, t) == 2);
long rows = THCTensor_(size)(state, t, 0);
long cols = THCTensor_(size)(state, t, 1);
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
hipLaunchKernelGGL(
(renormRowsL1<real>),
grid, block, block.x * sizeof(real),
THCState_getCurrentStream(state), THCTensor_(data)(state, t),
rows, cols);
}
THC_API void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist));
Generator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimension)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
long numDist =
inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0);
long numCategoriesLong =
inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) :
THCTensor_(size)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
// It is possible that prob_dist is non-contiguous
THCTensor* probDistContig =
THCTensor_(newContiguous)(state, prob_dist);
// Restructure data for 2d
if (inputSize == 1) {
THCTensor_(resize2d)(state, probDistContig, 1, numCategories);
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
// get current device properties
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(real) * sizeof(accreal));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
hipLaunchKernelGGL(
(sampleMultinomialOnce<real, accreal>),
grid, block,
requiredShared,
THCState_getCurrentStream(state),
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, probDistContig));
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, probDistContig);
THCTensor_(copy)(state, origDist, probDistContig);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, probDistContig);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
hipLaunchKernelGGL(
(sampleMultinomialWithReplacement),
grid, block, 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, 4L);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
hipLaunchKernelGGL(
(sampleMultinomialWithoutReplacement),
grid, block, 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
// Unfortunately, if prob_dist is contiguous already,
// newContiguous is not a private copy, so we have to restructure
// this too, so as to not affect prob_dist
THCTensor_(resize1d)(state, probDistContig, numCategories);
}
THCTensor_(free)(state, probDistContig);
}
THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THAssert(THCTensor_(isContiguous)(state, _probs));
long inputsize = THCTensor_(nElement)(state, _probs);
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
real one = ScalarConvert<long, real>::to(1);
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
hipLaunchKernelGGL(
(aliasMultinomialFilter),
inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, _q),
THCTensor_(data)(state, _probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
hipLaunchKernelGGL(
(aliasMultinomialSetup),
1, 1, 0, THCState_getCurrentStream(state),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
real q_max = THCTensor_(maxall)(state, _q);
hipLaunchKernelGGL(
(condDiv),
inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
}
THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
Generator* gen = THCRandom_getGenerator(state);
long K = THCudaLongTensor_nElement(state, _J);
long output_nelem = THCudaLongTensor_nElement(state, self);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor_(uniform)(state, uniform, 0, K);
THCTensor_(uniform)(state, bernoulli, 0, 1);
hipLaunchKernelGGL(
(multinomialAliasDrawKernel),
THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state),
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
}
THC_API void THCTensor_(rand)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(uniform)(state, r_, 0, 1);
}
void THCTensor_(randn)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(normal)(state, r_, 0, 1);
}
#endif
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_bernoulli, double, double p, double, curand_uniform_double, x <= p)
#else
GENERATE_KERNEL1(generate_bernoulli, real, double p, float, curand_uniform, (ScalarConvert<bool, real>::to(x <= p)))
#endif
THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_bernoulli), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \
THC_API void THCTensor_(NAME)(THCState* state, \
THCTensor *self_, PROB_TYPE *probs_) \
{ \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, probs_)); \
Generator* gen = THCRandom_getGenerator(state); \
THCTensor *self = THCTensor_(newContiguous)(state, self_); \
PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \
ptrdiff_t size = THCTensor_(nElement)(state, self); \
ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \
real *result_data = THCTensor_(data)(state, self); \
PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \
\
THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \
\
hipLaunchKernelGGL( \
(generate_bernoulli_tensor), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state), \
gen->gen_states, size, result_data, probs_data); \
\
PROB_TYPE##_free(state, probs); \
THCTensor_(freeCopyTo)(state, self, self_); \
}
DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float)
DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double)
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, real, double p, float, curand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p)))))
#endif
THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
ptrdiff_t size = THCTensor_(nElement)(state, self);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(
(generate_geometric), NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef NUM_BLOCKS
#endif
|
c5212ae6b950f05e4f2ed461757558b4966511f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#define LAZYKBEST_THREADS 32
__global__ void cunnx_LazyKBest_updateOutput_kernel(
float *output, float *indice, const float *input,
int inputSize, int outputSize)
{
__shared__ float bufferVal[LAZYKBEST_THREADS];
__shared__ float bufferIdx[LAZYKBEST_THREADS];
const int tx = threadIdx.x;
const int step = blockDim.x;
const int k = blockIdx.x;
float *output_k = output + k*outputSize;
float *indice_k = indice + k*outputSize;
const float *input_k = input + k*inputSize;
float maxVal = -FLT_MAX;
int maxIdx = -1;
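// Each thread scans a strided slice of the row and keeps its local maximum.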
for (int i=tx; i<inputSize; i+=step)
{
float val = input_k[i];
if (val > maxVal)
{
maxVal = val;
maxIdx = i;
}
}
bufferVal[tx] = maxVal;
bufferIdx[tx] = maxIdx;
// reduce
for (unsigned int stride = blockDim.x >> 1; stride > outputSize-1; stride >>= 1)
{
__syncthreads();
if (tx < stride)
{
float val = bufferVal[tx+stride];
if (val > bufferVal[tx])
{
bufferVal[tx] = val;
bufferIdx[tx] = bufferIdx[tx+stride];
}
}
}
if (tx < outputSize)
{
output_k[tx] = bufferVal[tx];
indice_k[tx] = bufferIdx[tx] + 1;
}
}
static int cunnx_LazyKBest_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_output", "torch.CudaTensor");
THCudaTensor *indice = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_indice", "torch.CudaTensor");
int k = luaT_getfieldcheckint(L, 1, "k");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, k <= LAZYKBEST_THREADS, 1, "k must be smaller than KBEST_THREADS");
luaL_argcheck(L, THCudaTensor_isContiguous(state, input), 2, "Expecting contiguous input");
THCudaTensor_resize2d(state, output, input->size[0], k);
THCudaTensor_resize2d(state, indice, input->size[0], k);
/* call cudakernel */
dim3 blocks(input->size[0]); // each cuda-block is an example
dim3 threads(LAZYKBEST_THREADS);
hipLaunchKernelGGL(( cunnx_LazyKBest_updateOutput_kernel), dim3(blocks),dim3(threads), 0, 0,
THCudaTensor_data(state, output), THCudaTensor_data(state, indice),
THCudaTensor_data(state, input), input->size[1], k
);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
return 1;
}
__global__ void cunnx_LazyKBest_updateGradInput_kernel(
float *gradInput, const float *indice, const float *gradOutput,
int inputSize, int outputSize)
{
int tx = threadIdx.x;
int step = blockDim.x;
int k = blockIdx.x;
float *gradInput_k = gradInput + k*inputSize;
const float *gradOutput_k = gradOutput + k*outputSize;
const float *indice_k = indice + k*outputSize;
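// Scatter each output gradient back to the input position it was selected
// from (indices are 1-based).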
for (int i=tx; i<outputSize; i+=step)
gradInput_k[(int)(indice_k[i] - 1)] = gradOutput_k[i];
}
static int cunnx_LazyKBest_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *indice = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
int k = luaT_getfieldcheckint(L, 1, "k");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, indice->nDimension == 2, 3, "2D(batch mode) tensor expected");
luaL_argcheck(L, THCudaTensor_isContiguous(state, input), 2, "Expecting contiguous input");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_fill(state, gradInput, 0);
/* call cudakernel */
dim3 blocks(input->size[0]); // each cuda-block is an example
dim3 threads(LAZYKBEST_THREADS);
hipLaunchKernelGGL(( cunnx_LazyKBest_updateGradInput_kernel), dim3(blocks),dim3(threads), 0, 0,
THCudaTensor_data(state, gradInput), THCudaTensor_data(state, indice),
THCudaTensor_data(state, gradOutput), input->size[1], k
);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
return 1;
}
static const struct luaL_Reg cunnx_LazyKBest__ [] = {
{"LazyKBest_updateOutput", cunnx_LazyKBest_updateOutput},
{"LazyKBest_updateGradInput", cunnx_LazyKBest_updateGradInput},
{NULL, NULL}
};
static void cunnx_LazyKBest_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunnx_LazyKBest__, "nn");
lua_pop(L,1);
}
| c5212ae6b950f05e4f2ed461757558b4966511f5.cu | #include "utils.h"
#define LAZYKBEST_THREADS 32
__global__ void cunnx_LazyKBest_updateOutput_kernel(
float *output, float *indice, const float *input,
int inputSize, int outputSize)
{
__shared__ float bufferVal[LAZYKBEST_THREADS];
__shared__ float bufferIdx[LAZYKBEST_THREADS];
const int tx = threadIdx.x;
const int step = blockDim.x;
const int k = blockIdx.x;
float *output_k = output + k*outputSize;
float *indice_k = indice + k*outputSize;
const float *input_k = input + k*inputSize;
float maxVal = -FLT_MAX;
int maxIdx = -1;
for (int i=tx; i<inputSize; i+=step)
{
float val = input_k[i];
if (val > maxVal)
{
maxVal = val;
maxIdx = i;
}
}
bufferVal[tx] = maxVal;
bufferIdx[tx] = maxIdx;
// reduce
for (unsigned int stride = blockDim.x >> 1; stride > outputSize-1; stride >>= 1)
{
__syncthreads();
if (tx < stride)
{
float val = bufferVal[tx+stride];
if (val > bufferVal[tx])
{
bufferVal[tx] = val;
bufferIdx[tx] = bufferIdx[tx+stride];
}
}
}
if (tx < outputSize)
{
output_k[tx] = bufferVal[tx];
indice_k[tx] = bufferIdx[tx] + 1;
}
}
static int cunnx_LazyKBest_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_output", "torch.CudaTensor");
THCudaTensor *indice = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "_indice", "torch.CudaTensor");
int k = luaT_getfieldcheckint(L, 1, "k");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, k <= LAZYKBEST_THREADS, 1, "k must be smaller than KBEST_THREADS");
luaL_argcheck(L, THCudaTensor_isContiguous(state, input), 2, "Expecting contiguous input");
THCudaTensor_resize2d(state, output, input->size[0], k);
THCudaTensor_resize2d(state, indice, input->size[0], k);
/* call cudakernel */
dim3 blocks(input->size[0]); // each cuda-block is an example
dim3 threads(LAZYKBEST_THREADS);
cunnx_LazyKBest_updateOutput_kernel<<<blocks,threads>>>(
THCudaTensor_data(state, output), THCudaTensor_data(state, indice),
THCudaTensor_data(state, input), input->size[1], k
);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
return 1;
}
__global__ void cunnx_LazyKBest_updateGradInput_kernel(
float *gradInput, const float *indice, const float *gradOutput,
int inputSize, int outputSize)
{
int tx = threadIdx.x;
int step = blockDim.x;
int k = blockIdx.x;
float *gradInput_k = gradInput + k*inputSize;
const float *gradOutput_k = gradOutput + k*outputSize;
const float *indice_k = indice + k*outputSize;
for (int i=tx; i<outputSize; i+=step)
gradInput_k[(int)(indice_k[i] - 1)] = gradOutput_k[i];
}
static int cunnx_LazyKBest_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *indice = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
int k = luaT_getfieldcheckint(L, 1, "k");
luaL_argcheck(L, input->nDimension == 2, 2, "2D(batch mode) tensor expected");
luaL_argcheck(L, indice->nDimension == 2, 3, "2D(batch mode) tensor expected");
luaL_argcheck(L, THCudaTensor_isContiguous(state, input), 2, "Expecting contiguous input");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_fill(state, gradInput, 0);
/* call cudakernel */
dim3 blocks(input->size[0]); // each cuda-block is an example
dim3 threads(LAZYKBEST_THREADS);
cunnx_LazyKBest_updateGradInput_kernel<<<blocks,threads>>>(
THCudaTensor_data(state, gradInput), THCudaTensor_data(state, indice),
THCudaTensor_data(state, gradOutput), input->size[1], k
);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
return 1;
}
static const struct luaL_Reg cunnx_LazyKBest__ [] = {
{"LazyKBest_updateOutput", cunnx_LazyKBest_updateOutput},
{"LazyKBest_updateGradInput", cunnx_LazyKBest_updateGradInput},
{NULL, NULL}
};
static void cunnx_LazyKBest_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunnx_LazyKBest__, "nn");
lua_pop(L,1);
}
|
a0e68b15d45a09166dc04121d7dd2f869e34e140.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <nvfunctional>
#include <cstdio>
#include <cassert>
#include <chrono>
#include <ftk/numeric/inverse_linear_interpolation_solver.hh>
#include <ftk/numeric/linear_interpolation.hh>
#include <ftk/numeric/clamp.hh>
#include <ftk/numeric/symmetric_matrix.hh>
#include <ftk/numeric/fixed_point.hh>
#include <ftk/numeric/critical_point_type.hh>
#include <ftk/numeric/critical_point_test.hh>
#include <ftk/mesh/lattice.hh>
#include <ftk/io/tdgl_metadata.hh>
// #include <ftk/filters/critical_point_lite.hh>
#include "common_hip.cuh"
using namespace ftk;
typedef tdgl_metadata_t meta_t;
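// line_integral approximates the line integral of the vector potential A along the
// straight segment from X0 to X1 with the trapezoidal rule:
//   int_{X0}^{X1} A . dX ~= 0.5 * (A0 + A1) . (X1 - X0)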
template <typename T>
__device__ __host__
T line_integral(const T X0[], const T X1[], const T A0[], const T A1[])
{
T dX[3] = {X1[0] - X0[0], X1[1] - X0[1], X1[2] - X0[2]};
T A[3] = {A0[0] + A1[0], A0[1] + A1[1], A0[2] + A1[2]};
return 0.5 * inner_product(A, dX);
}
template <typename T>
__device__ __host__
inline void magnetic_potential(const meta_t& m, T X[4], T A[3])
{
if (m.B[1] > 0) {
A[0] = -m.Kex;
A[1] = X[0] * m.B[2];
A[2] = -X[0] * m.B[1];
} else {
A[0] = -X[1] * m.B[2] - m.Kex;
A[1] = 0;
A[2] = X[1] * m.B[0];
}
}
template <int scope>
__device__ __host__
bool check_simplex_tdgl_vortex_3dt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext, // array dimension
const element42_t& e,
const meta_t *h[2],
const float *Rho[2], // current and next timesteps
const float *Phi[2],
cp_t &p)
{
if (e.corner[3] != current_timestep)
return false;
int vertices[3][4], indices[3];
size_t local_indices[3];
for (int i = 0; i < 3; i ++) {
for (int j = 0; j < 4; j ++) {
vertices[i][j] = e.corner[j]
+ unit_simplex_offset_4_2<scope>(e.type, i, j);
if (vertices[i][j] < domain.st[j] ||
vertices[i][j] > domain.st[j] + domain.sz[j] - 1)
return false;
}
indices[i] = domain.to_index(vertices[i]);
local_indices[i] = ext.to_index(vertices[i]);
}
float X[3][4], A[3][3];
float rho[3], phi[3], re[3], im[3];
for (int i = 0; i < 3; i ++) {
const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]);
const size_t t = unit_simplex_offset_4_2<scope>(e.type, i, 3);
rho[i] = Rho[t][k];
phi[i] = Phi[t][k];
re[i] = rho[i] * cos(phi[i]);
im[i] = rho[i] * sin(phi[i]);
for (int j = 0; j < 3; j ++)
X[i][j] = vertices[i][j] * h[0]->cell_lengths[j] + h[0]->origins[j];
X[i][3] = vertices[i][3];
magnetic_potential<float>(*h[t], X[i], A[i]);
}
// compute contour integral
float delta[3], phase_shift = 0;
for (int i = 0; i < 3; i ++) { // ignoring quasi-periodic boundary conditions
int j = (i+1) % 3;
float li = line_integral(X[i], X[j], A[i], A[j]);
delta[i] = mod2pi1( phi[j] - phi[i] - li ); // gauge transformation
phase_shift -= delta[i];
}
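// phase_shift now holds the gauge-corrected phase change accumulated around the
// triangle, so phase_shift / (2*pi) approximates the winding number; a nonzero
// winding indicates that a vortex line threads this cell.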
// check contour integral
float criteria = phase_shift / (2 * M_PI);
if (fabs(criteria) < 0.5) return false; // ignoring chiralities
// gauge transformation
float psi[3][2]; // in re/im
for (int i = 0; i < 3; i ++) {
if (i != 0) phi[i] = phi[i-1] + delta[i-1];
psi[i][0] = rho[i] * cos(phi[i]);
psi[i][1] = rho[i] * sin(phi[i]);
}
// locate zero
float mu[3], // barycentric coordinates
cond; // condition number
inverse_lerp_s2v2(psi, mu, &cond);
// interpolation
float x[4];
lerp_s2v4(X, mu, x);
// result
p.x[0] = x[0];
p.x[1] = x[1];
p.x[2] = x[2];
p.t = x[3];
p.cond = cond;
return true;
}
template <int scope>
__global__
void sweep_simplices(
int current_timestep,
const lattice4_t domain,
const lattice4_t core,
const lattice3_t ext, // array dimension
const meta_t *h_c,
const meta_t *h_n,
const float *rho_c, // current timestep
const float *rho_n, // next timestep
const float *phi_c,
const float *phi_n,
unsigned long long &ncps, cp_t *cps)
{
const float *Rho[2] = {rho_c, rho_n};
const float *Phi[2] = {phi_c, phi_n};
const meta_t *h[2] = {h_c, h_n};
int tid = getGlobalIdx_3D_1D();
const element42_t e = element42_from_index<scope>(core, tid);
cp_t cp;
bool succ = check_simplex_tdgl_vortex_3dt<scope>(
current_timestep,
domain, core, ext, e, h, Rho, Phi, cp);
if (succ) {
unsigned long long i = atomicAdd(&ncps, 1ul);
cp.tag = tid;
cps[i] = cp;
}
}
template <int scope>
static std::vector<cp_t> extract_tdgl_vortex_3dt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext,
const meta_t &h_c,
const meta_t &h_n,
const float *rho_c,
const float *rho_n,
const float *phi_c,
const float *phi_n)
{
auto t0 = std::chrono::high_resolution_clock::now();
const size_t ntasks = core.n() * ntypes_4_2<scope>();
// fprintf(stderr, "ntasks=%zu\n", ntasks);
const int maxGridDim = 1024;
const int blockSize = 256;
const int nBlocks = idivup(ntasks, blockSize);
dim3 gridSize;
if (nBlocks >= maxGridDim)
gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim);
else
gridSize = dim3(nBlocks);
meta_t *dh_c = NULL, *dh_n = NULL; // headers
hipMalloc((void**)&dh_c, sizeof(meta_t));
hipMemcpy(dh_c, &h_c, sizeof(meta_t), hipMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dh_c");
hipMalloc((void**)&dh_n, sizeof(meta_t));
hipMemcpy(dh_n, &h_n, sizeof(meta_t), hipMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dh_n");
float *drho_c = NULL, *drho_n = NULL;
if (rho_c) {
hipMalloc((void**)&drho_c, sizeof(float) * ext.n());
// fprintf(stderr, "allocating mem %zu\n", sizeof(float) * ext.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating drho_c");
hipMemcpy(drho_c, rho_c, sizeof(float) * ext.n(), hipMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_c");
}
if (rho_n) {
hipMalloc((void**)&drho_n, sizeof(float) * ext.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating drho_l");
hipMemcpy(drho_n, rho_n, sizeof(float) * ext.n(), hipMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_l");
}
float *dphi_c = NULL, *dphi_n = NULL;
if (phi_c) {
hipMalloc((void**)&dphi_c, sizeof(float) * ext.n());
hipMemcpy(dphi_c, phi_c, sizeof(float) * ext.n(), hipMemcpyHostToDevice);
}
if (phi_n) {
hipMalloc((void**)&dphi_n, sizeof(float) * ext.n());
hipMemcpy(dphi_n, phi_n, sizeof(float) * ext.n(), hipMemcpyHostToDevice);
}
unsigned long long *dncps; // number of cps
hipMalloc((void**)&dncps, sizeof(unsigned long long));
hipMemset(dncps, 0, sizeof(unsigned long long));
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps");
cp_t *dcps;
hipMalloc((void**)&dcps, sizeof(cp_t) * core.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps");
hipDeviceSynchronize();
fprintf(stderr, "calling kernel func...\n");
hipLaunchKernelGGL(( sweep_simplices<scope>), dim3(gridSize), dim3(blockSize), 0, 0,
current_timestep,
domain, core, ext, dh_c, dh_n, drho_c, drho_n, dphi_c, dphi_n,
*dncps, dcps);
hipDeviceSynchronize();
checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function");
unsigned long long ncps = 0;
hipMemcpy(&ncps, dncps, sizeof(unsigned long long), hipMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost, dncps");
fprintf(stderr, "ncps=%llu\n", ncps);
std::vector<cp_t> cps(ncps);
hipMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, hipMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost");
hipFree(dh_c);
hipFree(dh_n);
if (drho_c) hipFree(drho_c);
if (drho_n) hipFree(drho_n);
if (dphi_c) hipFree(dphi_c);
if (dphi_n) hipFree(dphi_n);
hipFree(dncps);
hipFree(dcps);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipFree");
hipDeviceSynchronize();
auto t1 = std::chrono::high_resolution_clock::now();
float duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9;
fprintf(stderr, "exiting gpu kernel, ncps=%llu, time=%f\n", ncps, duration);
return cps;
}
std::vector<cp_t>
extract_tdgl_vortex_3dt_cuda(
int scope,
int current_timestep,
const ftk::lattice& domain,
const ftk::lattice& core,
const ftk::lattice& ext,
const meta_t &h_c,
const meta_t &h_n,
const float *rho_c,
const float *rho_l,
const float *phi_c,
const float *phi_l)
{
lattice4_t D(domain);
lattice4_t C(core);
lattice3_t E(ext);
if (scope == scope_interval)
return extract_tdgl_vortex_3dt<scope_interval>(current_timestep, D, C, E, h_c, h_n, rho_c, rho_l, phi_c, phi_l);
else
return extract_tdgl_vortex_3dt<scope_ordinal>(current_timestep, D, C, E, h_c, h_n, rho_c, rho_l, phi_c, phi_l);
}
| a0e68b15d45a09166dc04121d7dd2f869e34e140.cu | #include <nvfunctional>
#include <cstdio>
#include <cassert>
#include <chrono>
#include <ftk/numeric/inverse_linear_interpolation_solver.hh>
#include <ftk/numeric/linear_interpolation.hh>
#include <ftk/numeric/clamp.hh>
#include <ftk/numeric/symmetric_matrix.hh>
#include <ftk/numeric/fixed_point.hh>
#include <ftk/numeric/critical_point_type.hh>
#include <ftk/numeric/critical_point_test.hh>
#include <ftk/mesh/lattice.hh>
#include <ftk/io/tdgl_metadata.hh>
// #include <ftk/filters/critical_point_lite.hh>
#include "common.cuh"
using namespace ftk;
typedef tdgl_metadata_t meta_t;
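// line_integral approximates the line integral of the vector potential A along the
// straight segment from X0 to X1 with the trapezoidal rule:
//   int_{X0}^{X1} A . dX ~= 0.5 * (A0 + A1) . (X1 - X0)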
template <typename T>
__device__ __host__
T line_integral(const T X0[], const T X1[], const T A0[], const T A1[])
{
T dX[3] = {X1[0] - X0[0], X1[1] - X0[1], X1[2] - X0[2]};
T A[3] = {A0[0] + A1[0], A0[1] + A1[1], A0[2] + A1[2]};
return 0.5 * inner_product(A, dX);
}
template <typename T>
__device__ __host__
inline void magnetic_potential(const meta_t& m, T X[4], T A[3])
{
if (m.B[1] > 0) {
A[0] = -m.Kex;
A[1] = X[0] * m.B[2];
A[2] = -X[0] * m.B[1];
} else {
A[0] = -X[1] * m.B[2] - m.Kex;
A[1] = 0;
A[2] = X[1] * m.B[0];
}
}
template <int scope>
__device__ __host__
bool check_simplex_tdgl_vortex_3dt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext, // array dimension
const element42_t& e,
const meta_t *h[2],
const float *Rho[2], // current and next timesteps
const float *Phi[2],
cp_t &p)
{
if (e.corner[3] != current_timestep)
return false;
int vertices[3][4], indices[3];
size_t local_indices[3];
for (int i = 0; i < 3; i ++) {
for (int j = 0; j < 4; j ++) {
vertices[i][j] = e.corner[j]
+ unit_simplex_offset_4_2<scope>(e.type, i, j);
if (vertices[i][j] < domain.st[j] ||
vertices[i][j] > domain.st[j] + domain.sz[j] - 1)
return false;
}
indices[i] = domain.to_index(vertices[i]);
local_indices[i] = ext.to_index(vertices[i]);
}
float X[3][4], A[3][3];
float rho[3], phi[3], re[3], im[3];
for (int i = 0; i < 3; i ++) {
const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]);
const size_t t = unit_simplex_offset_4_2<scope>(e.type, i, 3);
rho[i] = Rho[t][k];
phi[i] = Phi[t][k];
re[i] = rho[i] * cos(phi[i]);
im[i] = rho[i] * sin(phi[i]);
for (int j = 0; j < 3; j ++)
X[i][j] = vertices[i][j] * h[0]->cell_lengths[j] + h[0]->origins[j];
X[i][3] = vertices[i][3];
magnetic_potential<float>(*h[t], X[i], A[i]);
}
// compute contour integral
float delta[3], phase_shift = 0;
for (int i = 0; i < 3; i ++) { // ignoring quasi-periodic boundary conditions
int j = (i+1) % 3;
float li = line_integral(X[i], X[j], A[i], A[j]);
delta[i] = mod2pi1( phi[j] - phi[i] - li ); // gauge transformation
phase_shift -= delta[i];
}
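// phase_shift now holds the gauge-corrected phase change accumulated around the
// triangle, so phase_shift / (2*pi) approximates the winding number; a nonzero
// winding indicates that a vortex line threads this cell.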
// check contour integral
float criteria = phase_shift / (2 * M_PI);
if (fabs(criteria) < 0.5) return false; // ignoring chiralities
// gauge transformation
float psi[3][2]; // in re/im
for (int i = 0; i < 3; i ++) {
if (i != 0) phi[i] = phi[i-1] + delta[i-1];
psi[i][0] = rho[i] * cos(phi[i]);
psi[i][1] = rho[i] * sin(phi[i]);
}
// locate zero
float mu[3], // barycentric coordinates
cond; // condition number
inverse_lerp_s2v2(psi, mu, &cond);
// interpolation
float x[4];
lerp_s2v4(X, mu, x);
// result
p.x[0] = x[0];
p.x[1] = x[1];
p.x[2] = x[2];
p.t = x[3];
p.cond = cond;
return true;
}
template <int scope>
__global__
void sweep_simplices(
int current_timestep,
const lattice4_t domain,
const lattice4_t core,
const lattice3_t ext, // array dimension
const meta_t *h_c,
const meta_t *h_n,
const float *rho_c, // current timestep
const float *rho_n, // next timestep
const float *phi_c,
const float *phi_n,
unsigned long long &ncps, cp_t *cps)
{
const float *Rho[2] = {rho_c, rho_n};
const float *Phi[2] = {phi_c, phi_n};
const meta_t *h[2] = {h_c, h_n};
int tid = getGlobalIdx_3D_1D();
const element42_t e = element42_from_index<scope>(core, tid);
cp_t cp;
bool succ = check_simplex_tdgl_vortex_3dt<scope>(
current_timestep,
domain, core, ext, e, h, Rho, Phi, cp);
if (succ) {
unsigned long long i = atomicAdd(&ncps, 1ul);
cp.tag = tid;
cps[i] = cp;
}
}
template <int scope>
static std::vector<cp_t> extract_tdgl_vortex_3dt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext,
const meta_t &h_c,
const meta_t &h_n,
const float *rho_c,
const float *rho_n,
const float *phi_c,
const float *phi_n)
{
auto t0 = std::chrono::high_resolution_clock::now();
const size_t ntasks = core.n() * ntypes_4_2<scope>();
// fprintf(stderr, "ntasks=%zu\n", ntasks);
const int maxGridDim = 1024;
const int blockSize = 256;
const int nBlocks = idivup(ntasks, blockSize);
dim3 gridSize;
if (nBlocks >= maxGridDim)
gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim);
else
gridSize = dim3(nBlocks);
meta_t *dh_c = NULL, *dh_n = NULL; // headers
cudaMalloc((void**)&dh_c, sizeof(meta_t));
cudaMemcpy(dh_c, &h_c, sizeof(meta_t), cudaMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dh_c");
cudaMalloc((void**)&dh_n, sizeof(meta_t));
cudaMemcpy(dh_n, &h_n, sizeof(meta_t), cudaMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dh_n");
float *drho_c = NULL, *drho_n = NULL;
if (rho_c) {
cudaMalloc((void**)&drho_c, sizeof(float) * ext.n());
// fprintf(stderr, "allocating mem %zu\n", sizeof(float) * ext.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating drho_c");
cudaMemcpy(drho_c, rho_c, sizeof(float) * ext.n(), cudaMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_c");
}
if (rho_n) {
cudaMalloc((void**)&drho_n, sizeof(float) * ext.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating drho_l");
cudaMemcpy(drho_n, rho_n, sizeof(float) * ext.n(), cudaMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_l");
}
float *dphi_c = NULL, *dphi_n = NULL;
if (phi_c) {
cudaMalloc((void**)&dphi_c, sizeof(float) * ext.n());
cudaMemcpy(dphi_c, phi_c, sizeof(float) * ext.n(), cudaMemcpyHostToDevice);
}
if (phi_n) {
cudaMalloc((void**)&dphi_n, sizeof(float) * ext.n());
cudaMemcpy(dphi_n, phi_n, sizeof(float) * ext.n(), cudaMemcpyHostToDevice);
}
unsigned long long *dncps; // number of cps
cudaMalloc((void**)&dncps, sizeof(unsigned long long));
cudaMemset(dncps, 0, sizeof(unsigned long long));
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps");
cp_t *dcps;
cudaMalloc((void**)&dcps, sizeof(cp_t) * core.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps");
cudaDeviceSynchronize();
fprintf(stderr, "calling kernel func...\n");
sweep_simplices<scope><<<gridSize, blockSize>>>(
current_timestep,
domain, core, ext, dh_c, dh_n, drho_c, drho_n, dphi_c, dphi_n,
*dncps, dcps);
cudaDeviceSynchronize();
checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function");
unsigned long long ncps = 0;
cudaMemcpy(&ncps, dncps, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost, dncps");
fprintf(stderr, "ncps=%llu\n", ncps);
std::vector<cp_t> cps(ncps);
cudaMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, cudaMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost");
cudaFree(dh_c);
cudaFree(dh_n);
if (drho_c) cudaFree(drho_c);
if (drho_n) cudaFree(drho_n);
if (dphi_c) cudaFree(dphi_c);
if (dphi_n) cudaFree(dphi_n);
cudaFree(dncps);
cudaFree(dcps);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaFree");
cudaDeviceSynchronize();
auto t1 = std::chrono::high_resolution_clock::now();
float duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9;
fprintf(stderr, "exiting gpu kernel, ncps=%llu, time=%f\n", ncps, duration);
return cps;
}
std::vector<cp_t>
extract_tdgl_vortex_3dt_cuda(
int scope,
int current_timestep,
const ftk::lattice& domain,
const ftk::lattice& core,
const ftk::lattice& ext,
const meta_t &h_c,
const meta_t &h_n,
const float *rho_c,
const float *rho_l,
const float *phi_c,
const float *phi_l)
{
lattice4_t D(domain);
lattice4_t C(core);
lattice3_t E(ext);
if (scope == scope_interval)
return extract_tdgl_vortex_3dt<scope_interval>(current_timestep, D, C, E, h_c, h_n, rho_c, rho_l, phi_c, phi_l);
else
return extract_tdgl_vortex_3dt<scope_ordinal>(current_timestep, D, C, E, h_c, h_n, rho_c, rho_l, phi_c, phi_l);
}
|
98648a4bd918ec0221b8218d71d99dfc36360323.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgetf2.cu, normal z -> s, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"
#define sger_bs 512 // 512 is max threads for 1.x cards
void magma_sgetf2_swap(
magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue );
void magma_sscal_sger(
magma_int_t m, magma_int_t n, float *dA, magma_int_t ldda,
magma_queue_t );
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
SGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
dA REAL array, dimension (LDDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_getf2
*******************************************************************************/
extern "C" magma_int_t
magma_sgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_queue_t queue,
magma_int_t *info )
{
#define dA(i, j) (dA + (i) + (j)*ldda)
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > sger_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for (j=0; j < min_mn; j++) {
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_isamax( m-j, dA(j,j), 1, queue );
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_sgetf2_swap( n, dA, j, jp, ldda, queue );
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_sscal_sger( m-j, n-j, dA(j, j), ldda, queue );
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
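// Usage sketch (illustrative only; assumes dA already holds an m-by-n single-precision
// matrix on the device, ldda >= max(1,m), ipiv is a host array of length min(m,n),
// and queue is a valid magma_queue_t):
//
//     magma_int_t info;
//     magma_sgetf2_gpu( m, n, dA, ldda, ipiv, queue, &info );
//     if (info > 0) { /* U(info,info) is exactly zero */ }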
// ===========================================================================
// TODO: use standard BLAS magma_sswap?
#define sswap_bs 64
/******************************************************************************/
__global__
void kernel_sswap(int n, float *x, int i, int j, int incx)
{
int id = blockIdx.x * sswap_bs + threadIdx.x;
if (id < n) {
float tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
/******************************************************************************/
void magma_sgetf2_swap(
magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue )
{
/* sswap two row vectors: ith and jth */
dim3 threads( sswap_bs );
dim3 grid( magma_ceildiv( n, sswap_bs ) );
hipLaunchKernelGGL(( kernel_sswap)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, x, i, j, incx);
}
/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float shared_data[];
/******************************************************************************/
__global__
void kernel_sscal_sger(int m, int n, float *A, int lda)
{
float *shared_y = shared_data;
int tid = blockIdx.x * sger_bs + threadIdx.x;
float reg = MAGMA_S_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_S_DIV(MAGMA_S_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_S_NEG_ONE) * shared_y[i] * reg;
}
}
}
/******************************************************************************/
void magma_sscal_sger(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
/*
Specialized kernel that merges sscal and sger
1) scale (sscal) the first column vector A(1:M-1,0) with 1/A(0,0);
2) Perform a sger operation on the trailing matrix: A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
dim3 threads( sger_bs );
dim3 grid( magma_ceildiv( m, sger_bs ) );
size_t shared_size = sizeof(float)*(n);
hipLaunchKernelGGL(( kernel_sscal_sger)
, dim3(grid), dim3(threads), shared_size, queue->cuda_stream() ,
m, n, dA, ldda);
}
| 98648a4bd918ec0221b8218d71d99dfc36360323.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgetf2.cu, normal z -> s, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"
#define sger_bs 512 // 512 is max threads for 1.x cards
void magma_sgetf2_swap(
magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue );
void magma_sscal_sger(
magma_int_t m, magma_int_t n, float *dA, magma_int_t ldda,
magma_queue_t );
// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
SGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
dA REAL array, dimension (LDDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
queue magma_queue_t
Queue to execute in.
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_getf2
*******************************************************************************/
extern "C" magma_int_t
magma_sgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_int_t *ipiv,
magma_queue_t queue,
magma_int_t *info )
{
#define dA(i, j) (dA + (i) + (j)*ldda)
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > sger_bs) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for (j=0; j < min_mn; j++) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_isamax( m-j, dA(j,j), 1, queue );
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of dA since it is on GPU
//if ( dA(jp, j) != 0.0) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_sgetf2_swap( n, dA, j, jp, ldda, queue );
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_sscal_sger( m-j, n-j, dA(j, j), ldda, queue );
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
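// Usage sketch (illustrative only; assumes dA already holds an m-by-n single-precision
// matrix on the device, ldda >= max(1,m), ipiv is a host array of length min(m,n),
// and queue is a valid magma_queue_t):
//
//     magma_int_t info;
//     magma_sgetf2_gpu( m, n, dA, ldda, ipiv, queue, &info );
//     if (info > 0) { /* U(info,info) is exactly zero */ }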
// ===========================================================================
// TODO: use standard BLAS magma_sswap?
#define sswap_bs 64
/******************************************************************************/
__global__
void kernel_sswap(int n, float *x, int i, int j, int incx)
{
int id = blockIdx.x * sswap_bs + threadIdx.x;
if (id < n) {
float tmp = x[i + incx*id];
x[i + incx*id] = x[j + incx*id];
x[j + incx*id] = tmp;
}
}
/******************************************************************************/
void magma_sgetf2_swap(
magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx,
magma_queue_t queue )
{
/* sswap two row vectors: ith and jth */
dim3 threads( sswap_bs );
dim3 grid( magma_ceildiv( n, sswap_bs ) );
kernel_sswap
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x, i, j, incx);
}
/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ float shared_data[];
/******************************************************************************/
__global__
void kernel_sscal_sger(int m, int n, float *A, int lda)
{
float *shared_y = shared_data;
int tid = blockIdx.x * sger_bs + threadIdx.x;
float reg = MAGMA_S_ZERO;
if (threadIdx.x < n) {
shared_y[threadIdx.x] = A[lda * threadIdx.x];
}
__syncthreads();
if (tid < m && tid > 0) {
reg = A[tid];
reg *= MAGMA_S_DIV(MAGMA_S_ONE, shared_y[0]);
A[tid] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
A[tid + i*lda] += (MAGMA_S_NEG_ONE) * shared_y[i] * reg;
}
}
}
/******************************************************************************/
void magma_sscal_sger(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
/*
Specialized kernel that merges sscal and sger
1) scale (sscal) the first column vector A(1:M-1,0) with 1/A(0,0);
2) Perform a sger operation on the trailing matrix: A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
dim3 threads( sger_bs );
dim3 grid( magma_ceildiv( m, sger_bs ) );
size_t shared_size = sizeof(float)*(n);
kernel_sscal_sger
<<< grid, threads, shared_size, queue->cuda_stream() >>>
(m, n, dA, ldda);
}
|
33cb363c4bd07a47741b4ca4b8dc23054aaf47b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "cuda_utils.cuh"
/*
* The user's eigenvalues CUDA implementation should go here. All that is required to change from a CPU version to a CUDA version
* is for the user to add the value of q_arr_idx to their Q array indices and to add other_idx to all other array indices. For example:
* Q[1] becomes Q[q_arr_idx + 1] and lambda[1] becomes lambda[other_idx + 1].
*/
__global__ void eigenvalues_kernel(const double* const Q, const int normalNonZeroIndex, double* lambda, const int numVariables, const int patchBegin, const int basisSize, const int idxSelector) {
// Dimensions = 2
// Number of variables = 5 (#unknowns + #parameters)
const int other_idx = threadIdx.x + blockIdx.x * threadIdx.y * numVariables;
const int q_arr_idx = idx(threadIdx.x, threadIdx.y, blockIdx.x, numVariables, basisSize, patchBegin, idxSelector);
if(q_arr_idx == -1)
return;
// Application code goes here
const double GAMMA = 1.4;
const double irho = 1.0/Q[q_arr_idx];
const double p = (GAMMA-1) * (Q[q_arr_idx+4] - 0.5 * (Q[q_arr_idx+1] * Q[q_arr_idx+1] + Q[q_arr_idx+2] * Q[q_arr_idx+2]) * irho);
const double u_n = Q[q_arr_idx+normalNonZeroIndex + 1] * irho;
const double c = sqrt(GAMMA * p * irho);
lambda[other_idx] = u_n - c;
lambda[other_idx+1] = u_n;
lambda[other_idx+2] = u_n;
lambda[other_idx+3] = u_n;
lambda[other_idx+4] = u_n + c;
}
/*
* The user's flux CUDA implementation should go here. All that is required to change from a CPU version to a CUDA version
* is for the user to add the value of q_arr_idx to their Q array indices and to add other_idx to all other array indices. For example:
* Q[1] becomes Q[q_arr_idx + 1] and f[1] becomes f[other_idx + 1].
*/
__global__ void flux_kernel(const double* const Q, double** F, const int numVariables, const int patchBegin, const int basisSize, const int idxSelector) {
// Dimensions = 2
// Number of variables = 5 (#unknowns + #parameters)
const int other_idx = threadIdx.x + blockIdx.x * threadIdx.y * numVariables;
const int q_arr_idx = idx(threadIdx.x, threadIdx.y, blockIdx.x, numVariables, basisSize, patchBegin, idxSelector);
if(q_arr_idx == -1)
return;
const double GAMMA = 1.4;
const double irho = 1.0/Q[q_arr_idx];
const double p = (GAMMA-1) * (Q[q_arr_idx+4] - 0.5 * (Q[q_arr_idx+1] * Q[q_arr_idx+1] + Q[q_arr_idx+2] * Q[q_arr_idx+2]) * irho);
double* f = F[0];
double* g = F[1];
f[other_idx] = Q[q_arr_idx+1]; // should be numVariables * tid
f[other_idx+1] = irho * Q[q_arr_idx+1] * Q[q_arr_idx+1] + p;
f[other_idx+2] = irho * Q[q_arr_idx+1] * Q[q_arr_idx+2];
f[other_idx+3] = irho * Q[q_arr_idx+1] * Q[q_arr_idx+3];
f[other_idx+4] = irho * Q[q_arr_idx+1] * (Q[q_arr_idx+4] + p);
g[other_idx] = Q[q_arr_idx+2]; // Should be numVariables * tid + numVariables
g[other_idx+1] = irho * Q[q_arr_idx+2] * Q[q_arr_idx+1];
g[other_idx+2] = irho * Q[q_arr_idx+2] * Q[q_arr_idx+2] + p;
g[other_idx+3] = irho * Q[q_arr_idx+2] * Q[q_arr_idx+3];
g[other_idx+4] = irho * Q[q_arr_idx+2] * (Q[q_arr_idx+4] + p);
} | 33cb363c4bd07a47741b4ca4b8dc23054aaf47b5.cu | #include <stdio.h>
#include <math.h>
#include "cuda_utils.cuh"
/*
* The user's eigenvalues CUDA implementation should go here. All that is required to change from a CPU version to a CUDA version
* is for the user to add the value of q_arr_idx to their Q array indices and to add other_idx to all other array indices. For example:
* Q[1] becomes Q[q_arr_idx + 1] and lambda[1] becomes lambda[other_idx + 1].
*/
__global__ void eigenvalues_kernel(const double* const Q, const int normalNonZeroIndex, double* lambda, const int numVariables, const int patchBegin, const int basisSize, const int idxSelector) {
// Dimensions = 2
// Number of variables = 5 (#unknowns + #parameters)
const int other_idx = threadIdx.x + blockIdx.x * threadIdx.y * numVariables;
const int q_arr_idx = idx(threadIdx.x, threadIdx.y, blockIdx.x, numVariables, basisSize, patchBegin, idxSelector);
if(q_arr_idx == -1)
return;
// Application code goes here
const double GAMMA = 1.4;
const double irho = 1.0/Q[q_arr_idx];
const double p = (GAMMA-1) * (Q[q_arr_idx+4] - 0.5 * (Q[q_arr_idx+1] * Q[q_arr_idx+1] + Q[q_arr_idx+2] * Q[q_arr_idx+2]) * irho);
const double u_n = Q[q_arr_idx+normalNonZeroIndex + 1] * irho;
const double c = sqrt(GAMMA * p * irho);
lambda[other_idx] = u_n - c;
lambda[other_idx+1] = u_n;
lambda[other_idx+2] = u_n;
lambda[other_idx+3] = u_n;
lambda[other_idx+4] = u_n + c;
}
/*
* The user's flux CUDA implementation should go here. All that is required to change from a CPU version to a CUDA version
* is for the user to add the value of q_arr_idx to their Q array indices and to add other_idx to all other array indices. For example:
* Q[1] becomes Q[q_arr_idx + 1] and f[1] becomes f[other_idx + 1].
*/
__global__ void flux_kernel(const double* const Q, double** F, const int numVariables, const int patchBegin, const int basisSize, const int idxSelector) {
// Dimensions = 2
// Number of variables = 5 (#unknowns + #parameters)
const int other_idx = threadIdx.x + blockIdx.x * threadIdx.y * numVariables;
const int q_arr_idx = idx(threadIdx.x, threadIdx.y, blockIdx.x, numVariables, basisSize, patchBegin, idxSelector);
if(q_arr_idx == -1)
return;
const double GAMMA = 1.4;
const double irho = 1.0/Q[q_arr_idx];
const double p = (GAMMA-1) * (Q[q_arr_idx+4] - 0.5 * (Q[q_arr_idx+1] * Q[q_arr_idx+1] + Q[q_arr_idx+2] * Q[q_arr_idx+2]) * irho);
double* f = F[0];
double* g = F[1];
f[other_idx] = Q[q_arr_idx+1]; // should be numVariables * tid
f[other_idx+1] = irho * Q[q_arr_idx+1] * Q[q_arr_idx+1] + p;
f[other_idx+2] = irho * Q[q_arr_idx+1] * Q[q_arr_idx+2];
f[other_idx+3] = irho * Q[q_arr_idx+1] * Q[q_arr_idx+3];
f[other_idx+4] = irho * Q[q_arr_idx+1] * (Q[q_arr_idx+4] + p);
g[other_idx] = Q[q_arr_idx+2]; // Should be numVariables * tid + numVariables
g[other_idx+1] = irho * Q[q_arr_idx+2] * Q[q_arr_idx+1];
g[other_idx+2] = irho * Q[q_arr_idx+2] * Q[q_arr_idx+2] + p;
g[other_idx+3] = irho * Q[q_arr_idx+2] * Q[q_arr_idx+3];
g[other_idx+4] = irho * Q[q_arr_idx+2] * (Q[q_arr_idx+4] + p);
} |
08fe97344e8183e93eebf771c91038f09cc1d4ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <gptl_cuda.h>
#include "./localproto.h"
__device__ float doalot_log (int n, int innerlooplen)
{
int i, iter;
float sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
for (i = 0; i < n; ++i) {
sum += log ((iter*i) + 1.);
}
}
return sum;
}
__device__ float doalot_log_inner (int n, int innerlooplen, int *handle)
{
int i, iter;
int ret;
float sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
ret = GPTLstart_gpu (*handle);
for (i = 0; i < n; ++i) {
sum += log ((iter*i) + 1.);
}
ret = GPTLstop_gpu (*handle);
}
return sum;
}
__device__ float doalot_sqrt (int n, int innerlooplen)
{
int i, iter;
float sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
for (i = 0; i < n; ++i) {
sum += sqrtf ((float) iter*i);
}
}
return sum;
}
__device__ double doalot_sqrt_double (int n, int innerlooplen)
{
int i, iter;
double sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
for (i = 0; i < n; ++i) {
sum += sqrt ((double) iter*i);
}
}
return sum;
}
__global__ void donothing (int *total_gputime, int *donothing_handle)
{
int ret;
ret = GPTLstart_gpu (*total_gputime);
ret = GPTLstart_gpu (*donothing_handle);
ret = GPTLstop_gpu (*donothing_handle);
ret = GPTLstop_gpu (*total_gputime);
}
| 08fe97344e8183e93eebf771c91038f09cc1d4ba.cu | #include <cuda.h>
#include <math.h>
#include <gptl_cuda.h>
#include "./localproto.h"
__device__ float doalot_log (int n, int innerlooplen)
{
int i, iter;
float sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
for (i = 0; i < n; ++i) {
sum += log ((iter*i) + 1.);
}
}
return sum;
}
__device__ float doalot_log_inner (int n, int innerlooplen, int *handle)
{
int i, iter;
int ret;
float sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
ret = GPTLstart_gpu (*handle);
for (i = 0; i < n; ++i) {
sum += log ((iter*i) + 1.);
}
ret = GPTLstop_gpu (*handle);
}
return sum;
}
__device__ float doalot_sqrt (int n, int innerlooplen)
{
int i, iter;
float sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
for (i = 0; i < n; ++i) {
sum += sqrtf ((float) iter*i);
}
}
return sum;
}
__device__ double doalot_sqrt_double (int n, int innerlooplen)
{
int i, iter;
double sum;
sum = 0.;
for (iter = 0; iter < innerlooplen; ++iter) {
for (i = 0; i < n; ++i) {
sum += sqrt ((double) iter*i);
}
}
return sum;
}
__global__ void donothing (int *total_gputime, int *donothing_handle)
{
int ret;
ret = GPTLstart_gpu (*total_gputime);
ret = GPTLstart_gpu (*donothing_handle);
ret = GPTLstop_gpu (*donothing_handle);
ret = GPTLstop_gpu (*total_gputime);
}
|
b02c9f6e097ce5f7b30914a6557328e27a7c8e45.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"
#define DEVICE_INTRINSIC_QUALIFIERS __device__ __forceinline__
DEVICE_INTRINSIC_QUALIFIERS
unsigned int smid()
{
unsigned int r;
asm("mov.u32 %0, %%smid;" : "=r"(r));
return r;
}
DEVICE_INTRINSIC_QUALIFIERS
unsigned int timer()
{
unsigned int r;
asm("mov.u32 %0, %%clock;" : "=r"(r));
return r;
}
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
unsigned int start, end;
if(threadIdx.x == 0 && threadIdx.y == 0)
start = timer();
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
if(threadIdx.x == 0 && threadIdx.y == 0)
end = timer();
// output timing
if(threadIdx.x == 0 && threadIdx.y == 0)
{
unsigned int sm_id = smid();
printf("smx:%u:%u:%u\n", sm_id, start, end);
}
}
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
if(threadIdx.x == 0 && threadIdx.y == 0)
{
//int sm_id = smid();
//cuPrintf("block(%d, %d) \t on smx %5d\n", blockIdx.x, blockIdx.y, sm_id);
}
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
| b02c9f6e097ce5f7b30914a6557328e27a7c8e45.cu |
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"
#define DEVICE_INTRINSIC_QUALIFIERS __device__ __forceinline__
DEVICE_INTRINSIC_QUALIFIERS
unsigned int smid()
{
unsigned int r;
asm("mov.u32 %0, %%smid;" : "=r"(r));
return r;
}
DEVICE_INTRINSIC_QUALIFIERS
unsigned int timer()
{
unsigned int r;
asm("mov.u32 %0, %%clock;" : "=r"(r));
return r;
}
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
unsigned int start, end;
if(threadIdx.x == 0 && threadIdx.y == 0)
start = timer();
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
if(threadIdx.x == 0 && threadIdx.y == 0)
end = timer();
// output timing
if(threadIdx.x == 0 && threadIdx.y == 0)
{
unsigned int sm_id = smid();
printf("smx:%u:%u:%u\n", sm_id, start, end);
}
}
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
if(threadIdx.x == 0 && threadIdx.y == 0)
{
//int sm_id = smid();
//cuPrintf("block(%d, %d) \t on smx %5d\n", blockIdx.x, blockIdx.y, sm_id);
}
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
cf6c510ef3542647954bd06541824f6c1473014e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#include "kernels.h"
}
#endif
__global__ void map(char* dataIn,
char2* dataOut,
int channelSelect,
params pfbParams)
{
// select the channel range
int channelMin = pfbParams.fine_channels*channelSelect;
int absIdx = 2 * blockDim.y*(blockIdx.x*pfbParams.coarse_channels + (channelMin+blockIdx.y)) + 2 * threadIdx.y; // times 2 because we are mapping a sequence of values to char2 array.
int mapIdx = blockDim.y*(blockIdx.x*gridDim.y + blockIdx.y) + threadIdx.y;
dataOut[mapIdx].x = dataIn[absIdx];
dataOut[mapIdx].y = dataIn[absIdx+1];
return;
}
/* prepare data for PFB */
__global__ void PFB_kernel(char2* pc2Data,
float2* pf2FFTIn,
float* pfPFBCoeff,
params pfbParams)
{
int blkIdx = blockIdx.y * gridDim.x + blockIdx.x;
int i = blkIdx*blockDim.x + threadIdx.x;
int absCoeff = (blockIdx.x * blockDim.x) + threadIdx.x;
int iNFFT = (gridDim.x * blockDim.x);
int j = 0;
int iAbsIdx = 0;
int coeffIdx = 0;
float2 f2PFBOut = make_float2(0.0, 0.0);
char2 c2Data = make_char2(0, 0);
for (j = 0; j < pfbParams.taps; ++j)
{
/* calculate the absolute index */
iAbsIdx = (j * iNFFT) + i;
coeffIdx = (j * iNFFT) + absCoeff;
/* get the address of the block */
c2Data = pc2Data[iAbsIdx];
f2PFBOut.x += (float) c2Data.x * pfPFBCoeff[coeffIdx];
f2PFBOut.y += (float) c2Data.y * pfPFBCoeff[coeffIdx];
}
pf2FFTIn[i] = f2PFBOut;
return;
}
// Discard channels and perform FFT shift (part of scalloping solution)
//__global__ void Discard_Shift_kernel(float2* pf2FFTOut, float2* pf2DiscShift)
//{
// int pt = threadIdx.x; // N-point FFT index (0:63)
// int sb = blockIdx.x; // Number of elements x coarse channels (time series) index (0:319)
// //int st = blockIdx.y; // Windows index (4000/32 = 125 windows) (0:124)
// int Nfft = blockDim.x; // N-point FFT (64)
// //int Nsubbands = gridDim.x; // Nele*NfineChannels (64*5=320)
//// int Nchunks = 2;
//// int i = 0;
//
// int recover_idx = 0;
// int fftshift_idx = 0;
//
// // // Both pre-processor macros are defined in kernels.h //////////////////////////////
// // pf2DiscShift[fftshift_idx(pt,i,sb,st)].x = pf2FFTOut[recover_idx(pt,i,sb,st)].x;
// // pf2DiscShift[fftshift_idx(pt,i,sb,st)].y = pf2FFTOut[recover_idx(pt,i,sb,st)].y;
// // ////////////////////////////////////////////////////////////////////////////////////
//
//// for (i = 0; i < Nchunks; i++)
//// {
//// if (pt < (Nfft/4)) // pt indexing: 0:15 with Nfft/4 = 16
//// {
//// // recover_idx = (pt + (48*i)) + Nfft*sb + Nfft*Nsubbands*st;
//// // fftshift_idx = (pt + (16*(1-i))) + (Nfft/2)*sb + (Nfft/2)*Nsubbands*st;
////
//// recover_idx = (pt + (48*i)) + Nfft*sb;
//// fftshift_idx = (pt + (16*(1-i))) + (Nfft/2)*sb;
////
//// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
//// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
//// }
//// }
//
// if (pt < (Nfft/4))
// {
// recover_idx = pt + Nfft*sb;
// fftshift_idx = (pt + 16) + (Nfft/2)*sb;
//
// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
// }
//
// if (pt >= (Nfft*(3/4)))
// {
// recover_idx = pt + Nfft*sb;
// fftshift_idx = (pt - 48) + (Nfft/2)*sb;
//
// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
// }
//
// return;
//}
// Discard channels and perform FFT shift (part of scalloping solution and altered dimensions (subbands then nfft))
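// For Nfft = 64 this kernel keeps bins 0:15 and 48:63 (the Nfft/2 bins nearest DC),
// discards the high-frequency bins 16:47, and swaps the two kept quarters so that the
// Nfft/2-point output is stored in FFT-shifted (negative-then-positive frequency) order.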
__global__ void Discard_Shift_kernel(float2* pf2FFTOut, float2* pf2DiscShift)
{
int pt = blockIdx.x; // N-point FFT index (0:63)
int sb = threadIdx.x; // Number of elements x coarse channels (time series) index (0:319)
//int st = blockIdx.y; // Windows index (4000/32 = 125 windows) (0:124)
int Nfft = gridDim.x; // N-point FFT (64)
int Nsubbands = blockDim.x; // Nele*NfineChannels (64*5=320)
// int Nchunks = 2;
// int i = 0;
int recover_idx = 0;
int fftshift_idx = 0;
// for (i = 0; i < Nchunks; i++)
// {
// if (pt < (Nfft/4)) // pt indexing: 0:15 with Nfft/4 = 16
// {
// recover_idx = sb + Nsubbands*(pt + (48*i));
// fftshift_idx = sb + Nsubbands*(pt + (16*(1-i)));
//
// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
// }
// }
if (pt < (Nfft/4)) // Number of FFT points less than 16 (0:15)
{
recover_idx = sb + Nsubbands*pt; // Recover FFT points 0:15
fftshift_idx = sb + Nsubbands*(pt + 16); // Place the recovered points in 16:31 of this array
//fftshift_idx = sb + Nsubbands*pt; // Place the recovered points in 0:15 of this array (No fft shift)
pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
}
if (pt >= (Nfft*3)/4) // Number of FFT points greater than or equal to 48 (48:63)
{
recover_idx = sb + Nsubbands*pt; // Recover FFT points 48:63
fftshift_idx = sb + Nsubbands*(pt - 48); // Place the recovered points in 0:15 of this array
//fftshift_idx = sb + Nsubbands*(pt - 32); // Place the recovered points in 16:31 of this array (No fft shift)
pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
}
return;
}
// When PFB disabled just perform FFT.
__global__ void CopyDataForFFT(char2 *pc2Data, float2 *pf2FFTIn)
{
int blkIdx = blockIdx.y * gridDim.x + blockIdx.x;
int i = blkIdx*blockDim.x + threadIdx.x;
pf2FFTIn[i].x = (float) pc2Data[i].x;
pf2FFTIn[i].y = (float) pc2Data[i].y;
return;
}
// prepares for the next PFB.
__global__ void saveData(char2* dataIn, char2* dataOut){
int i = blockIdx.y*(gridDim.x*blockDim.x) + blockIdx.x*blockDim.x + threadIdx.x;
dataOut[i] = dataIn[i];
return;
}
| cf6c510ef3542647954bd06541824f6c1473014e.cu | #ifdef __cplusplus
extern "C" {
#include "kernels.h"
}
#endif
__global__ void map(char* dataIn,
char2* dataOut,
int channelSelect,
params pfbParams)
{
// select the channel range
int channelMin = pfbParams.fine_channels*channelSelect;
int absIdx = 2 * blockDim.y*(blockIdx.x*pfbParams.coarse_channels + (channelMin+blockIdx.y)) + 2 * threadIdx.y; // times 2 because we are mapping a sequence of values to char2 array.
int mapIdx = blockDim.y*(blockIdx.x*gridDim.y + blockIdx.y) + threadIdx.y;
dataOut[mapIdx].x = dataIn[absIdx];
dataOut[mapIdx].y = dataIn[absIdx+1];
return;
}
/* prepare data for PFB */
__global__ void PFB_kernel(char2* pc2Data,
float2* pf2FFTIn,
float* pfPFBCoeff,
params pfbParams)
{
int blkIdx = blockIdx.y * gridDim.x + blockIdx.x;
int i = blkIdx*blockDim.x + threadIdx.x;
int absCoeff = (blockIdx.x * blockDim.x) + threadIdx.x;
int iNFFT = (gridDim.x * blockDim.x);
int j = 0;
int iAbsIdx = 0;
int coeffIdx = 0;
float2 f2PFBOut = make_float2(0.0, 0.0);
char2 c2Data = make_char2(0, 0);
for (j = 0; j < pfbParams.taps; ++j)
{
/* calculate the absolute index */
iAbsIdx = (j * iNFFT) + i;
coeffIdx = (j * iNFFT) + absCoeff;
/* get the address of the block */
c2Data = pc2Data[iAbsIdx];
f2PFBOut.x += (float) c2Data.x * pfPFBCoeff[coeffIdx];
f2PFBOut.y += (float) c2Data.y * pfPFBCoeff[coeffIdx];
}
pf2FFTIn[i] = f2PFBOut;
return;
}
// Discard channels and perform FFT shift (part of scalloping solution)
//__global__ void Discard_Shift_kernel(float2* pf2FFTOut, float2* pf2DiscShift)
//{
// int pt = threadIdx.x; // N-point FFT index (0:63)
// int sb = blockIdx.x; // Number of elements x coarse channels (time series) index (0:319)
// //int st = blockIdx.y; // Windows index (4000/32 = 125 windows) (0:124)
// int Nfft = blockDim.x; // N-point FFT (64)
// //int Nsubbands = gridDim.x; // Nele*NfineChannels (64*5=320)
//// int Nchunks = 2;
//// int i = 0;
//
// int recover_idx = 0;
// int fftshift_idx = 0;
//
// // // Both pre-processor macros are defined in kernels.h //////////////////////////////
// // pf2DiscShift[fftshift_idx(pt,i,sb,st)].x = pf2FFTOut[recover_idx(pt,i,sb,st)].x;
// // pf2DiscShift[fftshift_idx(pt,i,sb,st)].y = pf2FFTOut[recover_idx(pt,i,sb,st)].y;
// // ////////////////////////////////////////////////////////////////////////////////////
//
//// for (i = 0; i < Nchunks; i++)
//// {
//// if (pt < (Nfft/4)) // pt indexing: 0:15 with Nfft/4 = 16
//// {
//// // recover_idx = (pt + (48*i)) + Nfft*sb + Nfft*Nsubbands*st;
//// // fftshift_idx = (pt + (16*(1-i))) + (Nfft/2)*sb + (Nfft/2)*Nsubbands*st;
////
//// recover_idx = (pt + (48*i)) + Nfft*sb;
//// fftshift_idx = (pt + (16*(1-i))) + (Nfft/2)*sb;
////
//// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
//// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
//// }
//// }
//
// if (pt < (Nfft/4))
// {
// recover_idx = pt + Nfft*sb;
// fftshift_idx = (pt + 16) + (Nfft/2)*sb;
//
// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
// }
//
// if (pt >= (Nfft*(3/4)))
// {
// recover_idx = pt + Nfft*sb;
// fftshift_idx = (pt - 48) + (Nfft/2)*sb;
//
// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
// }
//
// return;
//}
// Discard channels and perform FFT shift (part of scalloping solution and altered dimensions (subbands then nfft))
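// For Nfft = 64 this kernel keeps bins 0:15 and 48:63 (the Nfft/2 bins nearest DC),
// discards the high-frequency bins 16:47, and swaps the two kept quarters so that the
// Nfft/2-point output is stored in FFT-shifted (negative-then-positive frequency) order.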
__global__ void Discard_Shift_kernel(float2* pf2FFTOut, float2* pf2DiscShift)
{
int pt = blockIdx.x; // N-point FFT index (0:63)
int sb = threadIdx.x; // Number of elements x coarse channels (time series) index (0:319)
//int st = blockIdx.y; // Windows index (4000/32 = 125 windows) (0:124)
int Nfft = gridDim.x; // N-point FFT (64)
int Nsubbands = blockDim.x; // Nele*NfineChannels (64*5=320)
// int Nchunks = 2;
// int i = 0;
int recover_idx = 0;
int fftshift_idx = 0;
// for (i = 0; i < Nchunks; i++)
// {
// if (pt < (Nfft/4)) // pt indexing: 0:15 with Nfft/4 = 16
// {
// recover_idx = sb + Nsubbands*(pt + (48*i));
// fftshift_idx = sb + Nsubbands*(pt + (16*(1-i)));
//
// pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
// pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
// }
// }
if (pt < (Nfft/4)) // Number of FFT points less than 16 (0:15)
{
recover_idx = sb + Nsubbands*pt; // Recover FFT points 0:15
fftshift_idx = sb + Nsubbands*(pt + 16); // Place the recovered points in 16:31 of this array
//fftshift_idx = sb + Nsubbands*pt; // Place the recovered points in 0:15 of this array (No fft shift)
pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
}
if (pt >= (Nfft*3)/4) // Number of FFT points greater than or equal to 48 (48:63)
{
recover_idx = sb + Nsubbands*pt; // Recover FFT points 48:63
fftshift_idx = sb + Nsubbands*(pt - 48); // Place the recovered points in 0:15 of this array
//fftshift_idx = sb + Nsubbands*(pt - 32); // Place the recovered points in 16:31 of this array (No fft shift)
pf2DiscShift[fftshift_idx].x = pf2FFTOut[recover_idx].x;
pf2DiscShift[fftshift_idx].y = pf2FFTOut[recover_idx].y;
}
return;
}
// When PFB disabled just perform FFT.
__global__ void CopyDataForFFT(char2 *pc2Data, float2 *pf2FFTIn)
{
int blkIdx = blockIdx.y * gridDim.x + blockIdx.x;
int i = blkIdx*blockDim.x + threadIdx.x;
pf2FFTIn[i].x = (float) pc2Data[i].x;
pf2FFTIn[i].y = (float) pc2Data[i].y;
return;
}
// prepares for the next PFB.
__global__ void saveData(char2* dataIn, char2* dataOut){
int i = blockIdx.y*(gridDim.x*blockDim.x) + blockIdx.x*blockDim.x + threadIdx.x;
dataOut[i] = dataIn[i];
return;
}
|
6bacc1b019c9ed4e865208aaf5ed8deba123f49b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020 ETH Zurich. All Rights Reserved.
#include "density_control.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/field/from_function.h>
#include <mirheo/core/field/utils.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <fstream>
#include <memory>
namespace mirheo
{
namespace density_control_plugin_kernels
{
enum {INVALID_LEVEL=-1};
__device__ int getLevelId(const FieldDeviceHandler& field, const real3& r,
const DensityControlPlugin::LevelBounds& lb)
{
real l = field(r);
return (l > lb.lo && l < lb.hi) ?
(l - lb.lo) / lb.space :
INVALID_LEVEL;
}
__global__ void countInsideRegions(int nSamples, DomainInfo domain, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb,
real seed, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nSamples) return;
real3 r {Saru::uniform01(seed, i - 2, i + 4242),
Saru::uniform01(seed, i - 3, i + 4343),
Saru::uniform01(seed, i - 4, i + 4444)};
r = domain.localSize * (r - 0.5_r); // map [0,1) samples to local coordinates centered at the subdomain origin
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void computeVolumes(int nLevels, int nSamples, const unsigned long long int *nInsides, double subdomainVolume, double *volumes)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nLevels) return;
double v = subdomainVolume * (double) nInsides[i] / (double) nSamples;
volumes[i] = v;
}
__global__ void collectSamples(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void applyForces(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, const real *forces)
{
const real h = 0.25_r;
const real zeroTolerance = 1e-10_r;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId == INVALID_LEVEL) return;
real forceMagn = forces[levelId];
real3 grad = computeGradient(field, r, h);
if (dot(grad, grad) < zeroTolerance) return;
real3 force = normalize(grad) * forceMagn;
atomicAdd(view.forces + i, force);
}
} // namespace density_control_plugin_kernels
DensityControlPlugin::DensityControlPlugin(const MirState *state, std::string name,
std::vector<std::string> pvNames, real targetDensity,
RegionFunc region, real3 resolution,
real levelLo, real levelHi, real levelSpace,
real Kp, real Ki, real Kd,
int tuneEvery, int dumpEvery, int sampleEvery) :
SimulationPlugin(state, name),
pvNames_(pvNames),
targetDensity_(targetDensity),
spaceDecompositionField_(std::make_unique<FieldFromFunction>
(state, name + "_decomposition", region, resolution)),
levelBounds_({levelLo, levelHi, levelSpace}),
Kp_(Kp), Ki_(Ki), Kd_(Kd),
tuneEvery_(tuneEvery),
dumpEvery_(dumpEvery),
sampleEvery_(sampleEvery),
nSamples_(0)
{}
DensityControlPlugin::~DensityControlPlugin() = default;
void DensityControlPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
for (auto &pvName : pvNames_)
pvs_.push_back(simulation->getPVbyNameOrDie(pvName));
spaceDecompositionField_->setup(comm);
const int nLevelSets = (levelBounds_.hi - levelBounds_.lo) / levelBounds_.space;
levelBounds_.space = (levelBounds_.hi - levelBounds_.lo) / nLevelSets;
nInsides_ .resize_anew(nLevelSets);
forces_ .resize_anew(nLevelSets);
const real initError = 0;
controllers_.assign(nLevelSets, PidControl<real>(initError, Kp_, Ki_, Kd_));
volumes_ .resize(nLevelSets);
densities_.resize(nLevelSets);
densities_.assign(nLevelSets, 0.0_r);
computeVolumes(defaultStream, 1000000);
nInsides_ .clearDevice(defaultStream);
forces_ .clearDevice(defaultStream);
nSamples_ = 0;
}
void DensityControlPlugin::beforeForces(hipStream_t stream)
{
if (isTimeEvery(getState(), tuneEvery_))
updatePids(stream);
if (isTimeEvery(getState(), sampleEvery_))
sample(stream);
applyForces(stream);
}
void DensityControlPlugin::serializeAndSend(__UNUSED hipStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, getState()->currentStep, densities_, forces_);
_send(sendBuffer_);
}
void DensityControlPlugin::computeVolumes(hipStream_t stream, int MCnSamples)
{
const int nthreads = 128;
const real seed = 0.42424242_r + rank_ * 17;
const auto domain = getState()->domain;
const int nLevelSets = nInsides_.size();
PinnedBuffer<double> localVolumes(nLevelSets);
nInsides_ .clearDevice(stream);
localVolumes.clearDevice(stream);
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::countInsideRegions,
getNblocks(MCnSamples, nthreads), nthreads, 0, stream,
MCnSamples, domain, spaceDecompositionField_->handler(),
levelBounds_, seed, nInsides_.devPtr());
const real3 L = domain.localSize;
const double subdomainVolume = L.x * L.y * L.z;
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::computeVolumes,
getNblocks(localVolumes.size(), nthreads), nthreads, 0, stream,
localVolumes.size(), MCnSamples, nInsides_.devPtr(),
subdomainVolume, localVolumes.devPtr());
volumes_.resize(nLevelSets);
volumes_.assign(nLevelSets, 0.0);
localVolumes.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(localVolumes.hostPtr(), volumes_.data(), volumes_.size(), MPI_DOUBLE, MPI_SUM, comm_) );
// std::copy(localVolumes.begin(), localVolumes.end(), volumes.begin());
}
void DensityControlPlugin::sample(hipStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::collectSamples,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, nInsides_.devPtr());
}
++nSamples_;
}
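// Turns the accumulated per-level sample counts into number densities (count / (level-set
// volume * number of sampling steps)), feeds the deviation from the target density through
// one PID controller per level set, and uses the controller outputs as the force magnitudes
// consumed by applyForces().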
void DensityControlPlugin::updatePids(hipStream_t stream)
{
nInsides_.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(MPI_IN_PLACE, nInsides_.hostPtr(), nInsides_.size(),
MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm_) );
for (size_t i = 0; i < volumes_.size(); ++i)
{
const double denom = volumes_[i] * nSamples_;
densities_[i] = (denom > 1e-6) ?
nInsides_[i] / denom :
0.0;
}
for (size_t i = 0; i < densities_.size(); ++i)
{
const real error = densities_[i] - targetDensity_;
forces_[i] = controllers_[i].update(error);
}
forces_.uploadToDevice(stream);
nInsides_.clearDevice(stream);
nSamples_ = 0;
}
void DensityControlPlugin::applyForces(hipStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::applyForces,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, forces_.devPtr());
}
}
void DensityControlPlugin::checkpoint(MPI_Comm comm, const std::string& path, int checkpointId)
{
const auto filename = createCheckpointNameWithId(path, "plugin." + getName(), "txt", checkpointId);
{
std::ofstream fout(filename);
for (const auto& pid : controllers_)
fout << pid << std::endl;
}
createCheckpointSymlink(comm, path, "plugin." + getName(), "txt", checkpointId);
}
void DensityControlPlugin::restart(__UNUSED MPI_Comm comm, const std::string& path)
{
const auto filename = createCheckpointName(path, "plugin." + getName(), "txt");
std::ifstream fin(filename);
for (auto& pid : controllers_)
fin >> pid;
}
PostprocessDensityControl::PostprocessDensityControl(std::string name, std::string filename) :
PostprocessPlugin(name)
{
auto status = fdump_.open(filename, "w");
if (status != FileWrapper::Status::Success)
die("Could not open file '%s'", filename.c_str());
}
void PostprocessDensityControl::deserialize()
{
MirState::StepType currentTimeStep;
MirState::TimeType currentTime;
std::vector<real> densities, forces;
SimpleSerializer::deserialize(data_, currentTime, currentTimeStep, densities, forces);
if (rank_ == 0)
{
fprintf(fdump_.get(), "%g %lld ", currentTime, currentTimeStep);
for (auto d : densities) fprintf(fdump_.get(), "%g ", d);
for (auto f : forces) fprintf(fdump_.get(), "%g ", f);
fprintf(fdump_.get(), "\n");
fflush(fdump_.get());
}
}
} // namespace mirheo
| 6bacc1b019c9ed4e865208aaf5ed8deba123f49b.cu | // Copyright 2020 ETH Zurich. All Rights Reserved.
#include "density_control.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/field/from_function.h>
#include <mirheo/core/field/utils.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/cuda_rng.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <fstream>
#include <memory>
namespace mirheo
{
namespace density_control_plugin_kernels
{
enum {INVALID_LEVEL=-1};
__device__ int getLevelId(const FieldDeviceHandler& field, const real3& r,
const DensityControlPlugin::LevelBounds& lb)
{
real l = field(r);
return (l > lb.lo && l < lb.hi) ?
(l - lb.lo) / lb.space :
INVALID_LEVEL;
}
__global__ void countInsideRegions(int nSamples, DomainInfo domain, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb,
real seed, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nSamples) return;
real3 r {Saru::uniform01(seed, i - 2, i + 4242),
Saru::uniform01(seed, i - 3, i + 4343),
Saru::uniform01(seed, i - 4, i + 4444)};
r = domain.localSize * (r - 0.5_r);
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void computeVolumes(int nLevels, int nSamples, const unsigned long long int *nInsides, double subdomainVolume, double *volumes)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= nLevels) return;
double v = subdomainVolume * (double) nInsides[i] / (double) nSamples;
volumes[i] = v;
}
__global__ void collectSamples(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, unsigned long long int *nInsides)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId != INVALID_LEVEL)
atomicAdd(&nInsides[levelId], 1);
}
__global__ void applyForces(PVview view, FieldDeviceHandler field, DensityControlPlugin::LevelBounds lb, const real *forces)
{
const real h = 0.25_r;
const real zeroTolerance = 1e-10_r;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= view.size) return;
auto r = Real3_int(view.readPosition(i)).v;
int levelId = getLevelId(field, r, lb);
if (levelId == INVALID_LEVEL) return;
real forceMagn = forces[levelId];
real3 grad = computeGradient(field, r, h);
if (dot(grad, grad) < zeroTolerance) return;
real3 force = normalize(grad) * forceMagn;
atomicAdd(view.forces + i, force);
}
} // namespace density_control_plugin_kernels
DensityControlPlugin::DensityControlPlugin(const MirState *state, std::string name,
std::vector<std::string> pvNames, real targetDensity,
RegionFunc region, real3 resolution,
real levelLo, real levelHi, real levelSpace,
real Kp, real Ki, real Kd,
int tuneEvery, int dumpEvery, int sampleEvery) :
SimulationPlugin(state, name),
pvNames_(pvNames),
targetDensity_(targetDensity),
spaceDecompositionField_(std::make_unique<FieldFromFunction>
(state, name + "_decomposition", region, resolution)),
levelBounds_({levelLo, levelHi, levelSpace}),
Kp_(Kp), Ki_(Ki), Kd_(Kd),
tuneEvery_(tuneEvery),
dumpEvery_(dumpEvery),
sampleEvery_(sampleEvery),
nSamples_(0)
{}
DensityControlPlugin::~DensityControlPlugin() = default;
void DensityControlPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
for (auto &pvName : pvNames_)
pvs_.push_back(simulation->getPVbyNameOrDie(pvName));
spaceDecompositionField_->setup(comm);
const int nLevelSets = (levelBounds_.hi - levelBounds_.lo) / levelBounds_.space;
levelBounds_.space = (levelBounds_.hi - levelBounds_.lo) / nLevelSets;
nInsides_ .resize_anew(nLevelSets);
forces_ .resize_anew(nLevelSets);
const real initError = 0;
controllers_.assign(nLevelSets, PidControl<real>(initError, Kp_, Ki_, Kd_));
volumes_ .resize(nLevelSets);
densities_.resize(nLevelSets);
densities_.assign(nLevelSets, 0.0_r);
computeVolumes(defaultStream, 1000000);
nInsides_ .clearDevice(defaultStream);
forces_ .clearDevice(defaultStream);
nSamples_ = 0;
}
void DensityControlPlugin::beforeForces(cudaStream_t stream)
{
if (isTimeEvery(getState(), tuneEvery_))
updatePids(stream);
if (isTimeEvery(getState(), sampleEvery_))
sample(stream);
applyForces(stream);
}
void DensityControlPlugin::serializeAndSend(__UNUSED cudaStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, getState()->currentStep, densities_, forces_);
_send(sendBuffer_);
}
void DensityControlPlugin::computeVolumes(cudaStream_t stream, int MCnSamples)
{
const int nthreads = 128;
const real seed = 0.42424242_r + rank_ * 17;
const auto domain = getState()->domain;
const int nLevelSets = nInsides_.size();
PinnedBuffer<double> localVolumes(nLevelSets);
nInsides_ .clearDevice(stream);
localVolumes.clearDevice(stream);
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::countInsideRegions,
getNblocks(MCnSamples, nthreads), nthreads, 0, stream,
MCnSamples, domain, spaceDecompositionField_->handler(),
levelBounds_, seed, nInsides_.devPtr());
const real3 L = domain.localSize;
const double subdomainVolume = L.x * L.y * L.z;
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::computeVolumes,
getNblocks(localVolumes.size(), nthreads), nthreads, 0, stream,
localVolumes.size(), MCnSamples, nInsides_.devPtr(),
subdomainVolume, localVolumes.devPtr());
volumes_.resize(nLevelSets);
volumes_.assign(nLevelSets, 0.0);
localVolumes.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(localVolumes.hostPtr(), volumes_.data(), volumes_.size(), MPI_DOUBLE, MPI_SUM, comm_) );
// std::copy(localVolumes.begin(), localVolumes.end(), volumes.begin());
}
void DensityControlPlugin::sample(cudaStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::collectSamples,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, nInsides_.devPtr());
}
++nSamples_;
}
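// Turns the accumulated per-level sample counts into number densities (count / (level-set
// volume * number of sampling steps)), feeds the deviation from the target density through
// one PID controller per level set, and uses the controller outputs as the force magnitudes
// consumed by applyForces().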
void DensityControlPlugin::updatePids(cudaStream_t stream)
{
nInsides_.downloadFromDevice(stream);
MPI_Check( MPI_Allreduce(MPI_IN_PLACE, nInsides_.hostPtr(), nInsides_.size(),
MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm_) );
for (size_t i = 0; i < volumes_.size(); ++i)
{
const double denom = volumes_[i] * nSamples_;
densities_[i] = (denom > 1e-6) ?
nInsides_[i] / denom :
0.0;
}
for (size_t i = 0; i < densities_.size(); ++i)
{
const real error = densities_[i] - targetDensity_;
forces_[i] = controllers_[i].update(error);
}
forces_.uploadToDevice(stream);
nInsides_.clearDevice(stream);
nSamples_ = 0;
}
void DensityControlPlugin::applyForces(cudaStream_t stream)
{
const int nthreads = 128;
for (auto pv : pvs_)
{
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
density_control_plugin_kernels::applyForces,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, spaceDecompositionField_->handler(),
levelBounds_, forces_.devPtr());
}
}
void DensityControlPlugin::checkpoint(MPI_Comm comm, const std::string& path, int checkpointId)
{
const auto filename = createCheckpointNameWithId(path, "plugin." + getName(), "txt", checkpointId);
{
std::ofstream fout(filename);
for (const auto& pid : controllers_)
fout << pid << std::endl;
}
createCheckpointSymlink(comm, path, "plugin." + getName(), "txt", checkpointId);
}
void DensityControlPlugin::restart(__UNUSED MPI_Comm comm, const std::string& path)
{
const auto filename = createCheckpointName(path, "plugin." + getName(), "txt");
std::ifstream fin(filename);
for (auto& pid : controllers_)
fin >> pid;
}
PostprocessDensityControl::PostprocessDensityControl(std::string name, std::string filename) :
PostprocessPlugin(name)
{
auto status = fdump_.open(filename, "w");
if (status != FileWrapper::Status::Success)
die("Could not open file '%s'", filename.c_str());
}
void PostprocessDensityControl::deserialize()
{
MirState::StepType currentTimeStep;
MirState::TimeType currentTime;
std::vector<real> densities, forces;
SimpleSerializer::deserialize(data_, currentTime, currentTimeStep, densities, forces);
if (rank_ == 0)
{
fprintf(fdump_.get(), "%g %lld ", currentTime, currentTimeStep);
for (auto d : densities) fprintf(fdump_.get(), "%g ", d);
for (auto f : forces) fprintf(fdump_.get(), "%g ", f);
fprintf(fdump_.get(), "\n");
fflush(fdump_.get());
}
}
} // namespace mirheo
|
b96d3bbfada884d679f3ee26b1414792ddd340bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <iostream>
#include <thread>
#include <SDL.h>
#include <hiprand/hiprand_kernel.h>
#include "device_launch_parameters.h"
#include "texturegpu.cuh"
#include "window.hpp"
#include "hitable_list.cuh"
#include "sphere_hip.cuh"
#include "kernels_hip.cuh"
#include "dielectric.cuh"
#include "lambertian.cuh"
#include "metal.cuh"
#include "camera.cuh"
#include "ray.cuh"
#include "vec3.cuh"
#include "config.cuh"
#include "cuda_utils.cuh"
#include "managed_ptr.cuh"
template <typename... Args>
void launch_2D_texture_kernel(void (*kernel)(TextureGPU*, Args...),
const Config& config, TextureGPU* tex,
Args... args) {
dim3 blocks = config.blocks(tex->get_width(), tex->get_height());
kernel << <blocks, config.threads >> > (tex, std::forward<Args>(args)...);
cudaCheckErr(hipGetLastError());
tex->copy_to_cpu();
}
#define RND (hiprand_uniform(rand_state))
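// Builds the random demo scene: one large ground sphere, a 22x22 grid of small spheres with
// randomly chosen diffuse/metal/glass materials, and three large feature spheres.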
__global__ void create_world(HitableList** hitable_objects,
hiprandState_t* rand_state) {
if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
*hitable_objects = new HitableList();
(*hitable_objects)
->push_back(new Sphere(Vec3(0, -1000, -1), 1000,
new Lambertian(Vec3(0.5f, 0.5f, 0.5f))));
for (int a = -11; a < 11; ++a) {
for (int b = -11; b < 11; ++b) {
float choose_mat = RND;
Vec3 center(a + RND, 0.2f, b + RND);
if (choose_mat < 0.8f) {
(*hitable_objects)
->push_back(new Sphere(
center, 0.2f,
new Lambertian(Vec3(RND * RND, RND * RND, RND * RND))));
}
else if (choose_mat < 0.95f) {
(*hitable_objects)
->push_back(new Sphere(
center, 0.2f,
new Metal(Vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND),
0.5f * (1.0f + RND)),
0.5f * RND)));
}
else {
(*hitable_objects)
->push_back(new Sphere(center, 0.2f, new Dielectric(1.5f)));
}
}
}
(*hitable_objects)
->push_back(new Sphere(Vec3(0, 1, 0), 1.0f, new Dielectric(1.5f)));
(*hitable_objects)
->push_back(new Sphere(Vec3(-4, 1, 0), 1.0f,
new Lambertian(Vec3(0.4f, 0.2f, 0.1f))));
(*hitable_objects)
->push_back(new Sphere(Vec3(4, 1, 0), 1.0f,
new Metal(Vec3(0.7f, 0.6f, 0.5f), 0.0f)));
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t* rand_state) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ((x >= max_x) || (y >= max_y)) {
return;
}
int pixel_index = y * max_x + x;
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
template <class T, typename... Args>
__global__ void push_back(HitableList** hitable_objects, Args... args) {
(*hitable_objects)->push_back(new T(args...));
}
void input_thread_task(Window& window, Camera& camera) {
while (!window.should_quit()) {
SDL_Event e;
while (SDL_PollEvent(&e) != 0) {
if (e.type == SDL_QUIT) {
window.close();
}
}
const Uint8* state = SDL_GetKeyboardState(NULL);
if (state[SDL_SCANCODE_W]) {
camera.move(Camera::Movement::FORWARD, window.get_delta_time());
}
if (state[SDL_SCANCODE_S]) {
camera.move(Camera::Movement::BACKWARD, window.get_delta_time());
}
if (state[SDL_SCANCODE_A]) {
camera.move(Camera::Movement::LEFT, window.get_delta_time());
}
if (state[SDL_SCANCODE_D]) {
camera.move(Camera::Movement::RIGHT, window.get_delta_time());
}
if (state[SDL_SCANCODE_Q]) {
camera.move(Camera::Movement::DOWN, window.get_delta_time());
}
if (state[SDL_SCANCODE_E]) {
camera.move(Camera::Movement::UP, window.get_delta_time());
}
}
}
int main() {
Config gConfig;
{
Window window("Raytracer", 800, 400);
Vec3 lookfrom(13, 2, 3);
Vec3 lookat(0, 0, 0);
float aperture = 0.1f;
Camera gCamera(lookfrom, lookat, Vec3(0, 1, 0), 30,
window.get_aspect_ratio(), aperture);
managed_ptr<TextureGPU> viewport = make_managed<TextureGPU>(
window.get_renderer(), window.get_width(), window.get_height(), 0.75f);
// Init hiprand
hiprandState_t* d_rand_state = cuda_malloc<hiprandState_t>(
viewport->get_n_pixels() * sizeof(hiprandState_t));
dim3 blocks = gConfig.blocks(viewport->get_width(), viewport->get_height());
render_init << <blocks, gConfig.threads >> > (
viewport->get_width(), viewport->get_height(), d_rand_state);
cudaCheckErr(hipDeviceSynchronize());
cudaCheckErr(hipGetLastError());
// Create world
HitableList** hitable_objects =
cuda_malloc<HitableList*>(sizeof(HitableList*));
create_world << <1, 1 >> > (hitable_objects, d_rand_state);
cudaCheckErr(hipDeviceSynchronize());
cudaCheckErr(hipGetLastError());
std::thread input_thread(input_thread_task, std::ref(window),
std::ref(gCamera));
gCamera.set_ns(20);
int frames = 0;
float time = 0.0f;
float avg_fps = 0.0f;
while (!window.should_quit()) {
window.update_delta_time();
window.clear_render();
launch_2D_texture_kernel(chapter_11_kernel, gConfig, viewport.get(),
gCamera, (Hitable**)hitable_objects,
d_rand_state);
viewport->copy_to_renderer(window.get_renderer());
window.present_render();
time += window.get_delta_time();
++frames;
avg_fps += window.get_fps();
if (time >= 0.5f) {
std::cout << avg_fps / frames << std::endl;
time = 0.0f;
frames = 0;
avg_fps = 0;
}
}
input_thread.join();
}
cudaCheckErr(hipDeviceReset());
return 0;
}
| b96d3bbfada884d679f3ee26b1414792ddd340bd.cu | #include <chrono>
#include <iostream>
#include <thread>
#include <SDL.h>
#include <curand_kernel.h>
#include "device_launch_parameters.h"
#include "texturegpu.cuh"
#include "window.hpp"
#include "hitable_list.cuh"
#include "sphere.cuh"
#include "kernels.cuh"
#include "dielectric.cuh"
#include "lambertian.cuh"
#include "metal.cuh"
#include "camera.cuh"
#include "ray.cuh"
#include "vec3.cuh"
#include "config.cuh"
#include "cuda_utils.cuh"
#include "managed_ptr.cuh"
template <typename... Args>
void launch_2D_texture_kernel(void (*kernel)(TextureGPU*, Args...),
const Config& config, TextureGPU* tex,
Args... args) {
dim3 blocks = config.blocks(tex->get_width(), tex->get_height());
kernel << <blocks, config.threads >> > (tex, std::forward<Args>(args)...);
cudaCheckErr(cudaGetLastError());
tex->copy_to_cpu();
}
#define RND (curand_uniform(rand_state))
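// Builds the random demo scene: one large ground sphere, a 22x22 grid of small spheres with
// randomly chosen diffuse/metal/glass materials, and three large feature spheres.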
__global__ void create_world(HitableList** hitable_objects,
curandState* rand_state) {
if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
*hitable_objects = new HitableList();
(*hitable_objects)
->push_back(new Sphere(Vec3(0, -1000, -1), 1000,
new Lambertian(Vec3(0.5f, 0.5f, 0.5f))));
for (int a = -11; a < 11; ++a) {
for (int b = -11; b < 11; ++b) {
float choose_mat = RND;
Vec3 center(a + RND, 0.2f, b + RND);
if (choose_mat < 0.8f) {
(*hitable_objects)
->push_back(new Sphere(
center, 0.2f,
new Lambertian(Vec3(RND * RND, RND * RND, RND * RND))));
}
else if (choose_mat < 0.95f) {
(*hitable_objects)
->push_back(new Sphere(
center, 0.2f,
new Metal(Vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND),
0.5f * (1.0f + RND)),
0.5f * RND)));
}
else {
(*hitable_objects)
->push_back(new Sphere(center, 0.2f, new Dielectric(1.5f)));
}
}
}
(*hitable_objects)
->push_back(new Sphere(Vec3(0, 1, 0), 1.0f, new Dielectric(1.5f)));
(*hitable_objects)
->push_back(new Sphere(Vec3(-4, 1, 0), 1.0f,
new Lambertian(Vec3(0.4f, 0.2f, 0.1f))));
(*hitable_objects)
->push_back(new Sphere(Vec3(4, 1, 0), 1.0f,
new Metal(Vec3(0.7f, 0.6f, 0.5f), 0.0f)));
}
}
__global__ void render_init(int max_x, int max_y, curandState* rand_state) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if ((x >= max_x) || (y >= max_y)) {
return;
}
int pixel_index = y * max_x + x;
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
template <class T, typename... Args>
__global__ void push_back(HitableList** hitable_objects, Args... args) {
(*hitable_objects)->push_back(new T(args...));
}
void input_thread_task(Window& window, Camera& camera) {
while (!window.should_quit()) {
SDL_Event e;
while (SDL_PollEvent(&e) != 0) {
if (e.type == SDL_QUIT) {
window.close();
}
}
const Uint8* state = SDL_GetKeyboardState(NULL);
if (state[SDL_SCANCODE_W]) {
camera.move(Camera::Movement::FORWARD, window.get_delta_time());
}
if (state[SDL_SCANCODE_S]) {
camera.move(Camera::Movement::BACKWARD, window.get_delta_time());
}
if (state[SDL_SCANCODE_A]) {
camera.move(Camera::Movement::LEFT, window.get_delta_time());
}
if (state[SDL_SCANCODE_D]) {
camera.move(Camera::Movement::RIGHT, window.get_delta_time());
}
if (state[SDL_SCANCODE_Q]) {
camera.move(Camera::Movement::DOWN, window.get_delta_time());
}
if (state[SDL_SCANCODE_E]) {
camera.move(Camera::Movement::UP, window.get_delta_time());
}
}
}
int main() {
Config gConfig;
{
Window window("Raytracer", 800, 400);
Vec3 lookfrom(13, 2, 3);
Vec3 lookat(0, 0, 0);
float aperture = 0.1f;
Camera gCamera(lookfrom, lookat, Vec3(0, 1, 0), 30,
window.get_aspect_ratio(), aperture);
managed_ptr<TextureGPU> viewport = make_managed<TextureGPU>(
window.get_renderer(), window.get_width(), window.get_height(), 0.75f);
// Init curand
curandState* d_rand_state = cuda_malloc<curandState>(
viewport->get_n_pixels() * sizeof(curandState));
dim3 blocks = gConfig.blocks(viewport->get_width(), viewport->get_height());
render_init << <blocks, gConfig.threads >> > (
viewport->get_width(), viewport->get_height(), d_rand_state);
cudaCheckErr(cudaDeviceSynchronize());
cudaCheckErr(cudaGetLastError());
// Create world
HitableList** hitable_objects =
cuda_malloc<HitableList*>(sizeof(HitableList*));
create_world << <1, 1 >> > (hitable_objects, d_rand_state);
cudaCheckErr(cudaDeviceSynchronize());
cudaCheckErr(cudaGetLastError());
std::thread input_thread(input_thread_task, std::ref(window),
std::ref(gCamera));
gCamera.set_ns(20);
int frames = 0;
float time = 0.0f;
float avg_fps = 0.0f;
while (!window.should_quit()) {
window.update_delta_time();
window.clear_render();
launch_2D_texture_kernel(chapter_11_kernel, gConfig, viewport.get(),
gCamera, (Hitable**)hitable_objects,
d_rand_state);
viewport->copy_to_renderer(window.get_renderer());
window.present_render();
time += window.get_delta_time();
++frames;
avg_fps += window.get_fps();
if (time >= 0.5f) {
std::cout << avg_fps / frames << std::endl;
time = 0.0f;
frames = 0;
avg_fps = 0;
}
}
input_thread.join();
}
cudaCheckErr(cudaDeviceReset());
return 0;
}
|
ee2272187f6403b961d31c819ce7d51baf43be9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hist.cuh"
#include "point_hist_half_byte_template.cuh"
#include "compute_hist_loop_one_stat.cuh"
using namespace cooperative_groups;
namespace NKernel {
template <int BlockSize>
struct TPointHistBinary: public TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>> {
using TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>>::Histogram;
__forceinline__ __device__ TPointHistBinary(float* buff)
: TPointHistHalfByteBase<BlockSize,TPointHistBinary<BlockSize>>(buff) {
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 500
return 4;
#elif __CUDA_ARCH__ < 700
return 1;
#else
return 2;
#endif
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fid = threadIdx.x;
const int fold = 0;
if (fid < fCount ) {
TFeatureInBlock group = features[fid];
if (group.Folds) {
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize +
group.FoldOffsetInGroup;
const int groupId = fid / 4;
const int fMask = 1 << (3 - (fid & 3));
float val = 0.f;
#pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
val += Histogram[8 * i + groupId];
}
}
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
using THist = TPointHistBinary<768>;
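// Grid layout used by both overloads below: z indexes the statistic, y the leaf/partition,
// and x covers the features in groups of 32, scaled up so the total block count is at least
// blocksPerSm * SMCount() to keep the device occupied.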
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 32>), dim3(numBlocks), dim3(blockSize), 0, stream, features,
fCount,
bins,
binsLineSize,
stats,
statLineSize,
parts,
partIds,
histograms);
}
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 32>), dim3(numBlocks), dim3(blockSize), 0, stream, features,
fCount,
cindex,
indices,
stats,
statLineSize,
parts,
partIds,
histograms);
}
}
| ee2272187f6403b961d31c819ce7d51baf43be9a.cu | #include "hist.cuh"
#include "point_hist_half_byte_template.cuh"
#include "compute_hist_loop_one_stat.cuh"
using namespace cooperative_groups;
namespace NKernel {
template <int BlockSize>
struct TPointHistBinary: public TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>> {
using TPointHistHalfByteBase<BlockSize, TPointHistBinary<BlockSize>>::Histogram;
__forceinline__ __device__ TPointHistBinary(float* buff)
: TPointHistHalfByteBase<BlockSize,TPointHistBinary<BlockSize>>(buff) {
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 500
return 4;
#elif __CUDA_ARCH__ < 700
return 1;
#else
return 2;
#endif
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fid = threadIdx.x;
const int fold = 0;
if (fid < fCount ) {
TFeatureInBlock group = features[fid];
if (group.Folds) {
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize +
group.FoldOffsetInGroup;
const int groupId = fid / 4;
const int fMask = 1 << (3 - (fid & 3));
float val = 0.f;
#pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
val += Histogram[8 * i + groupId];
}
}
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
using THist = TPointHistBinary<768>;
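// Grid layout used by both overloads below: z indexes the statistic, y the leaf/partition,
// and x covers the features in groups of 32, scaled up so the total block count is at least
// blocksPerSm * SMCount() to keep the device occupied.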
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 32><<<numBlocks, blockSize, 0, stream>>>(features,
fCount,
bins,
binsLineSize,
stats,
statLineSize,
parts,
partIds,
histograms);
}
void ComputeHistBinary(const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.z = numStats;
numBlocks.y = partCount;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();
numBlocks.x = (fCount + 31) / 32;
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));
ComputeSplitPropertiesGatherImpl<THist, blockSize, 32><<<numBlocks, blockSize, 0, stream>>>(features,
fCount,
cindex,
indices,
stats,
statLineSize,
parts,
partIds,
histograms);
}
}
|
a69ada12188d3133ab802e9182ebe500eea37273.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file indexing_op.cu
* \brief GPU implementation of indexing operator
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
#include "./util/tensor_util-inl.h"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
*/
struct is_valid_check {
template <typename DType>
MSHADOW_XINLINE static void Map(int i,
char* out,
const DType* data,
const DType min,
const DType max) {
if (data[i] < min || data[i] > max)
*out = 1;
}
};
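/*! \brief Accumulates embedding/take gradients into a row-sparse output: each thread adds one
 *         element of ograd into the compacted row prefix_sum[idx] - 1, where prefix_sum is the
 *         inclusive prefix sum of the row-occupancy flags.
 */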
struct AddTakeGradRspGPUKernel {
template <typename DType, typename IType>
__device__ __forceinline__ static void Map(int tid,
DType* out,
const nnvm::dim_t* prefix_sum,
const IType* data,
const DType* ograd,
const nnvm::dim_t row_length) {
using nnvm::dim_t;
const dim_t data_i = tid / row_length;
const dim_t grad_i = tid % row_length;
const dim_t irow = static_cast<dim_t>(data[data_i]);
const dim_t rsp_row = prefix_sum[irow] - 1;
const DType val = ograd[data_i * row_length + grad_i];
atomicAdd(static_cast<DType*>(&(out[rsp_row * row_length + grad_i])), val);
}
};
/*
* \brief kernel for backward computation for take, executed with deterministic order
* \param thread_id the thread id
* \param out the output gradient data
* \param lookup_table the table to lookup the position of an id in gradient array
* \param sorted_data the sorted data input
* \param original_idx the original indices of the sorted data input
* \param ograd head gradient
* \param row_length the output dimension
* \param num_threads_per_row the number of threads to process a row together
* \param SZ the number of features a thread is responsible for
*/
template <int SZ>
struct AddTakeGradRspDeterministicKernel {
template <typename DType>
__device__ __forceinline__ static void Map(int thread_id,
DType* out,
const nnvm::dim_t* lookup_table,
const nnvm::dim_t* sorted_data,
const nnvm::dim_t data_size,
const nnvm::dim_t* original_idx,
const DType* ograd,
const nnvm::dim_t row_length,
const nnvm::dim_t num_threads_per_row) {
using nnvm::dim_t;
int tid = thread_id / num_threads_per_row;
const int feature_start = thread_id % num_threads_per_row * SZ;
int num_features = SZ;
if (feature_start + num_features > row_length) {
num_features = row_length - feature_start;
}
if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) {
DType acc[SZ];
#pragma unroll
for (int i = 0; i < SZ; i++) {
acc[i] = 0;
}
const dim_t data = sorted_data[tid];
const dim_t row_id = lookup_table[data];
const dim_t out_offset = row_id * row_length + feature_start;
do {
const dim_t idx = original_idx[tid];
const dim_t ograd_offset = idx * row_length + feature_start;
for (int i = 0; i < num_features; i++) {
acc[i] += ograd[ograd_offset + i];
}
tid++;
} while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]);
for (int i = 0; i < num_features; i++) {
out[out_offset + i] += acc[i];
}
}
}
};
template <bool clip = true>
struct TakeZeroAxisGPU {
// assume that idx have been flattened to a 1-D tensor (N,)
// assume that out_data and in_data have been flattened to 2-D tensors, (N, M) and (K, M)
// M is the number of columns of in_data and out_data
// K is the number of rows of in_data
// i is the index of out_data
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i,
DType* out_data,
const DType* in_data,
const IType* idx,
const int64_t M,
const int64_t K) {
int64_t j = static_cast<int64_t>(idx[i / M]);
if (clip) {
if (j <= 0)
j = 0;
else if (j >= K)
j = K - 1;
} else {
j = j % K;
j += (j < 0) ? K : 0;
}
out_data[i] = in_data[j * M + i % M];
}
};
/*
* \brief returns true if all indices are between [min, max]
* \param s the stream
* \param data_ptr the indices on the stream
* \param data_size the number of indices to examine
* \param min the expected min value for indices
* \param max the expected max value for indices
* \param is_valid_ptr the temparary workspace
*/
template <typename DType>
bool CheckIndexOutOfBound(mshadow::Stream<gpu>* s,
const DType* data_ptr,
size_t data_size,
const DType min,
const DType max,
char* is_valid_ptr) {
using namespace mxnet_op;
int32_t is_valid = 0;
Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
CUDA_CALL(hipMemcpyAsync(&is_valid,
is_valid_ptr,
sizeof(char),
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
return is_valid == 0;
}
// Embedding forward implementation with dense weight
template <>
void EmbeddingOpForwardDnsImpl<gpu>(mshadow::Stream<gpu>* s,
const TBlob& data,
const TBlob& weight,
const OpReqType req,
const TBlob& output) {
using namespace mxnet_op;
const mxnet::TShape& ishape = data.shape_;
const mxnet::TShape& oshape = output.shape_;
MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
Tensor<gpu, 1, IType> idx =
data.get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
Tensor<gpu, 2, DType> wmat = weight.get<gpu, 2, DType>(s);
Tensor<gpu, 2, DType> out = output.get_with_shape<gpu, 2, DType>(
Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(
s, oshape.Size(), out.dptr_, wmat.dptr_, idx.dptr_, wmat.shape_[1], wmat.shape_[0]);
});
});
}
template <>
void SparseEmbeddingOpForwardRspImpl<gpu>(const OpContext& ctx,
const TBlob& data,
const NDArray& weight,
const OpReqType req,
const TBlob& output) {
if (req == kNullOp)
return;
using namespace rowsparse;
using namespace mxnet_op;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
// zeros weight
if (req == kWriteTo && !weight.storage_initialized()) {
size_t out_size = output.shape_.Size();
MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
Fill<false>(
s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size), gpu::kDevMask), kWriteTo, 0);
})
return;
}
// check out-of-bound indices
MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
DType min = 0;
DType max = static_cast<DType>(weight.shape()[0] - 1);
DType* data_ptr = data.dptr<DType>();
size_t data_size = data.shape_.Size();
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckIndexOutOfBound(s, data_ptr, data_size, min, max, is_valid_ptr);
CHECK(is_valid) << "SparseEmbedding input contains data out of bound";
})
// the weight is actually dense
if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
} else {
EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
}
}
template <typename IType, typename DType, typename RType>
void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
using namespace mshadow;
using namespace mxnet_op;
using namespace expr;
using namespace rowsparse;
using nnvm::dim_t;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const dim_t num_rows = output.shape()[0];
const dim_t row_length = output.shape()[1];
const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
// temp resource declarations
dim_t* lookup_table = nullptr;
void* temp_storage = nullptr;
dim_t* sorted_data = nullptr;
dim_t* original_idx = nullptr;
// calculate number of bytes for temp resources
size_t lookup_table_bytes = num_rows * sizeof(dim_t);
size_t sorted_data_storage_bytes = data_size * sizeof(dim_t);
size_t original_idx_storage_bytes = data_size * sizeof(dim_t);
size_t sort_workspace_size = SortByKeyWorkspaceSize<dim_t, dim_t, gpu>(data_size);
size_t unique_workspace_bytes = 0;
// estimate unique temp space
IType* data_ptr = data.dptr<IType>();
size_t* null_ptr = nullptr;
// unique operations will be applied on sorted data
hipcub::DeviceSelect::Unique(nullptr,
unique_workspace_bytes,
sorted_data,
sorted_data,
null_ptr,
data_size,
Stream<gpu>::GetStream(s));
// One more space reserved for unique count
size_t temp_workspace_bytes = std::max(unique_workspace_bytes, sort_workspace_size);
size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes +
original_idx_storage_bytes + temp_workspace_bytes;
// request resource and split it. layout is:
// lookup_table, sorted_data, original_idx, temp_storage
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(total_storage_bytes), s);
lookup_table = reinterpret_cast<dim_t*>(workspace.dptr_);
sorted_data = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes);
original_idx =
reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes + sorted_data_storage_bytes);
temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes;
// check out-of-bound indices
{
IType min = 0;
IType max = static_cast<IType>(output.shape()[0] - 1);
IType* data_ptr = data.dptr<IType>();
size_t data_size = data.shape_.Size();
bool is_valid = CheckIndexOutOfBound(
s, data_ptr, data_size, min, max, reinterpret_cast<char*>(temp_storage));
CHECK(is_valid) << "Embedding input contains data out of bound";
}
// make a copy of the data, to be sorted
TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask);
auto sorted_data_tensor = sorted_data_blob.FlatTo1D<gpu, dim_t>(s);
mxnet_op::copy(s, sorted_data_blob, data);
// generate original idx
Tensor<gpu, 1, dim_t> original_idx_tensor(original_idx, Shape1(data_size), s);
Kernel<range_fwd, gpu>::Launch(
s, data_size, 1, static_cast<dim_t>(0), static_cast<dim_t>(1), kWriteTo, original_idx);
// sort data with its original idx
int num_bits = common::ilog2ui(num_rows - 1);
char* temp_storage_ptr = reinterpret_cast<char*>(temp_storage);
Tensor<gpu, 1, char> temp_storage_tensor(temp_storage_ptr, Shape1(sort_workspace_size), s);
SortByKey(sorted_data_tensor, original_idx_tensor, true, &temp_storage_tensor, 0, num_bits);
// compute unique row ids based on sorted values.
output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1));
// fill row_idx array of output matrix, using the row_flg values
RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
hipcub::DeviceSelect::Unique(temp_storage_ptr,
unique_workspace_bytes,
sorted_data,
grad_row_idx,
grad_row_idx + data_size,
data_size,
Stream<gpu>::GetStream(s));
dim_t nnr = 0;
CUDA_CALL(hipMemcpyAsync(&nnr,
grad_row_idx + data_size,
sizeof(RType),
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
CHECK_EQ(output.shape().ndim(), 2) << "Unexpected ndim";
output.CheckAndAllocData(Shape2(nnr, output.shape()[1]));
output.set_aux_shape(kIdx, Shape1(nnr));
// generate lookup table
Kernel<MarkLookupTable, gpu>::Launch(s, nnr, lookup_table, grad_row_idx);
// accumulate gradients
DType* grad_data = output.data().dptr<DType>();
Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
const int SZ = 4;
const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ;
Kernel<AddTakeGradRspDeterministicKernel<SZ>, gpu>::Launch(s,
data_size * num_threads_per_row,
grad_data,
lookup_table,
sorted_data,
data_size,
original_idx,
ograd.dptr<DType>(),
row_length,
num_threads_per_row);
}
inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
using nnvm::dim_t;
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
<< "weight gradient calculation with req != write";
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
if (data_size == 0) {
FillZerosRspImpl(s, output);
return;
}
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, {
SparseEmbeddingDeterministicKernelLaunch<IType, DType, RType>(
ctx, ograd, data, req, output);
});
});
});
}
template <>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const bool deterministic,
const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
if (deterministic) {
SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output);
return;
}
using namespace mshadow;
using namespace mxnet_op;
using namespace mshadow::expr;
using namespace rowsparse;
using nnvm::dim_t;
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
<< "weight gradient calculation with req != write";
// Request temporary storage for marking non-zero rows and prefix sum
Stream<gpu>* s = ctx.get_stream<gpu>();
dim_t num_rows = output.shape()[0];
dim_t row_length = output.shape()[1];
dim_t data_size = static_cast<dim_t>(data.shape_.Size());
dim_t num_threads;
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
dim_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
Shape1(num_rows * sizeof(dim_t) + temp_storage_bytes), s);
prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows * sizeof(dim_t);
num_threads = num_rows;
Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
dim_t nnr = 0;
CUDA_CALL(hipMemcpyAsync(&nnr,
&prefix_sum[num_rows - 1],
sizeof(dim_t),
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
if (nnr == 0) {
FillZerosRspImpl(s, output);
return;
}
output.CheckAndAlloc({Shape1(nnr)});
RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
// fill row_idx array of output matrix, using the row_flg values
Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows, grad_row_idx, prefix_sum, num_rows);
// prefill with zeros
DType* grad_data = output.data().dptr<DType>();
Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
// add the final gradients
num_threads = row_length * data_size;
Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s,
num_threads,
grad_data,
prefix_sum,
data.dptr<IType>(),
ograd.dptr<DType>(),
row_length);
});
});
});
}
/*
* \brief check if any of the indices is out of bound
* \param s the stream
* \param idx_ptr the indices on the stream
* \param N the number of indices in an axis
* \param M the number of axes to examine
* \param mshape the array that stores shape for each dimension
* \param is_valid_dim_ptr the temparary workspace that contains out-of-bound indices
*/
template <typename DType>
void GatherNDCheckBoundGPU(mshadow::Stream<gpu>* s,
const DType* idx_ptr,
index_t N,
index_t M,
const mshadow::Shape<10> mshape,
DType* is_valid_dim_ptr) {
using namespace mxnet_op;
Kernel<set_zero, gpu>::Launch(s, M, is_valid_dim_ptr);
Kernel<is_valid_check_gather_nd, gpu>::Launch(s, M, is_valid_dim_ptr, idx_ptr, N, mshape);
std::vector<DType> is_valid_dim(M);
CUDA_CALL(hipMemcpyAsync(is_valid_dim.data(),
is_valid_dim_ptr,
sizeof(DType) * M,
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
for (int m = 0; m < M; m++) {
if (is_valid_dim[m] > mshape[m] - 1 || is_valid_dim[m] < -mshape[m]) {
LOG(FATAL) << "IndexError: index " << is_valid_dim[m] << " is out of bounds for axis " << m
<< " with size " << mshape[m];
}
}
}
void GatherNDForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
if (req[0] == kNullOp)
return;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const mxnet::TShape& dshape = inputs[0].shape_;
const mxnet::TShape& ishape = inputs[1].shape_;
int M = ishape[0];
int N = ishape.Size() / M;
int K = dshape.ProdShape(M, dshape.ndim());
mshadow::Shape<10> strides;
mshadow::Shape<10> mshape;
for (int i = M - 1, stride = K; i >= 0; stride *= dshape[i], --i) {
strides[i] = stride;
mshape[i] = dshape[i];
}
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { // output data type switch
MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, { // indices data type switch
// check whether indices are out of bound
IType* idx_ptr = inputs[1].dptr<IType>();
Tensor<gpu, 1, IType> workspace =
ctx.requested[0].get_space_typed<gpu, 1, IType>(Shape1(M), s);
IType* is_valid_dim_ptr = reinterpret_cast<IType*>(workspace.dptr_);
GatherNDCheckBoundGPU(s, idx_ptr, N, M, mshape, is_valid_dim_ptr);
Kernel<gather_nd, gpu>::Launch(s,
N,
req[0],
N,
M,
K,
strides,
mshape,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<IType>());
});
});
}
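/*! \brief Scatter-add backward kernel for gather_nd: thread i recomputes the flattened offset
 *         of its index tuple from the strides and atomically adds the K trailing elements of
 *         the incoming gradient into the output tensor.
 */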
struct backward_gather_nd_gpu {
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(index_t i,
index_t N,
index_t M,
index_t K,
const mshadow::Shape<10> strides,
DType* out,
const DType* data,
const IType* indices) {
index_t offset = 0;
for (index_t j = 0; j < M; ++j) {
offset += strides[j] * static_cast<int>(indices[j * N + i]);
}
for (index_t j = 0; j < K; ++j) {
atomicAdd(out + (offset + j), data[i * K + j]);
}
}
};
template <typename DType, typename IType>
inline void GatherNDBackwardImpl(index_t N,
index_t M,
index_t K,
const mshadow::Shape<10> strides,
DType* out,
const DType* data,
const IType* indices,
mshadow::Stream<gpu>* s) {
mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
template <>
void TakeOpForward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[take_::kOut] == kNullOp)
return;
const TakeParam& param = nnvm::get<TakeParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
const mxnet::TShape& idxshape = inputs[take_::kIdx].shape_;
const mxnet::TShape& arrshape = inputs[take_::kArr].shape_;
const mxnet::TShape& oshape = outputs[take_::kOut].shape_;
if (idxshape.Size() == 0) {
return;
}
Stream<gpu>* s = ctx.get_stream<gpu>();
const int actual_axis = param.axis + ((param.axis < 0) ? arrshape.ndim() : 0);
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[take_::kOut].type_flag_, DType, { // output data type
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[take_::kIdx].type_flag_, IType, { // index data type
if (param.mode == take_::kRaise) {
// check out-of-bound indices
IType min = 0;
IType max = static_cast<IType>(arrshape[actual_axis] - 1);
IType* idx_ptr = inputs[take_::kIdx].dptr<IType>();
size_t idx_size = idxshape.Size();
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckIndexOutOfBound(s, idx_ptr, idx_size, min, max, is_valid_ptr);
CHECK(is_valid) << "Take indices contains indices out of bound";
}
if (actual_axis == 0) {
if (param.mode == take_::kClip) {
Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
oshape.Size() / idxshape.Size(),
arrshape[0]);
} else {
Kernel<TakeZeroAxisGPU<false>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
oshape.Size() / idxshape.Size(),
arrshape[0]);
}
} else {
mshadow::Shape<10> in_strides;
int stride = 1;
for (int i = arrshape.ndim() - 1; i >= 0; stride *= arrshape[i], --i) {
in_strides[i] = stride;
}
mshadow::Shape<10> out_strides;
stride = 1;
for (int i = oshape.ndim() - 1; i >= 0; stride *= oshape[i], --i) {
out_strides[i] = stride;
}
if (param.mode == take_::kClip) {
Kernel<TakeNonzeroAxis<true>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
out_strides[actual_axis - 1],
in_strides[actual_axis - 1],
in_strides[actual_axis],
arrshape.ndim(),
oshape.ndim(),
idxshape.ndim(),
arrshape[actual_axis],
actual_axis);
} else {
Kernel<TakeNonzeroAxis<false>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
out_strides[actual_axis - 1],
in_strides[actual_axis - 1],
in_strides[actual_axis],
arrshape.ndim(),
oshape.ndim(),
idxshape.ndim(),
arrshape[actual_axis],
actual_axis);
}
}
});
});
}
namespace {
/*
* \brief returns the number of bits needed to represent a, i.e. floor(log2(a)) + 1 (e.g. ilog2(4) == 3)
*/
inline int ilog2(unsigned int a) {
int k = 1;
while (a >>= 1)
k++;
return k;
}
} // namespace
/*
* \brief finds the lower and upper-bound positions of each unique element within
* a sorted input array
*
* \param sorted_data input elements previously sorted
* \param bounds output containing all lower-bound followed by all upper-bound positions
* \param data_dim total number of elements in the input array
* \param vocab_dim maximum number of unique elements
*/
template <typename IType>
__global__ void EmbeddingFindBounds(const IType* sorted_data,
IType* bounds,
const index_t data_dim,
const index_t vocab_dim) {
const index_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= vocab_dim)
return;
// Binary search to find lower bound: stored at bounds[0..vocab_dim-1]
IType lower_bound = 0;
IType upper_bound = data_dim - 1;
IType mean;
while (lower_bound < upper_bound) {
mean = (lower_bound + upper_bound) / 2;
if (id <= sorted_data[mean])
upper_bound = mean;
else
lower_bound = mean + 1;
}
bool found_row = (sorted_data[lower_bound] == id);
if (!found_row) {
bounds[id] = -1;
bounds[vocab_dim + id] = -2;
return;
} else {
bounds[id] = lower_bound;
}
// Binary search to find upper bound: stored at bounds[vocab_dim..2*vocab_dim-1]
lower_bound = 0;
upper_bound = data_dim - 1;
while (lower_bound < upper_bound) {
mean = (lower_bound + upper_bound + 1) / 2;
if (id >= sorted_data[mean])
lower_bound = mean;
else
upper_bound = mean - 1;
}
bounds[vocab_dim + id] = upper_bound;
}
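// Illustrative example (values chosen for exposition): with vocab_dim = 4 and
// sorted_data = {0, 0, 2, 2, 2, 3} (data_dim = 6), the kernel produces
// bounds = {0, -1, 2, 5, 1, -2, 4, 5}: ids 0, 2 and 3 span positions [0,1], [2,4] and [5,5],
// while the absent id 1 receives the (-1, -2) sentinel pair.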
/*
* \brief kernel to compute gradient of EmbeddingOp
* \param grad_in input gradient data
* \param original_index reference to the position at original input data for each index
* \param index_bounds lower and upper-bounds positions of each unique index
* \param grad_out output gradient data
* \param embbedding_dim dimension of the dense embedding
* \param vocab_dim maximum number of unique indices in the data array: tokens vocabulary size
* \param nelems_per_load number of elements per each load based on (LType / DType)
* \param req write/add/null
*/
template <typename AType, typename LType, typename DType, typename IType>
__global__ void EmbeddingGradKernel(DType* grad_in,
const IType* original_index,
const IType* index_bounds,
const DType* grad_out,
const index_t embbedding_dim,
const index_t vocab_dim,
const int nelems_per_load,
const int req) {
extern __shared__ int sharedmem[];
AType* grad_in_row = reinterpret_cast<AType*>(sharedmem);
const LType* aligned_grad_out = reinterpret_cast<const LType*>(grad_out);
LType* aligned_grad_in = reinterpret_cast<LType*>(grad_in);
const index_t aligned_emb_dim = embbedding_dim / nelems_per_load;
LType load_value[1];
DType* data_values = reinterpret_cast<DType*>(load_value);
IType my_row = blockIdx.x;
if (my_row < vocab_dim) {
// Read lower and upper bounds for current row
IType lower_bound = index_bounds[my_row];
IType upper_bound = index_bounds[vocab_dim + my_row];
int nOccurrences = upper_bound - lower_bound + 1;
for (index_t emb_id = threadIdx.x; emb_id < aligned_emb_dim; emb_id += blockDim.x) {
// Initialize grad_in
if (req == kAddTo) {
*load_value = aligned_grad_in[my_row * aligned_emb_dim + emb_id];
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(data_values[val_id]);
}
} else {
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(0.0);
}
}
// Add all rows from grad_out according to indices in data
for (index_t data_idx = lower_bound; data_idx < (lower_bound + nOccurrences); ++data_idx) {
*load_value = aligned_grad_out[original_index[data_idx] * aligned_emb_dim + emb_id];
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
grad_in_row[val_id * blockDim.x + threadIdx.x] += static_cast<AType>(data_values[val_id]);
}
}
// Save results
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
data_values[val_id] = static_cast<DType>(grad_in_row[val_id * blockDim.x + threadIdx.x]);
}
aligned_grad_in[my_row * aligned_emb_dim + emb_id] = *load_value;
}
}
}
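// Sizing sketch with hypothetical numbers for the vectorized path above: if
// DType is a 2-byte half type and get_load_type picks a 16-byte LType for an
// embbedding_dim of 128, then nelems_per_load = 8 and aligned_emb_dim = 16, so
// each thread moves eight values per 128-bit load/store while accumulating in
// AType in shared memory.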
template <typename AType, typename IType, typename DType>
void EmbeddingGradKernelCaller(const OpContext& ctx,
mshadow::Tensor<gpu, 2, DType> grad_in,
const mshadow::Tensor<gpu, 1, IType>& index,
const mshadow::Tensor<gpu, 2, DType>& grad_out,
const std::vector<OpReqType>& req) {
using namespace mxnet_op;
using namespace mshadow::expr;
Stream<gpu>* s = ctx.get_stream<gpu>();
const index_t data_dim = index.shape_[0];
const index_t vocab_dim = grad_in.shape_[0];
const index_t embbedding_dim = grad_in.shape_[1];
// Calculate amount of temporary storage
size_t sort_workspace_size = mxnet::op::SortByKeyWorkspaceSize<int, int, gpu>(data_dim);
size_t workspace_size =
2 * data_dim * sizeof(int) + 2 * vocab_dim * sizeof(int) + sort_workspace_size;
// Request temporary storage
Tensor<gpu, 1, char> workspace =
ctx.requested[embedding::kTempSpace].get_space_typed<gpu, 1, char>(Shape1(workspace_size), s);
// Create tensors
size_t pos = 0;
Tensor<gpu, 1, int> sorted_data(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
pos += data_dim * sizeof(int);
// Reference to input data positions for each element of sorted_data
Tensor<gpu, 1, int> original_index(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
pos += data_dim * sizeof(int);
// lower and upper bound positions of each index within sorted_data
Tensor<gpu, 1, int> bounds_index(
reinterpret_cast<int*>(&workspace[pos]), Shape1(2 * vocab_dim), s);
pos += 2 * vocab_dim * sizeof(int);
Tensor<gpu, 1, char> Sort_temp_storage(&workspace[pos], Shape1(sort_workspace_size), s);
// Clip indices [0, vocab_dim-1]
Kernel<tcast_clip, gpu>::Launch(
s, data_dim, sorted_data.dptr_, index.dptr_, static_cast<int>(vocab_dim));
Kernel<range_fwd, gpu>::Launch(s, data_dim, 1, 0, 1, kWriteTo, original_index.dptr_);
// Sort indices array
int num_bits = ilog2((vocab_dim - 1));
mxnet::op::SortByKey(sorted_data, original_index, true, &Sort_temp_storage, 0, num_bits);
// Find lower & upper bounds of each possible index
const int threads_block_bounds = 128;
const int nblocks_bounds = (vocab_dim + threads_block_bounds - 1) / threads_block_bounds;
hipLaunchKernelGGL(( EmbeddingFindBounds), dim3(nblocks_bounds), dim3(threads_block_bounds), 0, Stream<gpu>::GetStream(s),
sorted_data.dptr_, bounds_index.dptr_, data_dim, vocab_dim);
// Compute Gradient
int ltype = mxnet::common::cuda::get_load_type(embbedding_dim * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
CHECK_LE(sizeof(DType), sizeof(LType));
int nelems_per_load = sizeof(LType) / sizeof(DType);
int threads_block_grad = 32;
int maxThreads = 1024;
while (threads_block_grad < (embbedding_dim / nelems_per_load) &&
(threads_block_grad < maxThreads))
threads_block_grad += 32;
size_t required_shared = threads_block_grad * nelems_per_load * sizeof(AType);
dim3 blocks(vocab_dim, 1);
hipLaunchKernelGGL(( EmbeddingGradKernel<AType, LType>)
, dim3(blocks), dim3(threads_block_grad), required_shared, Stream<gpu>::GetStream(s),
grad_in.dptr_,
original_index.dptr_,
bounds_index.dptr_,
grad_out.dptr_,
embbedding_dim,
vocab_dim,
nelems_per_load,
req[embedding::kWeight]);
});
}
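// Byte layout of the temporary workspace carved out above, in allocation order
// (a summary of the pointer arithmetic on `pos`):
//
// [ sorted_data : data_dim * sizeof(int) ]
// [ original_index : data_dim * sizeof(int) ]
// [ bounds_index : 2 * vocab_dim * sizeof(int) ]  // lower bounds, then upper bounds
// [ Sort_temp_storage : sort_workspace_size ]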
template <>
void EmbeddingOpBackward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 2U);
CHECK_EQ(req[embedding::kData], kNullOp)
<< "Embedding layer doesn't support calculate data gradient";
if (req[embedding::kWeight] == kNullOp) {
return;
}
CHECK_EQ(outputs[1].type_flag_, inputs[0].type_flag_);
const mxnet::TShape& ishape = inputs[1].shape_;
const mxnet::TShape& oshape = inputs[0].shape_;
Stream<gpu>* s = ctx.get_stream<gpu>();
CHECK_NE(req[embedding::kWeight], kWriteInplace)
<< "Backward of Embedding does not support writing in place.";
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && outputs[1].type_flag_ == mshadow::kFloat16) {
common::LogOnce(
"MXNET_SAFE_ACCUMULATION=1 is recommended for EmbeddingOpBackward "
"with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(outputs[1].type_flag_, DType, AType, {
MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, {
Tensor<gpu, 1, IType> data =
inputs[1].get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
Tensor<gpu, 2, DType> grad_out = inputs[0].get_with_shape<gpu, 2, DType>(
Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
Tensor<gpu, 2, DType> grad_in = outputs[1].get<gpu, 2, DType>(s);
if (req[embedding::kWeight] == kWriteTo || req[embedding::kWeight] == kAddTo) {
if (safe_acc)
EmbeddingGradKernelCaller<AType>(ctx, grad_in, data, grad_out, req);
else
EmbeddingGradKernelCaller<DType>(ctx, grad_in, data, grad_out, req);
} else {
LOG(FATAL) << "wrong req";
}
});
});
}
NNVM_REGISTER_OP(Embedding).set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", EmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take).set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take).set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take).set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot).set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
NNVM_REGISTER_OP(gather_nd).set_attr<FCompute>("FCompute<gpu>", GatherNDForwardGPU);
NNVM_REGISTER_OP(scatter_nd).set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd).set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd).set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
| a69ada12188d3133ab802e9182ebe500eea37273.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file indexing_op.cu
* \brief GPU implementation of indexing operator
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
#include "./util/tensor_util-inl.h"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
*/
struct is_valid_check {
template <typename DType>
MSHADOW_XINLINE static void Map(int i,
char* out,
const DType* data,
const DType min,
const DType max) {
if (data[i] < min || data[i] > max)
*out = 1;
}
};
struct AddTakeGradRspGPUKernel {
template <typename DType, typename IType>
__device__ __forceinline__ static void Map(int tid,
DType* out,
const nnvm::dim_t* prefix_sum,
const IType* data,
const DType* ograd,
const nnvm::dim_t row_length) {
using nnvm::dim_t;
const dim_t data_i = tid / row_length;
const dim_t grad_i = tid % row_length;
const dim_t irow = static_cast<dim_t>(data[data_i]);
const dim_t rsp_row = prefix_sum[irow] - 1;
const DType val = ograd[data_i * row_length + grad_i];
atomicAdd(static_cast<DType*>(&(out[rsp_row * row_length + grad_i])), val);
}
};
/*
* \brief kernel for backward computation for take, executed with deterministic order
* \param thread_id the thread id
* \param out the output gradient data
* \param lookup_table the table to lookup the position of an id in gradient array
* \param sorted_data the sorted data input
* \param original_idx the original indices of the sorted data input
* \param ograd head gradient
* \param row_length the output dimension
* \param num_threads_per_row the number of threads to process a row together
* \param SZ the number of features a thread is responsible for
*/
template <int SZ>
struct AddTakeGradRspDeterministicKernel {
template <typename DType>
__device__ __forceinline__ static void Map(int thread_id,
DType* out,
const nnvm::dim_t* lookup_table,
const nnvm::dim_t* sorted_data,
const nnvm::dim_t data_size,
const nnvm::dim_t* original_idx,
const DType* ograd,
const nnvm::dim_t row_length,
const nnvm::dim_t num_threads_per_row) {
using nnvm::dim_t;
int tid = thread_id / num_threads_per_row;
const int feature_start = thread_id % num_threads_per_row * SZ;
int num_features = SZ;
if (feature_start + num_features > row_length) {
num_features = row_length - feature_start;
}
if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) {
DType acc[SZ];
#pragma unroll
for (int i = 0; i < SZ; i++) {
acc[i] = 0;
}
const dim_t data = sorted_data[tid];
const dim_t row_id = lookup_table[data];
const dim_t out_offset = row_id * row_length + feature_start;
do {
const dim_t idx = original_idx[tid];
const dim_t ograd_offset = idx * row_length + feature_start;
for (int i = 0; i < num_features; i++) {
acc[i] += ograd[ograd_offset + i];
}
tid++;
} while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]);
for (int i = 0; i < num_features; i++) {
out[out_offset + i] += acc[i];
}
}
}
};
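// Execution sketch (hypothetical values) for AddTakeGradRspDeterministicKernel:
// with sorted_data = {2, 2, 5}, only the thread at the start of each run of
// equal ids (tid == 0 or a value change) accumulates that run, so both gradient
// rows for id 2 are summed by one thread in a fixed order before being added to
// out[lookup_table[2] * row_length + ...]. That is what makes this backward
// path deterministic, at the price of load imbalance when an id repeats often.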
template <bool clip = true>
struct TakeZeroAxisGPU {
  // assume that idx has been flattened to a 1-D tensor (N,)
// assume that out_data and in_data have been flattened to 2-D tensors, (N, M) and (K, M)
// M is the number of columns of in_data and out_data
// K is the number of rows of in_data
// i is the index of out_data
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i,
DType* out_data,
const DType* in_data,
const IType* idx,
const int64_t M,
const int64_t K) {
int64_t j = static_cast<int64_t>(idx[i / M]);
if (clip) {
if (j <= 0)
j = 0;
else if (j >= K)
j = K - 1;
} else {
j = j % K;
j += (j < 0) ? K : 0;
}
out_data[i] = in_data[j * M + i % M];
}
};
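// Index-math sketch (hypothetical shapes) for TakeZeroAxisGPU: with in_data of
// shape (K, M) = (10, 4) and three indices, out_data has 3 * 4 = 12 elements;
// thread i = 6 reads row idx[6 / 4] = idx[1] and column 6 % 4 = 2. An index of
// -1 maps to row 9 in wrap mode (clip = false) and to row 0 in clip mode.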
/*
* \brief returns true if all indices are between [min, max]
* \param s the stream
* \param data_ptr the indices on the stream
* \param data_size the number of indices to examine
* \param min the expected min value for indices
* \param max the expected max value for indices
 * \param is_valid_ptr the temporary workspace
*/
template <typename DType>
bool CheckIndexOutOfBound(mshadow::Stream<gpu>* s,
const DType* data_ptr,
size_t data_size,
const DType min,
const DType max,
char* is_valid_ptr) {
using namespace mxnet_op;
int32_t is_valid = 0;
Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
CUDA_CALL(cudaMemcpyAsync(&is_valid,
is_valid_ptr,
sizeof(char),
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
return is_valid == 0;
}
// Embedding forward implementation with dense weight
template <>
void EmbeddingOpForwardDnsImpl<gpu>(mshadow::Stream<gpu>* s,
const TBlob& data,
const TBlob& weight,
const OpReqType req,
const TBlob& output) {
using namespace mxnet_op;
const mxnet::TShape& ishape = data.shape_;
const mxnet::TShape& oshape = output.shape_;
MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
Tensor<gpu, 1, IType> idx =
data.get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
Tensor<gpu, 2, DType> wmat = weight.get<gpu, 2, DType>(s);
Tensor<gpu, 2, DType> out = output.get_with_shape<gpu, 2, DType>(
Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(
s, oshape.Size(), out.dptr_, wmat.dptr_, idx.dptr_, wmat.shape_[1], wmat.shape_[0]);
});
});
}
template <>
void SparseEmbeddingOpForwardRspImpl<gpu>(const OpContext& ctx,
const TBlob& data,
const NDArray& weight,
const OpReqType req,
const TBlob& output) {
if (req == kNullOp)
return;
using namespace rowsparse;
using namespace mxnet_op;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
// zeros weight
if (req == kWriteTo && !weight.storage_initialized()) {
size_t out_size = output.shape_.Size();
MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
Fill<false>(
s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size), gpu::kDevMask), kWriteTo, 0);
})
return;
}
// check out-of-bound indices
MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
DType min = 0;
DType max = static_cast<DType>(weight.shape()[0] - 1);
DType* data_ptr = data.dptr<DType>();
size_t data_size = data.shape_.Size();
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckIndexOutOfBound(s, data_ptr, data_size, min, max, is_valid_ptr);
CHECK(is_valid) << "SparseEmbedding input contains data out of bound";
})
// the weight is actually dense
if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
} else {
EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
}
}
template <typename IType, typename DType, typename RType>
void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
using namespace mshadow;
using namespace mxnet_op;
using namespace expr;
using namespace rowsparse;
using nnvm::dim_t;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const dim_t num_rows = output.shape()[0];
const dim_t row_length = output.shape()[1];
const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
// temp resource declarations
dim_t* lookup_table = nullptr;
void* temp_storage = nullptr;
dim_t* sorted_data = nullptr;
dim_t* original_idx = nullptr;
// calculate number of bytes for temp resources
size_t lookup_table_bytes = num_rows * sizeof(dim_t);
size_t sorted_data_storage_bytes = data_size * sizeof(dim_t);
size_t original_idx_storage_bytes = data_size * sizeof(dim_t);
size_t sort_workspace_size = SortByKeyWorkspaceSize<dim_t, dim_t, gpu>(data_size);
size_t unique_workspace_bytes = 0;
// estimate unique temp space
IType* data_ptr = data.dptr<IType>();
size_t* null_ptr = nullptr;
// unique operations will be applied on sorted data
cub::DeviceSelect::Unique(nullptr,
unique_workspace_bytes,
sorted_data,
sorted_data,
null_ptr,
data_size,
Stream<gpu>::GetStream(s));
// One more space reserved for unique count
size_t temp_workspace_bytes = std::max(unique_workspace_bytes, sort_workspace_size);
size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes +
original_idx_storage_bytes + temp_workspace_bytes;
// request resource and split it. layout is:
// lookup_table, sorted_data, original_idx, temp_storage
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(total_storage_bytes), s);
lookup_table = reinterpret_cast<dim_t*>(workspace.dptr_);
sorted_data = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes);
original_idx =
reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes + sorted_data_storage_bytes);
temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes;
// check out-of-bound indices
{
IType min = 0;
IType max = static_cast<IType>(output.shape()[0] - 1);
IType* data_ptr = data.dptr<IType>();
size_t data_size = data.shape_.Size();
bool is_valid = CheckIndexOutOfBound(
s, data_ptr, data_size, min, max, reinterpret_cast<char*>(temp_storage));
CHECK(is_valid) << "Embedding input contains data out of bound";
}
// make a copy of the data, to be sorted
TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask);
auto sorted_data_tensor = sorted_data_blob.FlatTo1D<gpu, dim_t>(s);
mxnet_op::copy(s, sorted_data_blob, data);
// generate original idx
Tensor<gpu, 1, dim_t> original_idx_tensor(original_idx, Shape1(data_size), s);
Kernel<range_fwd, gpu>::Launch(
s, data_size, 1, static_cast<dim_t>(0), static_cast<dim_t>(1), kWriteTo, original_idx);
// sort data with its original idx
int num_bits = common::ilog2ui(num_rows - 1);
char* temp_storage_ptr = reinterpret_cast<char*>(temp_storage);
Tensor<gpu, 1, char> temp_storage_tensor(temp_storage_ptr, Shape1(sort_workspace_size), s);
SortByKey(sorted_data_tensor, original_idx_tensor, true, &temp_storage_tensor, 0, num_bits);
// compute unique row ids based on sorted values.
output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1));
// fill row_idx array of output matrix, using the row_flg values
RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
cub::DeviceSelect::Unique(temp_storage_ptr,
unique_workspace_bytes,
sorted_data,
grad_row_idx,
grad_row_idx + data_size,
data_size,
Stream<gpu>::GetStream(s));
dim_t nnr = 0;
CUDA_CALL(cudaMemcpyAsync(&nnr,
grad_row_idx + data_size,
sizeof(RType),
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  CHECK_EQ(output.shape().ndim(), 2) << "Unexpected ndim";
output.CheckAndAllocData(Shape2(nnr, output.shape()[1]));
output.set_aux_shape(kIdx, Shape1(nnr));
// generate lookup table
Kernel<MarkLookupTable, gpu>::Launch(s, nnr, lookup_table, grad_row_idx);
// accumulate gradients
DType* grad_data = output.data().dptr<DType>();
Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
const int SZ = 4;
const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ;
Kernel<AddTakeGradRspDeterministicKernel<SZ>, gpu>::Launch(s,
data_size * num_threads_per_row,
grad_data,
lookup_table,
sorted_data,
data_size,
original_idx,
ograd.dptr<DType>(),
row_length,
num_threads_per_row);
}
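// Workspace layout requested by SparseEmbeddingDeterministicKernelLaunch above,
// matching the "lookup_table, sorted_data, original_idx, temp_storage" note in
// the code (byte sizes in allocation order):
//
// [ lookup_table : num_rows * sizeof(dim_t) ]
// [ sorted_data : data_size * sizeof(dim_t) ]
// [ original_idx : data_size * sizeof(dim_t) ]
// [ temp_storage : max(unique_workspace_bytes, sort_workspace_size) ]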
inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
using nnvm::dim_t;
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
<< "weight gradient calculation with req != write";
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
if (data_size == 0) {
FillZerosRspImpl(s, output);
return;
}
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, {
SparseEmbeddingDeterministicKernelLaunch<IType, DType, RType>(
ctx, ograd, data, req, output);
});
});
});
}
template <>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const bool deterministic,
const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
if (deterministic) {
SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output);
return;
}
using namespace mshadow;
using namespace mxnet_op;
using namespace mshadow::expr;
using namespace rowsparse;
using nnvm::dim_t;
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
<< "weight gradient calculation with req != write";
// Request temporary storage for marking non-zero rows and prefix sum
Stream<gpu>* s = ctx.get_stream<gpu>();
dim_t num_rows = output.shape()[0];
dim_t row_length = output.shape()[1];
dim_t data_size = static_cast<dim_t>(data.shape_.Size());
dim_t num_threads;
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
dim_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
Shape1(num_rows * sizeof(dim_t) + temp_storage_bytes), s);
prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows * sizeof(dim_t);
num_threads = num_rows;
Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
dim_t nnr = 0;
CUDA_CALL(cudaMemcpyAsync(&nnr,
&prefix_sum[num_rows - 1],
sizeof(dim_t),
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
if (nnr == 0) {
FillZerosRspImpl(s, output);
return;
}
output.CheckAndAlloc({Shape1(nnr)});
RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
// fill row_idx array of output matrix, using the row_flg values
Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows, grad_row_idx, prefix_sum, num_rows);
// prefill with zeros
DType* grad_data = output.data().dptr<DType>();
Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
// add the final gradients
num_threads = row_length * data_size;
Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s,
num_threads,
grad_data,
prefix_sum,
data.dptr<IType>(),
ograd.dptr<DType>(),
row_length);
});
});
});
}
/*
* \brief check if any of the indices is out of bound
* \param s the stream
* \param idx_ptr the indices on the stream
* \param N the number of indices in an axis
 * \param M the number of axes to examine
* \param mshape the array that stores shape for each dimension
 * \param is_valid_dim_ptr the temporary workspace that contains out-of-bound indices
*/
template <typename DType>
void GatherNDCheckBoundGPU(mshadow::Stream<gpu>* s,
const DType* idx_ptr,
index_t N,
index_t M,
const mshadow::Shape<10> mshape,
DType* is_valid_dim_ptr) {
using namespace mxnet_op;
Kernel<set_zero, gpu>::Launch(s, M, is_valid_dim_ptr);
Kernel<is_valid_check_gather_nd, gpu>::Launch(s, M, is_valid_dim_ptr, idx_ptr, N, mshape);
std::vector<DType> is_valid_dim(M);
CUDA_CALL(cudaMemcpyAsync(is_valid_dim.data(),
is_valid_dim_ptr,
sizeof(DType) * M,
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
for (int m = 0; m < M; m++) {
if (is_valid_dim[m] > mshape[m] - 1 || is_valid_dim[m] < -mshape[m]) {
LOG(FATAL) << "IndexError: index " << is_valid_dim[m] << " is out of bounds for axis " << m
<< " with size " << mshape[m];
}
}
}
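// Bound rule illustrated with a hypothetical shape: for axis m with
// mshape[m] = 5, recorded indices in [-5, 4] pass the host-side check above,
// while 5 or -6 trigger the IndexError naming the offending value and axis.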
void GatherNDForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
if (req[0] == kNullOp)
return;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
const mxnet::TShape& dshape = inputs[0].shape_;
const mxnet::TShape& ishape = inputs[1].shape_;
int M = ishape[0];
int N = ishape.Size() / M;
int K = dshape.ProdShape(M, dshape.ndim());
mshadow::Shape<10> strides;
mshadow::Shape<10> mshape;
for (int i = M - 1, stride = K; i >= 0; stride *= dshape[i], --i) {
strides[i] = stride;
mshape[i] = dshape[i];
}
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { // output data type switch
MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, { // indices data type switch
// check whether indices are out of bound
IType* idx_ptr = inputs[1].dptr<IType>();
Tensor<gpu, 1, IType> workspace =
ctx.requested[0].get_space_typed<gpu, 1, IType>(Shape1(M), s);
IType* is_valid_dim_ptr = reinterpret_cast<IType*>(workspace.dptr_);
GatherNDCheckBoundGPU(s, idx_ptr, N, M, mshape, is_valid_dim_ptr);
Kernel<gather_nd, gpu>::Launch(s,
N,
req[0],
N,
M,
K,
strides,
mshape,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<IType>());
});
});
}
struct backward_gather_nd_gpu {
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(index_t i,
index_t N,
index_t M,
index_t K,
const mshadow::Shape<10> strides,
DType* out,
const DType* data,
const IType* indices) {
index_t offset = 0;
for (index_t j = 0; j < M; ++j) {
offset += strides[j] * static_cast<int>(indices[j * N + i]);
}
for (index_t j = 0; j < K; ++j) {
atomicAdd(out + (offset + j), data[i * K + j]);
}
}
};
template <typename DType, typename IType>
inline void GatherNDBackwardImpl(index_t N,
index_t M,
index_t K,
const mshadow::Shape<10> strides,
DType* out,
const DType* data,
const IType* indices,
mshadow::Stream<gpu>* s) {
mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
template <>
void TakeOpForward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[take_::kOut] == kNullOp)
return;
const TakeParam& param = nnvm::get<TakeParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
const mxnet::TShape& idxshape = inputs[take_::kIdx].shape_;
const mxnet::TShape& arrshape = inputs[take_::kArr].shape_;
const mxnet::TShape& oshape = outputs[take_::kOut].shape_;
if (idxshape.Size() == 0) {
return;
}
Stream<gpu>* s = ctx.get_stream<gpu>();
const int actual_axis = param.axis + ((param.axis < 0) ? arrshape.ndim() : 0);
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[take_::kOut].type_flag_, DType, { // output data type
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[take_::kIdx].type_flag_, IType, { // index data type
if (param.mode == take_::kRaise) {
// check out-of-bound indices
IType min = 0;
IType max = static_cast<IType>(arrshape[actual_axis] - 1);
IType* idx_ptr = inputs[take_::kIdx].dptr<IType>();
size_t idx_size = idxshape.Size();
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckIndexOutOfBound(s, idx_ptr, idx_size, min, max, is_valid_ptr);
CHECK(is_valid) << "Take indices contains indices out of bound";
}
if (actual_axis == 0) {
if (param.mode == take_::kClip) {
Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
oshape.Size() / idxshape.Size(),
arrshape[0]);
} else {
Kernel<TakeZeroAxisGPU<false>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
oshape.Size() / idxshape.Size(),
arrshape[0]);
}
} else {
mshadow::Shape<10> in_strides;
int stride = 1;
for (int i = arrshape.ndim() - 1; i >= 0; stride *= arrshape[i], --i) {
in_strides[i] = stride;
}
mshadow::Shape<10> out_strides;
stride = 1;
for (int i = oshape.ndim() - 1; i >= 0; stride *= oshape[i], --i) {
out_strides[i] = stride;
}
if (param.mode == take_::kClip) {
Kernel<TakeNonzeroAxis<true>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
out_strides[actual_axis - 1],
in_strides[actual_axis - 1],
in_strides[actual_axis],
arrshape.ndim(),
oshape.ndim(),
idxshape.ndim(),
arrshape[actual_axis],
actual_axis);
} else {
Kernel<TakeNonzeroAxis<false>, gpu>::Launch(s,
oshape.Size(),
outputs[take_::kOut].dptr<DType>(),
inputs[take_::kArr].dptr<DType>(),
inputs[take_::kIdx].dptr<IType>(),
out_strides[actual_axis - 1],
in_strides[actual_axis - 1],
in_strides[actual_axis],
arrshape.ndim(),
oshape.ndim(),
idxshape.ndim(),
arrshape[actual_axis],
actual_axis);
}
}
});
});
}
namespace {
/*
 * \brief returns the number of significant bits of a, i.e. floor(log2(a)) + 1, used as the radix-sort bit count
*/
inline int ilog2(unsigned int a) {
int k = 1;
while (a >>= 1)
k++;
return k;
}
} // namespace
/*
* \brief finds the lower and upper-bound positions of each unique element within
* a sorted input array
*
* \param sorted_data input elements previously sorted
* \param bounds output containing all lower-bound followed by all upper-bound positions
* \param data_dim total number of elements in the input array
* \param vocab_dim maximum number of unique elements
*/
template <typename IType>
__global__ void EmbeddingFindBounds(const IType* sorted_data,
IType* bounds,
const index_t data_dim,
const index_t vocab_dim) {
const index_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= vocab_dim)
return;
// Binary search to find lower bound: stored at bounds[0..vocab_dim-1]
IType lower_bound = 0;
IType upper_bound = data_dim - 1;
IType mean;
while (lower_bound < upper_bound) {
mean = (lower_bound + upper_bound) / 2;
if (id <= sorted_data[mean])
upper_bound = mean;
else
lower_bound = mean + 1;
}
bool found_row = (sorted_data[lower_bound] == id);
if (!found_row) {
bounds[id] = -1;
bounds[vocab_dim + id] = -2;
return;
} else {
bounds[id] = lower_bound;
}
// Binary search to find upper bound: stored at bounds[vocab_dim..2*vocab_dim-1]
lower_bound = 0;
upper_bound = data_dim - 1;
while (lower_bound < upper_bound) {
mean = (lower_bound + upper_bound + 1) / 2;
if (id >= sorted_data[mean])
lower_bound = mean;
else
upper_bound = mean - 1;
}
bounds[vocab_dim + id] = upper_bound;
}
/*
* \brief kernel to compute gradient of EmbeddingOp
* \param grad_in input gradient data
* \param original_index reference to the position at original input data for each index
* \param index_bounds lower and upper-bounds positions of each unique index
* \param grad_out output gradient data
* \param embbedding_dim dimension of the dense embedding
* \param vocab_dim maximum number of unique indices in the data array: tokens vocabulary size
* \param nelems_per_load number of elements per each load based on (LType / DType)
* \param req write/add/null
*/
template <typename AType, typename LType, typename DType, typename IType>
__global__ void EmbeddingGradKernel(DType* grad_in,
const IType* original_index,
const IType* index_bounds,
const DType* grad_out,
const index_t embbedding_dim,
const index_t vocab_dim,
const int nelems_per_load,
const int req) {
extern __shared__ int sharedmem[];
AType* grad_in_row = reinterpret_cast<AType*>(sharedmem);
const LType* aligned_grad_out = reinterpret_cast<const LType*>(grad_out);
LType* aligned_grad_in = reinterpret_cast<LType*>(grad_in);
const index_t aligned_emb_dim = embbedding_dim / nelems_per_load;
LType load_value[1];
DType* data_values = reinterpret_cast<DType*>(load_value);
IType my_row = blockIdx.x;
if (my_row < vocab_dim) {
// Read lower and upper bounds for current row
IType lower_bound = index_bounds[my_row];
IType upper_bound = index_bounds[vocab_dim + my_row];
int nOccurrences = upper_bound - lower_bound + 1;
for (index_t emb_id = threadIdx.x; emb_id < aligned_emb_dim; emb_id += blockDim.x) {
// Initialize grad_in
if (req == kAddTo) {
*load_value = aligned_grad_in[my_row * aligned_emb_dim + emb_id];
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(data_values[val_id]);
}
} else {
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(0.0);
}
}
// Add all rows from grad_out according to indices in data
for (index_t data_idx = lower_bound; data_idx < (lower_bound + nOccurrences); ++data_idx) {
*load_value = aligned_grad_out[original_index[data_idx] * aligned_emb_dim + emb_id];
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
grad_in_row[val_id * blockDim.x + threadIdx.x] += static_cast<AType>(data_values[val_id]);
}
}
// Save results
for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
data_values[val_id] = static_cast<DType>(grad_in_row[val_id * blockDim.x + threadIdx.x]);
}
aligned_grad_in[my_row * aligned_emb_dim + emb_id] = *load_value;
}
}
}
template <typename AType, typename IType, typename DType>
void EmbeddingGradKernelCaller(const OpContext& ctx,
mshadow::Tensor<gpu, 2, DType> grad_in,
const mshadow::Tensor<gpu, 1, IType>& index,
const mshadow::Tensor<gpu, 2, DType>& grad_out,
const std::vector<OpReqType>& req) {
using namespace mxnet_op;
using namespace mshadow::expr;
Stream<gpu>* s = ctx.get_stream<gpu>();
const index_t data_dim = index.shape_[0];
const index_t vocab_dim = grad_in.shape_[0];
const index_t embbedding_dim = grad_in.shape_[1];
// Calculate amount of temporary storage
size_t sort_workspace_size = mxnet::op::SortByKeyWorkspaceSize<int, int, gpu>(data_dim);
size_t workspace_size =
2 * data_dim * sizeof(int) + 2 * vocab_dim * sizeof(int) + sort_workspace_size;
// Request temporary storage
Tensor<gpu, 1, char> workspace =
ctx.requested[embedding::kTempSpace].get_space_typed<gpu, 1, char>(Shape1(workspace_size), s);
// Create tensors
size_t pos = 0;
Tensor<gpu, 1, int> sorted_data(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
pos += data_dim * sizeof(int);
// Reference to input data positions for each element of sorted_data
Tensor<gpu, 1, int> original_index(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
pos += data_dim * sizeof(int);
// lower and upper bound positions of each index within sorted_data
Tensor<gpu, 1, int> bounds_index(
reinterpret_cast<int*>(&workspace[pos]), Shape1(2 * vocab_dim), s);
pos += 2 * vocab_dim * sizeof(int);
Tensor<gpu, 1, char> Sort_temp_storage(&workspace[pos], Shape1(sort_workspace_size), s);
// Clip indices [0, vocab_dim-1]
Kernel<tcast_clip, gpu>::Launch(
s, data_dim, sorted_data.dptr_, index.dptr_, static_cast<int>(vocab_dim));
Kernel<range_fwd, gpu>::Launch(s, data_dim, 1, 0, 1, kWriteTo, original_index.dptr_);
// Sort indices array
int num_bits = ilog2((vocab_dim - 1));
mxnet::op::SortByKey(sorted_data, original_index, true, &Sort_temp_storage, 0, num_bits);
// Find lower & upper bounds of each possible index
const int threads_block_bounds = 128;
const int nblocks_bounds = (vocab_dim + threads_block_bounds - 1) / threads_block_bounds;
EmbeddingFindBounds<<<nblocks_bounds, threads_block_bounds, 0, Stream<gpu>::GetStream(s)>>>(
sorted_data.dptr_, bounds_index.dptr_, data_dim, vocab_dim);
// Compute Gradient
int ltype = mxnet::common::cuda::get_load_type(embbedding_dim * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
CHECK_LE(sizeof(DType), sizeof(LType));
int nelems_per_load = sizeof(LType) / sizeof(DType);
int threads_block_grad = 32;
int maxThreads = 1024;
while (threads_block_grad < (embbedding_dim / nelems_per_load) &&
(threads_block_grad < maxThreads))
threads_block_grad += 32;
size_t required_shared = threads_block_grad * nelems_per_load * sizeof(AType);
dim3 blocks(vocab_dim, 1);
EmbeddingGradKernel<AType, LType>
<<<blocks, threads_block_grad, required_shared, Stream<gpu>::GetStream(s)>>>(
grad_in.dptr_,
original_index.dptr_,
bounds_index.dptr_,
grad_out.dptr_,
embbedding_dim,
vocab_dim,
nelems_per_load,
req[embedding::kWeight]);
});
}
template <>
void EmbeddingOpBackward<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 2U);
CHECK_EQ(req[embedding::kData], kNullOp)
<< "Embedding layer doesn't support calculate data gradient";
if (req[embedding::kWeight] == kNullOp) {
return;
}
CHECK_EQ(outputs[1].type_flag_, inputs[0].type_flag_);
const mxnet::TShape& ishape = inputs[1].shape_;
const mxnet::TShape& oshape = inputs[0].shape_;
Stream<gpu>* s = ctx.get_stream<gpu>();
CHECK_NE(req[embedding::kWeight], kWriteInplace)
<< "Backward of Embedding does not support writing in place.";
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && outputs[1].type_flag_ == mshadow::kFloat16) {
common::LogOnce(
"MXNET_SAFE_ACCUMULATION=1 is recommended for EmbeddingOpBackward "
"with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(outputs[1].type_flag_, DType, AType, {
MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, {
Tensor<gpu, 1, IType> data =
inputs[1].get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
Tensor<gpu, 2, DType> grad_out = inputs[0].get_with_shape<gpu, 2, DType>(
Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
Tensor<gpu, 2, DType> grad_in = outputs[1].get<gpu, 2, DType>(s);
if (req[embedding::kWeight] == kWriteTo || req[embedding::kWeight] == kAddTo) {
if (safe_acc)
EmbeddingGradKernelCaller<AType>(ctx, grad_in, data, grad_out, req);
else
EmbeddingGradKernelCaller<DType>(ctx, grad_in, data, grad_out, req);
} else {
LOG(FATAL) << "wrong req";
}
});
});
}
NNVM_REGISTER_OP(Embedding).set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", EmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take).set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take).set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take).set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot).set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
NNVM_REGISTER_OP(gather_nd).set_attr<FCompute>("FCompute<gpu>", GatherNDForwardGPU);
NNVM_REGISTER_OP(scatter_nd).set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd).set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd).set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
|
ae9b89e353ded9cc8dd04ac3de37687d86dab737.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
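// Usage sketch (hypothetical buffer) for the helper above: zero-fill a freshly
// allocated device array before launching the stencil.
//
// float* d_buf;
// hipMalloc(&d_buf, sizeof(float) * size);
// initialize_array<float>(d_buf, size, 0.0f);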
void Check_CUDA_Error(const char* message);
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-8);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[0][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[0] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
float __temp_3__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[1][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))){
float __temp_3__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[2][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))){
float __temp_3__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[3][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
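// Rotate the software pipeline for the next z-plane: each stage's partially
// accumulated output (still missing its top-neighbour contribution) is staged
// into the next stage's shared tile, the newer partial sum shifts from t[] to
// b[], and t[] is cleared.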
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d27pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
for (int i = 0 ; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, __var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0 ; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, __var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| ae9b89e353ded9cc8dd04ac3de37687d86dab737.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4] = {0.0f, 0.0f, 0.0f, 0.0f}, b[4] = {0.0f, 0.0f, 0.0f, 0.0f}, out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-8);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
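// The z-loop streams one input plane per iteration through four fused stencil
// stages (shared tiles tilevar[0..3] plus the t[]/b[] register accumulators),
// so each fully accumulated output plane is written with a four-plane delay,
// hence the FORMA_MAX(__iter_2__-4,0) index on the store below.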
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[0][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[0] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
float __temp_3__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[1][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))){
float __temp_3__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[2][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))){
float __temp_3__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_7__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
float __temp_12__ = (tilevar[3][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
float __temp_17__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
float __temp_22__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
float __temp_27__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
float __temp_32__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
float __temp_37__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
float __temp_42__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_97__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
float __temp_102__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
float __temp_107__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
float __temp_112__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
float __temp_117__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
float __temp_122__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
float __temp_127__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
float __temp_132__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d27pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
for (int i = 0 ; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (__var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0 ; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (__var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
812ae57728fc29db0fb975a13c0941971680d0f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./gaussian_kernel.h"
#define BLOCK 16
#define TILE_WIDTH 16
/*
The actual gaussian blur kernel to be implemented by
you. Keep in mind that the kernel operates on a
single channel.
*/
__global__
void gaussianBlur(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth){
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if(c < cols && r < rows)
{
float pixelVal = 0.0f;
for(int blurRow = 0; blurRow < filterWidth; ++blurRow)
{
for(int blurCol = 0; blurCol < filterWidth; ++blurCol)
{
int curRow = r + blurRow-filterWidth/2;
int curCol = c + blurCol-filterWidth/2;
int cR = max(0, min(rows-1, curRow));
int cC = max(0, min(cols-1, curCol));
if(cR < rows && cR > -1 && cC < cols && cC > -1)
{
pixelVal += (float)d_filter[blurRow * filterWidth + blurCol] * (float)d_in[cR * cols + cC];
}
}
}
d_out[r * cols + c] = (unsigned char)pixelVal;
}
}
__global__
void gaussianBlur_shared(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth){
__shared__ unsigned char ds_in[TILE_WIDTH][TILE_WIDTH];
__shared__ int p;
float pixelVal = 0;
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
// Load the tiles one by one
p = c/TILE_WIDTH;
// Check the boundary condition
if((r < rows) && ((p*TILE_WIDTH+threadIdx.x) < cols)){
// If the index is valid, load data to shared memory
ds_in[threadIdx.y][threadIdx.x] = d_in[r*cols + p*TILE_WIDTH + threadIdx.x];
}
else{
// If the index is invalid, load the zero pixel to shared memory
ds_in[threadIdx.y][threadIdx.x] = (unsigned char)0.0;
}
__syncthreads();
p = c/TILE_WIDTH;
if ((r < rows) && ((p*TILE_WIDTH + threadIdx.x) < cols))
{
for(int blurRow = 0; blurRow < filterWidth; ++blurRow)
{
for(int blurCol = 0; blurCol < filterWidth; ++blurCol)
{
int curRow = threadIdx.y - (filterWidth/2);
int curCol = threadIdx.x - (filterWidth/2);
//curRow = min(max(curRow, 0), rows);
//curCol = min(max(curCol, 0), cols);
if(curRow >= -1 && curRow < rows && curCol >= -1 && curCol < cols)
{
pixelVal += d_filter[blurRow * filterWidth + blurCol] * (float)ds_in[curRow][curCol];
}
__syncthreads();
}
}
d_out[r*cols+c] = (unsigned char)pixelVal;
}
// Barrier synchronization
__syncthreads();
}
__global__
void gaussianBlur_row(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth)
{
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int leftCol = filterWidth/2;
/*
float rowsum = 0.0;
for(int i = 0; i < filterWidth; i++)
{
rowsum += d_filter[leftCol*filterWidth+i];
}
*/
if(c < cols && r < rows)
{
float pixelVal = 0.0f;
for(int blurRow = 0; blurRow < filterWidth; ++blurRow)
{
int curCol = c + blurRow - filterWidth/2;
if(curCol > -1 && curCol < cols)
{
// printf("filter index: %d\n", blurRow*filterWidth+leftCol);
//printf("d_in index: %d\n", r*cols+curCol);
pixelVal += (float)d_filter[blurRow * filterWidth + leftCol] * (float)d_in[r * cols + curCol];
}
__syncthreads();
}
d_out[r * cols + c] = pixelVal;
}
__syncthreads();
}
__global__
void gaussianBlur_col(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth)
{
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int topRow = filterWidth/2;
/*
float rowsum = 0.0;
for(int i = 0; i < filterWidth; i++)
{
rowsum += d_filter[i*filterWidth+topRow];
}*/
if(c < cols && r < rows)
{
float pixelVal = 0.0f;
for(int blurCol = 0; blurCol < filterWidth; ++blurCol)
{
int curRow = r + blurCol - filterWidth/2;
if(curRow > -1 && curRow < rows)
{
//printf("filter index: %d\n", topRow*filterWidth+blurCol);
//printf("d_in index: %d\n", curRow*cols+c);
pixelVal += ((float)d_filter[topRow * filterWidth + blurCol]) * (float)d_in[curRow * cols + c];
}
__syncthreads();
}
d_out[r * cols + c] = (unsigned char)pixelVal;
}
__syncthreads();
}
/*
Given an input RGBA image separate
that into appropriate rgba channels.
*/
__global__
void separateChannels(uchar4 *d_imrgba, unsigned char *d_r, unsigned char *d_g, unsigned char *d_b,
const int rows, const int cols){
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int offset = r * cols + c;
if(r < rows && c < cols)
{
d_r[offset] = d_imrgba[offset].x;
d_g[offset] = d_imrgba[offset].y;
d_b[offset] = d_imrgba[offset].z;
}
}
/*
Given input channels combine them
into a single uchar4 channel.
You can use some handy constructors provided by the
cuda library i.e.
make_int2(x, y) -> creates a vector of type int2 having x,y components
make_uchar4(x,y,z,255) -> creates a vector of uchar4 type x,y,z components
the last argument being the transparency value.
*/
__global__
void recombineChannels(unsigned char *d_r, unsigned char *d_g, unsigned char *d_b, uchar4 *d_orgba,
const int rows, const int cols){
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if(r < rows && c < cols)
{
unsigned char red = d_r[r * cols + c];
unsigned char green = d_g[r*cols+c];
unsigned char blue = d_b[r*cols+c];
uchar4 recombine = make_uchar4(blue, green, red, 255);
d_orgba[r*cols+c] = recombine;
}
}
void your_gauss_blur(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t rows, size_t cols,
unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue,
unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred,
float *d_filter, int filterWidth){
dim3 blockSize(BLOCK,BLOCK,1);
dim3 gridSize(ceil(cols/BLOCK)+1,ceil(rows/BLOCK)+1,1);
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_imrgba, d_red, d_green, d_blue, rows, cols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Blur each channel with the full 2D gaussian kernel.
hipLaunchKernelGGL(( gaussianBlur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_rblurred, rows, cols, d_filter, filterWidth);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussianBlur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_gblurred, rows, cols, d_filter, filterWidth);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussianBlur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_bblurred, rows, cols, d_filter, filterWidth);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_rblurred, d_gblurred, d_bblurred, d_oimrgba, rows, cols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
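/*
  Illustrative host-side sketch, not part of the original pipeline: your_gauss_blur
  assumes d_filter already holds a normalized filterWidth x filterWidth Gaussian
  (odd filterWidth, as the kernels above assume). The function name and the sigma
  parameter below are hypothetical.
*/
static void makeGaussianFilter(float *h_filter, int filterWidth, float sigma){
  float sum = 0.0f;
  int half = filterWidth / 2;
  for(int r = -half; r <= half; ++r){
    for(int c = -half; c <= half; ++c){
      // Unnormalized 2D Gaussian weight for the (r,c) offset
      float v = expf(-(float)(r * r + c * c) / (2.0f * sigma * sigma));
      h_filter[(r + half) * filterWidth + (c + half)] = v;
      sum += v;
    }
  }
  // Normalize so the weights sum to 1 and the blur preserves brightness
  for(int i = 0; i < filterWidth * filterWidth; ++i)
    h_filter[i] /= sum;
}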
| 812ae57728fc29db0fb975a13c0941971680d0f9.cu | #include "./gaussian_kernel.h"
#define BLOCK 16
#define TILE_WIDTH 16
/*
The actual gaussian blur kernel to be implemented by
you. Keep in mind that the kernel operates on a
single channel.
*/
__global__
void gaussianBlur(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth){
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if(c < cols && r < rows)
{
float pixelVal = 0.0f;
for(int blurRow = 0; blurRow < filterWidth; ++blurRow)
{
for(int blurCol = 0; blurCol < filterWidth; ++blurCol)
{
int curRow = r + blurRow-filterWidth/2;
int curCol = c + blurCol-filterWidth/2;
int cR = max(0, min(rows-1, curRow));
int cC = max(0, min(cols-1, curCol));
if(cR < rows && cR > -1 && cC < cols && cC > -1)
{
pixelVal += (float)d_filter[blurRow * filterWidth + blurCol] * (float)d_in[cR * cols + cC];
}
}
}
d_out[r * cols + c] = (unsigned char)pixelVal;
}
}
__global__
void gaussianBlur_shared(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth){
__shared__ unsigned char ds_in[TILE_WIDTH][TILE_WIDTH];
__shared__ int p;
float pixelVal = 0;
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
// Load the tiles one by one
p = c/TILE_WIDTH;
// Check the boundary condition
if((r < rows) && ((p*TILE_WIDTH+threadIdx.x) < cols)){
// If the index is valid, load data to shared memory
ds_in[threadIdx.y][threadIdx.x] = d_in[r*cols + p*TILE_WIDTH + threadIdx.x];
}
else{
// If the index is invalid, load the zero pixel to shared memory
ds_in[threadIdx.y][threadIdx.x] = (unsigned char)0.0;
}
__syncthreads();
p = c/TILE_WIDTH;
if ((r < rows) && ((p*TILE_WIDTH + threadIdx.x) < cols))
{
for(int blurRow = 0; blurRow < filterWidth; ++blurRow)
{
for(int blurCol = 0; blurCol < filterWidth; ++blurCol)
{
int curRow = threadIdx.y - (filterWidth/2);
int curCol = threadIdx.x - (filterWidth/2);
//curRow = min(max(curRow, 0), rows);
//curCol = min(max(curCol, 0), cols);
if(curRow >= -1 && curRow < rows && curCol >= -1 && curCol < cols)
{
pixelVal += d_filter[blurRow * filterWidth + blurCol] * (float)ds_in[curRow][curCol];
}
__syncthreads();
}
}
d_out[r*cols+c] = (unsigned char)pixelVal;
}
// Barrier synchronization
__syncthreads();
}
__global__
void gaussianBlur_row(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth)
{
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int leftCol = filterWidth/2;
/*
float rowsum = 0.0;
for(int i = 0; i < filterWidth; i++)
{
rowsum += d_filter[leftCol*filterWidth+i];
}
*/
if(c < cols && r < rows)
{
float pixelVal = 0.0f;
for(int blurRow = 0; blurRow < filterWidth; ++blurRow)
{
int curCol = c + blurRow - filterWidth/2;
if(curCol > -1 && curCol < cols)
{
// printf("filter index: %d\n", blurRow*filterWidth+leftCol);
//printf("d_in index: %d\n", r*cols+curCol);
pixelVal += (float)d_filter[blurRow * filterWidth + leftCol] * (float)d_in[r * cols + curCol];
}
__syncthreads();
}
d_out[r * cols + c] = pixelVal;
}
__syncthreads();
}
__global__
void gaussianBlur_col(unsigned char *d_in, unsigned char *d_out,
const int rows, const int cols, float *d_filter, const int filterWidth)
{
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int topRow = filterWidth/2;
/*
float rowsum = 0.0;
for(int i = 0; i < filterWidth; i++)
{
rowsum += d_filter[i*filterWidth+topRow];
}*/
if(c < cols && r < rows)
{
float pixelVal = 0.0f;
for(int blurCol = 0; blurCol < filterWidth; ++blurCol)
{
int curRow = r + blurCol - filterWidth/2;
if(curRow > -1 && curRow < rows)
{
//printf("filter index: %d\n", topRow*filterWidth+blurCol);
//printf("d_in index: %d\n", curRow*cols+c);
pixelVal += ((float)d_filter[topRow * filterWidth + blurCol]) * (float)d_in[curRow * cols + c];
}
__syncthreads();
}
d_out[r * cols + c] = (unsigned char)pixelVal;
}
__syncthreads();
}
/*
Given an input RGBA image separate
that into appropriate rgba channels.
*/
__global__
void separateChannels(uchar4 *d_imrgba, unsigned char *d_r, unsigned char *d_g, unsigned char *d_b,
const int rows, const int cols){
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int offset = r * cols + c;
if(r < rows && c < cols)
{
d_r[offset] = d_imrgba[offset].x;
d_g[offset] = d_imrgba[offset].y;
d_b[offset] = d_imrgba[offset].z;
}
}
/*
Given input channels combine them
into a single uchar4 channel.
You can use some handy constructors provided by the
cuda library i.e.
make_int2(x, y) -> creates a vector of type int2 having x,y components
make_uchar4(x,y,z,255) -> creates a vector of uchar4 type x,y,z components
the last argument being the transparency value.
*/
__global__
void recombineChannels(unsigned char *d_r, unsigned char *d_g, unsigned char *d_b, uchar4 *d_orgba,
const int rows, const int cols){
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if(r < rows && c < cols)
{
unsigned char red = d_r[r * cols + c];
unsigned char green = d_g[r*cols+c];
unsigned char blue = d_b[r*cols+c];
uchar4 recombine = make_uchar4(blue, green, red, 255);
d_orgba[r*cols+c] = recombine;
}
}
void your_gauss_blur(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t rows, size_t cols,
unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue,
unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred,
float *d_filter, int filterWidth){
dim3 blockSize(BLOCK,BLOCK,1);
dim3 gridSize(ceil(cols/BLOCK)+1,ceil(rows/BLOCK)+1,1);
separateChannels<<<gridSize, blockSize>>>(d_imrgba, d_red, d_green, d_blue, rows, cols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// Blur each channel with the full 2D gaussian kernel.
gaussianBlur<<<gridSize, blockSize>>>(d_red, d_rblurred, rows, cols, d_filter, filterWidth);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
gaussianBlur<<<gridSize, blockSize>>>(d_green, d_gblurred, rows, cols, d_filter, filterWidth);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
gaussianBlur<<<gridSize, blockSize>>>(d_blue, d_bblurred, rows, cols, d_filter, filterWidth);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
recombineChannels<<<gridSize, blockSize>>>(d_rblurred, d_gblurred, d_bblurred, d_oimrgba, rows, cols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
7a5f15676b6057a91d7611b819aae6a0bc208acc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Sqrt_V.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *a = NULL;
hipMalloc(&a, sizeof(float)*XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, sizeof(float)*XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( Sqrt_V), dim3(gridBlock),dim3(threadBlock), 0, 0, a,out,n);
hipDeviceSynchronize();
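// Warm-up launches: run the kernel a few times so one-time initialization costs
// are excluded from the timed loop below.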
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( Sqrt_V), dim3(gridBlock),dim3(threadBlock), 0, 0, a,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( Sqrt_V), dim3(gridBlock),dim3(threadBlock), 0, 0, a,out,n);
}
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7a5f15676b6057a91d7611b819aae6a0bc208acc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Sqrt_V.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *a = NULL;
cudaMalloc(&a, sizeof(float)*XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, sizeof(float)*XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Sqrt_V<<<gridBlock,threadBlock>>>(a,out,n);
cudaDeviceSynchronize();
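// Warm-up launches: run the kernel a few times so one-time initialization costs
// are excluded from the timed loop below.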
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Sqrt_V<<<gridBlock,threadBlock>>>(a,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Sqrt_V<<<gridBlock,threadBlock>>>(a,out,n);
}
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9ff4dd29375fa711b9a883cfdae369e022d4f480.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VideoInputFromBlackMagic.hpp"
// usleep
#include <unistd.h>
#include <stdio.h>
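// separateFramesKernel de-interleaves a line-interleaved stereo frame: even rows
// are copied (line-doubled) into the right-eye image, odd rows into the left-eye image.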
__global__ void separateFramesKernel(unsigned char* bothFrames, unsigned char* rightFrame, unsigned char* leftFrame, int sizeRow){
const int outputXIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int outputYIndex = blockIdx.y * blockDim.y + threadIdx.y;
int output_tid = outputYIndex * sizeRow + (outputXIndex * 3);
int output_tid_p1 = (outputYIndex+1) * sizeRow + ((outputXIndex) * 3);
int output_tid_m1 = (outputYIndex-1) * sizeRow + ((outputXIndex) * 3);
if(outputYIndex % 2 == 0){
rightFrame[output_tid] = bothFrames[output_tid];
rightFrame[output_tid+1] = bothFrames[output_tid+1];
rightFrame[output_tid+2] = bothFrames[output_tid+2];
rightFrame[output_tid_p1] = bothFrames[output_tid];
rightFrame[output_tid_p1+1] = bothFrames[output_tid+1];
rightFrame[output_tid_p1+2] = bothFrames[output_tid+2];
} else {
leftFrame[output_tid_m1] = bothFrames[output_tid];
leftFrame[output_tid_m1+1] = bothFrames[output_tid+1];
leftFrame[output_tid_m1+2] = bothFrames[output_tid+2];
leftFrame[output_tid] = bothFrames[output_tid];
leftFrame[output_tid+1] = bothFrames[output_tid+1];
leftFrame[output_tid+2] = bothFrames[output_tid+2];
}
}
bool VideoInputFromBlackMagic::separateFrames(cv::Mat* left, cv::Mat* right, cv::Mat* combined){
if(!this->initCuda){
//Init the array
this->sizeCurrentImageData = combined->rows * combined->step;
this->sizeImageRData = right->rows * right->step;
this->sizeImageLData = left->rows * left->step;
hipMalloc((void **) &this->currentImageDevice, this->sizeCurrentImageData);
hipMalloc((void **) &this->ImageRightDevice, this->sizeImageRData);
hipMalloc((void **) &this->ImageLeftDevice, this->sizeImageLData);
this->initCuda = true;
}
hipMemcpy(this->currentImageDevice, combined->ptr(), this->sizeCurrentImageData, hipMemcpyHostToDevice) ;
hipMemcpy(this->ImageRightDevice, right->ptr(), this->sizeImageRData, hipMemcpyHostToDevice) ;
hipMemcpy(this->ImageLeftDevice, left->ptr(), this->sizeImageLData, hipMemcpyHostToDevice) ;
//Specify a reasonable block size
const dim3 block(16,16);
//Grid
const dim3 grid((combined->cols + block.x - 1)/block.x, (combined->rows + block.y - 1)/block.y);
//Call the kernel
hipLaunchKernelGGL(( separateFramesKernel), dim3(grid),dim3(block), 0, 0, this->currentImageDevice, this->ImageRightDevice, this->ImageLeftDevice, combined->step);
hipDeviceSynchronize();
hipMemcpy(right->ptr(),this->ImageRightDevice,this->sizeImageRData,hipMemcpyDeviceToHost);
hipMemcpy(left->ptr(),this->ImageLeftDevice,this->sizeImageLData,hipMemcpyDeviceToHost);
return true;
}
//Constructor
VideoInputFromBlackMagic::VideoInputFromBlackMagic(): m_refCount(1){
this->isStereo = true;
this->running = false;
this->initialized = false;
this->initCuda = false;
this->updating = false;
}
VideoInputFromBlackMagic::VideoInputFromBlackMagic(bool isstereo): m_refCount(1){
this->isStereo = isstereo;
this->running = false;
this->initialized = false;
this->initCuda = false;
this->updating = false;
}
//Destructor: free the device buffers allocated lazily in separateFrames
VideoInputFromBlackMagic::~VideoInputFromBlackMagic(){
  if(this->initCuda){
    hipFree((void*)this->currentImageDevice);
    hipFree((void*)this->ImageRightDevice);
    hipFree((void*)this->ImageLeftDevice);
  }
}
//Run call
std::thread VideoInputFromBlackMagic::run(){
printf("VideoInputFromBlackMagic : run function has been called...\n");
std::thread mainThread(runThread, this);
return mainThread;
}
//Run sub function
void VideoInputFromBlackMagic::runThread(VideoInputFromBlackMagic* context){
context->runInput();
}
//Run (real stuff)
void VideoInputFromBlackMagic::runInput(){
fprintf(stdout, "Run\n");
if(!this->running){
this->running=true;
int idx;
//Check result
HRESULT result;
IDeckLink* deckLink = NULL;
IDeckLinkInput* g_deckLinkInput = NULL;
IDeckLinkAttributes* deckLinkAttributes = NULL;
IDeckLinkIterator* deckLinkIterator = CreateDeckLinkIteratorInstance();
IDeckLinkDisplayModeIterator* displayModeIterator = NULL;
IDeckLinkDisplayMode* displayMode = NULL;
char* displayModeName = NULL;
BMDDisplayModeSupport displayModeSupported;
bool formatDetectionSupported;
if (!deckLinkIterator)
{
fprintf(stderr, "This application requires the DeckLink drivers installed.\n");
return;
}
//Get the DeckLink Inputs
result = deckLinkIterator->Next(&deckLink);
result = deckLink->QueryInterface(IID_IDeckLinkInput, (void**)&g_deckLinkInput);
if(result != S_OK){
fprintf(stdout, "Cannot get the Input : DeckLink Error\n");
return;
}
//Get the DeckLink attributes (that may not correctly work: format detection does not properly work)
result = deckLink->QueryInterface(IID_IDeckLinkAttributes, (void**)&deckLinkAttributes);
if (!(result == S_OK)){
fprintf(stdout, "Cannot get the DeckLink attributes : DeckLink Error\n");
return;
}
//Format detection
result = deckLinkAttributes->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &formatDetectionSupported);
if (result != S_OK || !formatDetectionSupported){
fprintf(stdout,"Cannot get the format input: DeckLink Error\n");
return;
}
//Index for the different inputs
idx = 0;
//Get all the displayModes
result = g_deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
if (result != S_OK){
fprintf(stdout,"Cannot set an iterator on the different display modes: DeckLink Error\n");
}
//Set idx
while ((result = displayModeIterator->Next(&displayMode)) == S_OK)
{
if (idx == 0)
break;
--idx;
displayMode->Release();
}
if (result != S_OK || displayMode == NULL){
fprintf(stdout,"Cannot get the main display mode: DeckLink Error\n");
return;
}
//Get Mode name: useless
result = displayMode->GetName((const char**)&displayModeName);
// Check display mode is supported with given options
result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p50, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709, &displayModeSupported, NULL);
if (result != S_OK){
fprintf(stdout,"Video Mode not supported : aborted\n");
return;
}
if (displayModeSupported == bmdDisplayModeNotSupported)
{
fprintf(stdout, "The display mode %s is not supported with the selected pixel format\n", displayModeName);
return;
}
//Set the callback on this (defines the callbacks, e.g. VideoInputFrameArrived, that are invoked when images arrive or other events happen)
g_deckLinkInput->SetCallback(this);
//Enable the video input with the selected format
result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p50, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709);
if (result != S_OK)
{
fprintf(stderr, "Failed to enable video input. Maybe another application is using the card.\n");
return;
}
//Disable the audio
result = g_deckLinkInput->DisableAudioInput();
//Start the stream
result = g_deckLinkInput->StartStreams();
if (result != S_OK){
fprintf(stdout,"Error while starting the streaming : aborted\n");
}
while(this->running){
//Busy-wait: the capture thread must not end while running... this is dirty (TODO: mutex/condition variable?)
}
}
}
//A frame arrived
HRESULT VideoInputFromBlackMagic::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioFrame){
if(!this->updating){
this->updating = true;
if (!videoFrame){
fprintf(stdout,"Update: No video frame\n");
return S_FALSE;
}
void* data;
if (FAILED(videoFrame->GetBytes(&data))){
fprintf(stdout,"Fail obtaining the data from videoFrame\n");
return S_FALSE;
}
cv::Mat loadedImage;
cv::Mat mat = cv::Mat(videoFrame->GetHeight(), videoFrame->GetWidth(), CV_8UC2, data, videoFrame->GetRowBytes());
cv::cvtColor(mat, loadedImage, CV_YUV2BGR_UYVY);
if (!loadedImage.data){
fprintf(stdout,"No frame loaded from the video : mainImage will not be updated\n");
} else {
if(this->isStereo){
cv::Mat loadedImageRight = cv::Mat::zeros(loadedImage.rows,loadedImage.cols, loadedImage.type());
cv::Mat loadedImageLeft = cv::Mat::zeros(loadedImage.rows,loadedImage.cols, loadedImage.type()) ;
if(!this->separateFrames(&loadedImageLeft, &loadedImageRight, &loadedImage)){
fprintf(stdout,"Error while the separation of left and right frame\n");
}
//Update the images
//Mutex here
this->mtxImages.lock();
this->currentImageLeft = loadedImageLeft.clone();
this->currentImageRight = loadedImageRight.clone();
this->initialized = true;
this->mtxImages.unlock();
} else {
//Update the left image only (careful: the right image is not updated in monocular mode)
//Mutex here
this->mtxImages.lock();
this->currentImageLeft = loadedImage.clone();
this->initialized = true;
this->mtxImages.unlock();
}
}
this->updating=false;
return S_OK;
} else {
return S_OK;
}
}
//DeckLink stuff: not important
ULONG VideoInputFromBlackMagic::AddRef(void)
{
return __sync_add_and_fetch(&m_refCount, 1);
}
ULONG VideoInputFromBlackMagic::Release(void)
{
int32_t newRefValue = __sync_sub_and_fetch(&m_refCount, 1);
if (newRefValue == 0)
{
return 0;
}
return newRefValue;
}
HRESULT VideoInputFromBlackMagic::VideoInputFormatChanged(BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode, BMDDetectedVideoInputFormatFlags formatFlags){
fprintf(stdout,"VideoInputFormatChanged: Not supported\n");
return S_OK;
}
//Stop TODO unlock mutex
void VideoInputFromBlackMagic::stop(){
this->running=false;
}
bool VideoInputFromBlackMagic::isRunning(){
return this->running;
}
bool VideoInputFromBlackMagic::isInitialized(){
return this->initialized;
}
//This is dirty: returns clones of the frames (TODO: avoid the copies to make it faster)
void VideoInputFromBlackMagic::getFrames(cv::Mat & leftI, cv::Mat & rightI){
if(!isStereo){
std::cout << "Warning : Using left and right images in monocular mode" << std::endl;
}
this->mtxImages.lock();
leftI = this->currentImageLeft.clone();
rightI = this->currentImageRight.clone();
this->mtxImages.unlock();
}
void VideoInputFromBlackMagic::getFrames(cv::Mat & leftI){
this->mtxImages.lock();
leftI = this->currentImageLeft.clone();
this->mtxImages.unlock();
}
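// Illustrative usage sketch (not part of the original file): one way a caller might drive
// this class end to end -- start the capture thread, wait for the first frame, grab a
// stereo pair, then stop. Guarded out so it cannot clash with the application's real main().
#ifdef VIDEOINPUT_USAGE_EXAMPLE
#include <thread>
#include <chrono>
int main(){
VideoInputFromBlackMagic input(true); // stereo capture
std::thread captureThread = input.run();
while(!input.isInitialized()){ // wait until the first frame has been decoded
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
cv::Mat left, right;
input.getFrames(left, right);
input.stop();
captureThread.join();
return 0;
}
#endif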
| 9ff4dd29375fa711b9a883cfdae369e022d4f480.cu | #include "VideoInputFromBlackMagic.hpp"
// usleep
#include <unistd.h>
#include <stdio.h>
__global__ void separateFramesKernel(unsigned char* bothFrames, unsigned char* rightFrame, unsigned char* leftFrame, int sizeRow){
const int outputXIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int outputYIndex = blockIdx.y * blockDim.y + threadIdx.y;
int output_tid = outputYIndex * sizeRow + (outputXIndex * 3);
int output_tid_p1 = (outputYIndex+1) * sizeRow + ((outputXIndex) * 3);
int output_tid_m1 = (outputYIndex-1) * sizeRow + ((outputXIndex) * 3);
if(outputYIndex % 2 == 0){
rightFrame[output_tid] = bothFrames[output_tid];
rightFrame[output_tid+1] = bothFrames[output_tid+1];
rightFrame[output_tid+2] = bothFrames[output_tid+2];
rightFrame[output_tid_p1] = bothFrames[output_tid];
rightFrame[output_tid_p1+1] = bothFrames[output_tid+1];
rightFrame[output_tid_p1+2] = bothFrames[output_tid+2];
} else {
leftFrame[output_tid_m1] = bothFrames[output_tid];
leftFrame[output_tid_m1+1] = bothFrames[output_tid+1];
leftFrame[output_tid_m1+2] = bothFrames[output_tid+2];
leftFrame[output_tid] = bothFrames[output_tid];
leftFrame[output_tid+1] = bothFrames[output_tid+1];
leftFrame[output_tid+2] = bothFrames[output_tid+2];
}
}
bool VideoInputFromBlackMagic::separateFrames(cv::Mat* left, cv::Mat* right, cv::Mat* combined){
if(!this->initCuda){
//Init the array
this->sizeCurrentImageData = combined->rows * combined->step;
this->sizeImageRData = right->rows * right->step;
this->sizeImageLData = left->rows * left->step;
cudaMalloc((void **) &this->currentImageDevice, this->sizeCurrentImageData);
cudaMalloc((void **) &this->ImageRightDevice, this->sizeImageRData);
cudaMalloc((void **) &this->ImageLeftDevice, this->sizeImageLData);
this->initCuda = true;
}
cudaMemcpy(this->currentImageDevice, combined->ptr(), this->sizeCurrentImageData, cudaMemcpyHostToDevice) ;
cudaMemcpy(this->ImageRightDevice, right->ptr(), this->sizeImageRData, cudaMemcpyHostToDevice) ;
cudaMemcpy(this->ImageLeftDevice, left->ptr(), this->sizeImageLData, cudaMemcpyHostToDevice) ;
//Specify a reasonable block size
const dim3 block(16,16);
//Grid
const dim3 grid((combined->cols + block.x - 1)/block.x, (combined->rows + block.y - 1)/block.y);
//Call the kernel
separateFramesKernel<<<grid,block>>>(this->currentImageDevice, this->ImageRightDevice, this->ImageLeftDevice, combined->step);
cudaDeviceSynchronize();
cudaMemcpy(right->ptr(),this->ImageRightDevice,this->sizeImageRData,cudaMemcpyDeviceToHost);
cudaMemcpy(left->ptr(),this->ImageLeftDevice,this->sizeImageLData,cudaMemcpyDeviceToHost);
return true;
}
//Constructor
VideoInputFromBlackMagic::VideoInputFromBlackMagic(): m_refCount(1){
this->isStereo = true;
this->running = false;
this->initialized = false;
this->initCuda = false;
this->updating = false;
}
VideoInputFromBlackMagic::VideoInputFromBlackMagic(bool isstereo): m_refCount(1){
this->isStereo = isstereo;
this->running = false;
this->initialized = false;
this->initCuda = false;
this->updating = false;
}
//Destructor
//TODO FREE CUDA MEMORY
VideoInputFromBlackMagic::~VideoInputFromBlackMagic(){
}
//Run call
std::thread VideoInputFromBlackMagic::run(){
printf("VideoInputFromBlackMagic : run function has been called...\n");
std::thread mainThread(runThread, this);
return mainThread;
}
//Run sub function
void VideoInputFromBlackMagic::runThread(VideoInputFromBlackMagic* context){
context->runInput();
}
//Run (real stuff)
void VideoInputFromBlackMagic::runInput(){
fprintf(stdout, "Run\n");
if(!this->running){
this->running=true;
int idx;
//Check result
HRESULT result;
IDeckLink* deckLink = NULL;
IDeckLinkInput* g_deckLinkInput = NULL;
IDeckLinkAttributes* deckLinkAttributes = NULL;
IDeckLinkIterator* deckLinkIterator = CreateDeckLinkIteratorInstance();
IDeckLinkDisplayModeIterator* displayModeIterator = NULL;
IDeckLinkDisplayMode* displayMode = NULL;
char* displayModeName = NULL;
BMDDisplayModeSupport displayModeSupported;
bool formatDetectionSupported;
if (!deckLinkIterator)
{
fprintf(stderr, "This application requires the DeckLink drivers installed.\n");
return;
}
//Get the DeckLink Inputs
result = deckLinkIterator->Next(&deckLink);
result = deckLink->QueryInterface(IID_IDeckLinkInput, (void**)&g_deckLinkInput);
if(result != S_OK){
fprintf(stdout, "Cannot get the Input : DeckLink Error\n");
return;
}
//Get the DeckLink attributes (note: input format detection may not work reliably on this path)
result = deckLink->QueryInterface(IID_IDeckLinkAttributes, (void**)&deckLinkAttributes);
if (!(result == S_OK)){
fprintf(stdout, "Cannot get the DeckLink attributes : DeckLink Error\n");
return;
}
//Format detection
result = deckLinkAttributes->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &formatDetectionSupported);
if (result != S_OK || !formatDetectionSupported){
fprintf(stdout,"Cannot get the format input: DeckLink Error\n");
return;
}
//Index for the different inputs
idx = 0;
//Get all the displayModes
result = g_deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
if (result != S_OK){
fprintf(stdout,"Cannot set an iterator on the different display modes: DeckLink Error\n");
}
//Set idx
while ((result = displayModeIterator->Next(&displayMode)) == S_OK)
{
if (idx == 0)
break;
--idx;
displayMode->Release();
}
if (result != S_OK || displayMode == NULL){
fprintf(stdout,"Cannot get the main display mode: DeckLink Error\n");
return;
}
//Get Mode name: useless
result = displayMode->GetName((const char**)&displayModeName);
// Check display mode is supported with given options
result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p50, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709, &displayModeSupported, NULL);
if (result != S_OK){
fprintf(stdout,"Video Mode not supported : aborted\n");
return;
}
if (displayModeSupported == bmdDisplayModeNotSupported)
{
fprintf(stdout, "The display mode %s is not supported with the selected pixel format\n", displayModeName);
return;
}
//Set the callback on this (defines VideoInputFrameArrived and the other handlers called when frames arrive or other events happen)
g_deckLinkInput->SetCallback(this);
//Enable the video input with the selected format
result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p50, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709);
if (result != S_OK)
{
fprintf(stderr, "Failed to enable video input. Maybe another application is using the card.\n");
return;
}
//Disable the audio
result = g_deckLinkInput->DisableAudioInput();
//Start the stream
result = g_deckLinkInput->StartStreams();
if (result != S_OK){
fprintf(stdout,"Error while starting the streaming : aborted\n");
}
while(this->running){
//Busy-wait: the thread must not end. This is dirty; TODO replace with a mutex/condition variable.
}
}
}
//A frame arrived
HRESULT VideoInputFromBlackMagic::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioFrame){
if(!this->updating){
this->updating = true;
if (!videoFrame){
fprintf(stdout,"Update: No video frame\n");
return S_FALSE;
}
void* data;
if (FAILED(videoFrame->GetBytes(&data))){
fprintf(stdout,"Fail obtaining the data from videoFrame\n");
return S_FALSE;
}
cv::Mat loadedImage;
cv::Mat mat = cv::Mat(videoFrame->GetHeight(), videoFrame->GetWidth(), CV_8UC2, data, videoFrame->GetRowBytes());
cv::cvtColor(mat, loadedImage, CV_YUV2BGR_UYVY);
if (!loadedImage.data){
fprintf(stdout,"No frame loaded from the video : mainImage will not be updated\n");
} else {
if(this->isStereo){
cv::Mat loadedImageRight = cv::Mat::zeros(loadedImage.rows,loadedImage.cols, loadedImage.type());
cv::Mat loadedImageLeft = cv::Mat::zeros(loadedImage.rows,loadedImage.cols, loadedImage.type()) ;
if(!this->separateFrames(&loadedImageLeft, &loadedImageRight, &loadedImage)){
fprintf(stdout,"Error while the separation of left and right frame\n");
}
//Update the images
//Mutex here
this->mtxImages.lock();
this->currentImageLeft = loadedImageLeft.clone();
this->currentImageRight = loadedImageRight.clone();
this->initialized = true;
this->mtxImages.unlock();
} else {
//Update the left image only (careful: the right image is not updated in monocular mode)
//Mutex here
this->mtxImages.lock();
this->currentImageLeft = loadedImage.clone();
this->initialized = true;
this->mtxImages.unlock();
}
}
this->updating=false;
return S_OK;
} else {
return S_OK;
}
}
//DeckLink stuff: not important
ULONG VideoInputFromBlackMagic::AddRef(void)
{
return __sync_add_and_fetch(&m_refCount, 1);
}
ULONG VideoInputFromBlackMagic::Release(void)
{
int32_t newRefValue = __sync_sub_and_fetch(&m_refCount, 1);
if (newRefValue == 0)
{
return 0;
}
return newRefValue;
}
HRESULT VideoInputFromBlackMagic::VideoInputFormatChanged(BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode, BMDDetectedVideoInputFormatFlags formatFlags){
fprintf(stdout,"VideoInputFormatChanged: Not supported\n");
return S_OK;
}
//Stop TODO unlock mutex
void VideoInputFromBlackMagic::stop(){
this->running=false;
}
bool VideoInputFromBlackMagic::isRunning(){
return this->running;
}
bool VideoInputFromBlackMagic::isInitialized(){
return this->initialized;
}
//This is dirty: returns clones of the frames (TODO: avoid the copies to make it faster)
void VideoInputFromBlackMagic::getFrames(cv::Mat & leftI, cv::Mat & rightI){
if(!isStereo){
std::cout << "Warning : Using left and right images in monocular mode" << std::endl;
}
this->mtxImages.lock();
leftI = this->currentImageLeft.clone();
rightI = this->currentImageRight.clone();
this->mtxImages.unlock();
}
void VideoInputFromBlackMagic::getFrames(cv::Mat & leftI){
this->mtxImages.lock();
leftI = this->currentImageLeft.clone();
this->mtxImages.unlock();
}
|
08edd17661766488aea3ce040731489000e13a17.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sha256_final.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
SHA256_CTX *ctx = NULL;
hipMalloc(&ctx, XSIZE*YSIZE);
uchar *gpuResult = NULL;
hipMalloc(&gpuResult, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(sha256_final, dim3(gridBlock), dim3(threadBlock), 0, 0, ctx, gpuResult);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sha256_final, dim3(gridBlock), dim3(threadBlock), 0, 0, ctx, gpuResult);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sha256_final, dim3(gridBlock), dim3(threadBlock), 0, 0, ctx, gpuResult);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 08edd17661766488aea3ce040731489000e13a17.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sha256_final.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
SHA256_CTX *ctx = NULL;
cudaMalloc(&ctx, XSIZE*YSIZE);
uchar *gpuResult = NULL;
cudaMalloc(&gpuResult, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sha256_final<<<gridBlock,threadBlock>>>(ctx,gpuResult);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sha256_final<<<gridBlock,threadBlock>>>(ctx,gpuResult);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sha256_final<<<gridBlock,threadBlock>>>(ctx,gpuResult);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e2ed5ce8ee2be590cade4ca373b3e08b8b59ee84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernelMarkInvalidVertex.h"
__global__ void kernelMarkInvalidVertex(int *d_O,int *LO,unsigned int sizeLO,int *d_labelAmount,unsigned int sizeLabelAmount,unsigned int minsup){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<sizeLabelAmount){
if(d_labelAmount[i]<minsup){
for (int j=0;j<sizeLO;++j){
if(LO[j]==i) d_O[j]=-1;
}
}
}
} | e2ed5ce8ee2be590cade4ca373b3e08b8b59ee84.cu | #include "kernelMarkInvalidVertex.h"
__global__ void kernelMarkInvalidVertex(int *d_O,int *LO,unsigned int sizeLO,int *d_labelAmount,unsigned int sizeLabelAmount,unsigned int minsup){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<sizeLabelAmount){
if(d_labelAmount[i]<minsup){
for (int j=0;j<sizeLO;++j){
if(LO[j]==i) d_O[j]=-1;
}
}
}
} |
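// Illustrative host-side launch sketch (not part of the original file): one thread per
// label, with the usual round-up on the grid size. All pointers are assumed to already be
// device buffers; the wrapper name and block size below are hypothetical.
void launchMarkInvalidVertex(int *d_O, int *d_LO, unsigned int sizeLO,
int *d_labelAmount, unsigned int sizeLabelAmount, unsigned int minsup)
{
const unsigned int block = 256u;
const unsigned int grid = (sizeLabelAmount + block - 1u) / block;
kernelMarkInvalidVertex<<<grid, block>>>(d_O, d_LO, sizeLO, d_labelAmount, sizeLabelAmount, minsup);
cudaDeviceSynchronize();
}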
a73dc073b8f374d37a68840f1d7799d5b8d49010.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
Name: histTRISH_Gen.cu
Desc: Implements generic binning histograms on GPU
Disclaimer:
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
// System Includes
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// CUDA Includes
#include <cutil_inline.h>
// Local Includes
#include "Platform.h"
#include "BaseDefs.h"
#include "TRISH_traits.h"
#include "MapToBin.h"
#include "Extract.h"
#include "histogram_common.h"
/*-----------------------------------------------------------------------------
Compiler Settings
-----------------------------------------------------------------------------*/
//#define INTERLEAVE 1
//#define INTERLEAVE 2
#define INTERLEAVE 4
//#define TRISH_VERIFY_HISTOGRAM 1
#define TRISH_VERIFY_HISTOGRAM 0
/*-----------------------------------------------------------------------------
Helper Templates
-----------------------------------------------------------------------------*/
/*-------------------------------------
Name: TRISH_VerifyHistogram
-------------------------------------*/
#if 1 == TRISH_VERIFY_HISTOGRAM
// Verify single byte integers (I8, U8)
template <
typename valT, // Underlying value type
typename mapT // Mapper Type
>
__host__
void TRISH_VerifyHistogram_B1
(
U32 nElems, // IN - number of 32-bit elements to bin & count
U32 * d_gpuElems, // IN - array of elements to bin & count
U32 numBins, // IN - number of bins in histogram
U32 * d_gpuCounts, // IN - GPU histogram counts
valT minVal, // IN - [min,max] values for histogram
valT maxVal // ditto
)
{
assert( numBins > 0u );
assert( numBins <= 256u );
assert( nElems > 0u );
U32 mem_size_elems = nElems * sizeof( U32 );
U32 mem_size_counts = 256u * sizeof( U32 );
U32 * h_cpuElems = NULL;
U32 * h_gpuCounts = NULL;
U32 * h_cpuCounts = NULL;
//-----
// Allocate memory resources
//-----
h_cpuElems = (U32 *)malloc( mem_size_elems );
h_gpuCounts = (U32 *)malloc( mem_size_counts );
h_cpuCounts = (U32 *)malloc( mem_size_counts );
//-----
// Transfer arrays from GPU to CPU
//-----
cutilSafeCall( hipMemcpy( h_cpuElems, d_gpuElems, mem_size_elems, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy( h_gpuCounts, d_gpuCounts, mem_size_counts, hipMemcpyDeviceToHost) );
// Zero CPU counts
for (U32 idx = 0; idx < 256u; idx++)
{
h_cpuCounts[idx] = 0u;
}
// Get TRISH types
typedef ExtractorBytes<U32> Extractor;
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//-----
// Compute CPU row counts
//-----
// Initialize Mapper
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
U32 val1;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
for (U32 idx = 0; idx < nElems; idx+=1u)
{
// Get current values
val1 = h_cpuElems[idx];
// Extract 4 bytes from the single 32-bit value
Extractor::Extract4( b1, b2, b3, b4, val1 );
// Transform values into bins
mapper.Transform4( bin1, bin2, bin3, bin4, // Out => transformed bins
b1, b2, b3, b4 ); // In => values to transform into bins
// Bin results
h_cpuCounts[bin1] += 1u;
h_cpuCounts[bin2] += 1u;
h_cpuCounts[bin3] += 1u;
h_cpuCounts[bin4] += 1u;
}
// Cleanup Mapper
mapper.Finish();
//-----
// Compare CPU vs. GPU totals
//-----
U64 totalCPU = 0ull;
U64 totalGPU = 0ull;
for (U32 idx = 0; idx < numBins; idx++)
{
U32 cpuCount = h_cpuCounts[idx];
U32 gpuCount = h_gpuCounts[idx];
totalCPU += (U64)cpuCount;
totalGPU += (U64)gpuCount;
if (cpuCount != gpuCount)
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) != GPU (%u) !!! ERROR !!!\n",
idx, cpuCount, gpuCount );
}
else
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) == GPU (%u) Success\n",
idx, cpuCount, gpuCount );
}
}
// Get items below range
U32 minCPU, minGPU;
minCPU = h_cpuCounts[numBins+1];
minGPU = h_gpuCounts[numBins+1];
if (minCPU != minGPU)
{
fprintf( stdout, "For < min (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
minVal, minCPU, minGPU );
}
else
{
fprintf( stdout, "For < min (%d), CPU count (%d) == GPU count (%d) Success\n",
minVal, minCPU, minGPU );
}
totalCPU += (U64)minCPU;
totalGPU += (U64)minGPU;
// Get items above range
U32 maxCPU, maxGPU;
maxCPU = h_cpuCounts[numBins+2];
maxGPU = h_gpuCounts[numBins+2];
if (maxCPU != maxGPU)
{
fprintf( stdout, "For > max (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
maxVal, maxCPU, maxGPU );
}
else
{
fprintf( stdout, "For > max (%d), CPU count (%d) == GPU count (%d) Success\n",
maxVal, maxCPU, maxGPU );
}
totalCPU += (U64)maxCPU;
totalGPU += (U64)maxGPU;
// Verify final counts
if (totalCPU != totalGPU)
{
fprintf( stdout, "\nTotal CPU (%I64u) != Total GPU (%I64u) !!! ERROR !!!\n\n\n",
totalCPU, totalGPU );
}
else
{
fprintf( stdout, "\nTotal CPU (%I64u) == Total GPU (%I64u) Success\n\n\n",
totalCPU, totalGPU );
}
//-----
// Free memory resources
//-----
free( h_cpuCounts );
free( h_gpuCounts );
free( h_cpuElems );
}
// Verify 2 byte integers (I16, U16)
template <
typename valT, // Underlying value type
typename mapT // Mapper Type
>
__host__
void TRISH_VerifyHistogram_B2
(
U32 nElems, // IN - number of 32-bit elements to bin & count
U32 * d_gpuElems, // IN - array of elements to bin & count
U32 numBins, // IN - number of bins in histogram
U32 * d_gpuCounts, // IN - GPU histogram counts
valT minVal, // IN - [min,max] values for histogram
valT maxVal // ditto
)
{
assert( numBins > 0u );
assert( numBins <= 256u );
assert( nElems > 0u );
U32 mem_size_elems = nElems * sizeof( U32 );
U32 mem_size_counts = 256u * sizeof( U32 );
U32 * h_cpuElems = NULL;
U32 * h_gpuCounts = NULL;
U32 * h_cpuCounts = NULL;
//-----
// Allocate memory resources
//-----
h_cpuElems = (U32 *)malloc( mem_size_elems );
h_gpuCounts = (U32 *)malloc( mem_size_counts );
h_cpuCounts = (U32 *)malloc( mem_size_counts );
//-----
// Transfer arrays from GPU to CPU
//-----
cutilSafeCall( hipMemcpy( h_cpuElems, d_gpuElems, mem_size_elems, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy( h_gpuCounts, d_gpuCounts, mem_size_counts, hipMemcpyDeviceToHost) );
// Zero CPU counts
for (U32 idx = 0; idx < 256u; idx++)
{
h_cpuCounts[idx] = 0u;
}
// Get TRISH types
typedef ExtractorWords<U32> Extractor;
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//-----
// Compute CPU row counts
//-----
// Initialize Mapper
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
U32 val1, val2;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
for (U32 idx = 0u; idx < nElems; idx+=2u)
{
// Get current value
val1 = h_cpuElems[idx];
val2 = h_cpuElems[idx+1u];
// Extract 4 words from 2 values
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
// Transform
mapper.Transform4( bin1, bin2, bin3, bin4, // Out => transformed bins
b1, b2, b3, b4 ); // In => values to transform into bins
// Bin results
h_cpuCounts[bin1] += 1u;
h_cpuCounts[bin2] += 1u;
h_cpuCounts[bin3] += 1u;
h_cpuCounts[bin4] += 1u;
}
// Cleanup Mapper
mapper.Finish();
//-----
// Compare CPU vs. GPU totals
//-----
U64 totalCPU = 0ull;
U64 totalGPU = 0ull;
for (U32 idx = 0; idx < numBins; idx++)
{
U32 cpuCount = h_cpuCounts[idx];
U32 gpuCount = h_gpuCounts[idx];
totalCPU += (U64)cpuCount;
totalGPU += (U64)gpuCount;
if (cpuCount != gpuCount)
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) != GPU (%u) !!! ERROR !!!\n",
idx, cpuCount, gpuCount );
}
else
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) == GPU (%u) Success\n",
idx, cpuCount, gpuCount );
}
}
// Get items below range
U32 minCPU, minGPU;
minCPU = h_cpuCounts[numBins+1];
minGPU = h_gpuCounts[numBins+1];
if (minCPU != minGPU)
{
fprintf( stdout, "For < min (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
minVal, minCPU, minGPU );
}
else
{
fprintf( stdout, "For < min (%d), CPU count (%d) == GPU count (%d) Success\n",
minVal, minCPU, minGPU );
}
totalCPU += (U64)minCPU;
totalGPU += (U64)minGPU;
// Get items above range
U32 maxCPU, maxGPU;
maxCPU = h_cpuCounts[numBins+2];
maxGPU = h_gpuCounts[numBins+2];
if (maxCPU != maxGPU)
{
fprintf( stdout, "For > max (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
maxVal, maxCPU, maxGPU );
}
else
{
fprintf( stdout, "For > max (%d), CPU count (%d) == GPU count (%d) Success\n",
maxVal, maxCPU, maxGPU );
}
totalCPU += (U64)maxCPU;
totalGPU += (U64)maxGPU;
// Verify final counts
if (totalCPU != totalGPU)
{
fprintf( stdout, "\nTotal CPU (%I64u) != Total GPU (%I64u) !!! ERROR !!!\n\n\n",
totalCPU, totalGPU );
}
else
{
fprintf( stdout, "\nTotal CPU (%I64u) == Total GPU (%I64u) Success\n\n\n",
totalCPU, totalGPU );
}
//-----
// Free memory resources
//-----
free( h_cpuCounts );
free( h_gpuCounts );
free( h_cpuElems );
}
// Verify 4 byte integers (I32, U32)
template <
typename valT, // Underlying value type
typename mapT // Mapper Type
>
__host__
void TRISH_VerifyHistogram_B4
(
U32 nElems, // IN - number of 32-bit elements to bin & count
valT * d_gpuElems, // IN - array of elements to bin & count
U32 numBins, // IN - number of bins in histogram
U32 * d_gpuCounts, // IN - GPU histogram counts
valT minVal, // IN - [min,max] values for histogram
valT maxVal // ditto
)
{
assert( numBins > 0u );
assert( numBins <= 256u );
assert( nElems > 0u );
U32 mem_size_elems = nElems * sizeof( valT );
U32 mem_size_counts = 256u * sizeof( U32 );
valT * h_cpuElems = NULL;
U32 * h_gpuCounts = NULL;
U32 * h_cpuCounts = NULL;
//-----
// Allocate memory resources
//-----
h_cpuElems = (valT *)malloc( mem_size_elems );
h_gpuCounts = (U32 *)malloc( mem_size_counts );
h_cpuCounts = (U32 *)malloc( mem_size_counts );
//-----
// Transfer arrays from GPU to CPU
//-----
cutilSafeCall( hipMemcpy( h_cpuElems, d_gpuElems, mem_size_elems, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy( h_gpuCounts, d_gpuCounts, mem_size_counts, hipMemcpyDeviceToHost) );
// Zero CPU counts
for (U32 idx = 0; idx < 256u; idx++)
{
h_cpuCounts[idx] = 0u;
}
// Get TRISH types
typedef ExtractorWords<valT> Extractor;
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//-----
// Compute CPU row counts
//-----
// Initialize Mapper
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
U32 nRows = nElems / 4u;
U32 nCols = nElems % 4u;
valT val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
for (U32 idx = 0u; idx < (nRows * 4u); idx+=4u) // 4 values per pass; the nCols leftover elements are not verified here
{
// Get current value
val1 = h_cpuElems[idx];
val2 = h_cpuElems[idx+1u];
val3 = h_cpuElems[idx+2u];
val4 = h_cpuElems[idx+3u];
b1 = (upscaleType)val1;
b2 = (upscaleType)val2;
b3 = (upscaleType)val3;
b4 = (upscaleType)val4;
// Transform
mapper.Transform4( bin1, bin2, bin3, bin4, // Out => transformed bins
b1, b2, b3, b4 ); // In => values to transform into bins
// Bin results
h_cpuCounts[bin1] += 1u;
h_cpuCounts[bin2] += 1u;
h_cpuCounts[bin3] += 1u;
h_cpuCounts[bin4] += 1u;
}
// Cleanup Mapper
mapper.Finish();
//-----
// Compare CPU vs. GPU totals
//-----
U64 totalCPU = 0ull;
U64 totalGPU = 0ull;
for (U32 idx = 0; idx < numBins; idx++)
{
U32 cpuCount = h_cpuCounts[idx];
U32 gpuCount = h_gpuCounts[idx];
totalCPU += (U64)cpuCount;
totalGPU += (U64)gpuCount;
if (cpuCount != gpuCount)
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) != GPU (%u) !!! ERROR !!!\n",
idx, cpuCount, gpuCount );
}
else
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) == GPU (%u) Success\n",
idx, cpuCount, gpuCount );
}
}
// Get items below range
U32 minCPU, minGPU;
minCPU = h_cpuCounts[numBins+1];
minGPU = h_gpuCounts[numBins+1];
if (minCPU != minGPU)
{
fprintf( stdout, "For < min (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
minVal, minCPU, minGPU );
}
else
{
fprintf( stdout, "For < min (%d), CPU count (%d) == GPU count (%d) Success\n",
minVal, minCPU, minGPU );
}
totalCPU += (U64)minCPU;
totalGPU += (U64)minGPU;
// Get items above range
U32 maxCPU, maxGPU;
maxCPU = h_cpuCounts[numBins+2];
maxGPU = h_gpuCounts[numBins+2];
if (maxCPU != maxGPU)
{
fprintf( stdout, "For > max (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
maxVal, maxCPU, maxGPU );
}
else
{
fprintf( stdout, "For > max (%d), CPU count (%d) == GPU count (%d) Success\n",
maxVal, maxCPU, maxGPU );
}
totalCPU += (U64)maxCPU;
totalGPU += (U64)maxGPU;
// Verify final counts
if (totalCPU != totalGPU)
{
fprintf( stdout, "\nTotal CPU (%I64u) != Total GPU (%I64u) !!! ERROR !!!\n\n\n",
totalCPU, totalGPU );
}
else
{
fprintf( stdout, "\nTotal CPU (%I64u) == Total GPU (%I64u) Success\n\n\n",
totalCPU, totalGPU );
}
//-----
// Free memory resources
//-----
free( h_cpuCounts );
free( h_gpuCounts );
free( h_cpuElems );
}
#endif
/*-----------------------------------------------
Name: BinCounts
Desc: Adds Bins into count array
-----------------------------------------------*/
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount1
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1 // IN - input 'bins' to count
)
{
// Lane Row[0..63] = bin / 4
U32 LI_1;
LI_1 = bin1 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
// Lane Col[0,1,2,3] = bin % 4
U32 col1;
col1 = bin1 & 0x3u;
// Shift[0,8,16,24] = Lane Col [0,1,2,3] * 8
U32 s1;
s1 = col1 << 3u;
// Get Increments
U32 inc1;
inc1 = 1u << s1;
U32 oldCnt, newCnt;
//-----
// Add bin counts into count array
//-----
// Increment 1st bin count
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
}
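// Illustrative sketch (not part of the original file): a tiny host-side exercise of
// BinCount1 showing how a bin index selects a 32-bit lane (bin / 4) and a byte within
// that lane (bin % 4). With BlockSize = 1 the per-thread stride drops out of the math.
static void Demo_BinCount1()
{
U32 cnts[64] = { 0u }; // 64 lanes * 4 packed 8-bit counters = 256 bins
BinCount1<1u>( cnts, 201u ); // bin 201 -> lane 50, byte 1 (increment = 1 << 8)
BinCount1<1u>( cnts, 201u );
assert( cnts[50] == 0x00000200u ); // two increments of 256 land in byte 1 of lane 50
}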
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount2
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1, // IN - input 'bins' to count
U32 bin2
)
{
// Lane Row = bin / 4
U32 LI_1, LI_2;
LI_1 = bin1 >> 2u;
LI_2 = bin2 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
LI_2 = LI_2 * BlockSize;
// Lane Col = bin % 4
U32 col1, col2;
col1 = bin1 & 0x3u;
col2 = bin2 & 0x3u;
// Shift = Lane Col [0,1,2,3] * 8
U32 s1, s2;
s1 = col1 << 3u;
s2 = col2 << 3u;
// Get Increments
U32 inc1, inc2;
inc1 = 1u << s1;
inc2 = 1u << s2;
//-----
// Add bin counts into count array
//-----
U32 oldCnt, newCnt;
// Increment 1st bin
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
// Increment 2nd bin
oldCnt = cntPtr[LI_2];
newCnt = oldCnt + inc2;
cntPtr[LI_2] = newCnt;
}
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount3
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1, // IN - input 'bins' to count
U32 bin2,
U32 bin3
)
{
// Lane Row = bin / 4
U32 LI_1, LI_2, LI_3;
LI_1 = bin1 >> 2u;
LI_2 = bin2 >> 2u;
LI_3 = bin3 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
LI_2 = LI_2 * BlockSize;
LI_3 = LI_3 * BlockSize;
// Lane Col = bin % 4
U32 col1, col2, col3;
col1 = bin1 & 0x3u;
col2 = bin2 & 0x3u;
col3 = bin3 & 0x3u;
// Shift = Lane Col [0,1,2,3] * 8
U32 s1, s2, s3;
s1 = col1 << 3u;
s2 = col2 << 3u;
s3 = col3 << 3u;
// Get Increments
U32 inc1, inc2, inc3;
inc1 = 1u << s1;
inc2 = 1u << s2;
inc3 = 1u << s3;
//-----
// Add bin counts into count array
//-----
U32 oldCnt, newCnt;
// Increment 1st bin
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
// Increment 2nd bin
oldCnt = cntPtr[LI_2];
newCnt = oldCnt + inc2;
cntPtr[LI_2] = newCnt;
// Increment 3rd bin
oldCnt = cntPtr[LI_3];
newCnt = oldCnt + inc3;
cntPtr[LI_3] = newCnt;
}
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount4
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1, // IN - input 'bins' to count
U32 bin2,
U32 bin3,
U32 bin4
)
{
// Lane Row = bin / 4
U32 LI_1, LI_2, LI_3, LI_4;
LI_1 = bin1 >> 2u;
LI_2 = bin2 >> 2u;
LI_3 = bin3 >> 2u;
LI_4 = bin4 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
LI_2 = LI_2 * BlockSize;
LI_3 = LI_3 * BlockSize;
LI_4 = LI_4 * BlockSize;
// Lane Col = bin % 4
U32 col1, col2, col3, col4;
col1 = bin1 & 0x3u;
col2 = bin2 & 0x3u;
col3 = bin3 & 0x3u;
col4 = bin4 & 0x3u;
// Shift = Lane Col [0,1,2,3] * 8
U32 s1, s2, s3, s4;
s1 = col1 << 3u;
s2 = col2 << 3u;
s3 = col3 << 3u;
s4 = col4 << 3u;
// Get Increments
U32 inc1, inc2, inc3, inc4;
inc1 = 1u << s1;
inc2 = 1u << s2;
inc3 = 1u << s3;
inc4 = 1u << s4;
//-----
// Add bin counts into count array
//-----
U32 oldCnt, newCnt;
// Increment 1st bin
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
// Increment 2nd bin
oldCnt = cntPtr[LI_2];
newCnt = oldCnt + inc2;
cntPtr[LI_2] = newCnt;
// Increment 3rd bin
oldCnt = cntPtr[LI_3];
newCnt = oldCnt + inc3;
cntPtr[LI_3] = newCnt;
// Increment 4th bin
oldCnt = cntPtr[LI_4];
newCnt = oldCnt + inc4;
cntPtr[LI_4] = newCnt;
}
/*---------------------------------------------------------
Name: SetArray_BlockSeq
Desc: Sets elements in array to specified value
Note: Uses "Block Sequential" access pattern
---------------------------------------------------------*/
template <
typename valT, // Underlying value type
U32 BlockSize, // Threads Per Block
U32 nSafePasses, // Number of safe passes
U32 nLeftOver, // Number of left over elements
U32 maxSize // Max Size of array
>
__device__ __forceinline__
void SetArray_BlockSeq
(
valT * basePtr, // IN/OUT - array to set to 'set' value
valT toSet // IN - value to set array elements 'to'
)
{
// Get 'per thread' pointer
valT * setPtr = basePtr + threadIdx.x;
// Initialize as many elements as we
// safely can with no range checking
if (nSafePasses >= 1u) { setPtr[( 0u * BlockSize)] = toSet; }
if (nSafePasses >= 2u) { setPtr[( 1u * BlockSize)] = toSet; }
if (nSafePasses >= 3u) { setPtr[( 2u * BlockSize)] = toSet; }
if (nSafePasses >= 4u) { setPtr[( 3u * BlockSize)] = toSet; }
if (nSafePasses >= 5u) { setPtr[( 4u * BlockSize)] = toSet; }
if (nSafePasses >= 6u) { setPtr[( 5u * BlockSize)] = toSet; }
if (nSafePasses >= 7u) { setPtr[( 6u * BlockSize)] = toSet; }
if (nSafePasses >= 8u) { setPtr[( 7u * BlockSize)] = toSet; }
if (nSafePasses >= 9u) { setPtr[( 8u * BlockSize)] = toSet; }
if (nSafePasses >= 10u) { setPtr[( 9u * BlockSize)] = toSet; }
if (nSafePasses >= 11u) { setPtr[(10u * BlockSize)] = toSet; }
if (nSafePasses >= 12u) { setPtr[(11u * BlockSize)] = toSet; }
if (nSafePasses >= 13u) { setPtr[(12u * BlockSize)] = toSet; }
if (nSafePasses >= 14u) { setPtr[(13u * BlockSize)] = toSet; }
if (nSafePasses >= 15u) { setPtr[(14u * BlockSize)] = toSet; }
if (nSafePasses >= 16u) { setPtr[(15u * BlockSize)] = toSet; }
if (nSafePasses >= 17u) { setPtr[(16u * BlockSize)] = toSet; }
if (nSafePasses >= 18u) { setPtr[(17u * BlockSize)] = toSet; }
if (nSafePasses >= 19u) { setPtr[(18u * BlockSize)] = toSet; }
if (nSafePasses >= 20u) { setPtr[(19u * BlockSize)] = toSet; }
if (nSafePasses >= 21u) { setPtr[(20u * BlockSize)] = toSet; }
if (nSafePasses >= 22u) { setPtr[(21u * BlockSize)] = toSet; }
if (nSafePasses >= 23u) { setPtr[(22u * BlockSize)] = toSet; }
if (nSafePasses >= 24u) { setPtr[(23u * BlockSize)] = toSet; }
if (nSafePasses >= 25u) { setPtr[(24u * BlockSize)] = toSet; }
if (nSafePasses >= 26u) { setPtr[(25u * BlockSize)] = toSet; }
if (nSafePasses >= 27u) { setPtr[(26u * BlockSize)] = toSet; }
if (nSafePasses >= 28u) { setPtr[(27u * BlockSize)] = toSet; }
if (nSafePasses >= 29u) { setPtr[(28u * BlockSize)] = toSet; }
if (nSafePasses >= 30u) { setPtr[(29u * BlockSize)] = toSet; }
if (nSafePasses >= 31u) { setPtr[(30u * BlockSize)] = toSet; }
if (nSafePasses >= 32u) { setPtr[(31u * BlockSize)] = toSet; }
if (nSafePasses >= 33u) { setPtr[(32u * BlockSize)] = toSet; }
if (nSafePasses >= 34u) { setPtr[(33u * BlockSize)] = toSet; }
if (nSafePasses >= 35u) { setPtr[(34u * BlockSize)] = toSet; }
if (nSafePasses >= 36u) { setPtr[(35u * BlockSize)] = toSet; }
if (nSafePasses >= 37u) { setPtr[(36u * BlockSize)] = toSet; }
if (nSafePasses >= 38u) { setPtr[(37u * BlockSize)] = toSet; }
if (nSafePasses >= 39u) { setPtr[(38u * BlockSize)] = toSet; }
if (nSafePasses >= 40u) { setPtr[(39u * BlockSize)] = toSet; }
if (nSafePasses >= 41u) { setPtr[(40u * BlockSize)] = toSet; }
if (nSafePasses >= 42u) { setPtr[(41u * BlockSize)] = toSet; }
if (nSafePasses >= 43u) { setPtr[(42u * BlockSize)] = toSet; }
if (nSafePasses >= 44u) { setPtr[(43u * BlockSize)] = toSet; }
if (nSafePasses >= 45u) { setPtr[(44u * BlockSize)] = toSet; }
if (nSafePasses >= 46u) { setPtr[(45u * BlockSize)] = toSet; }
if (nSafePasses >= 47u) { setPtr[(46u * BlockSize)] = toSet; }
if (nSafePasses >= 48u) { setPtr[(47u * BlockSize)] = toSet; }
if (nSafePasses >= 49u) { setPtr[(48u * BlockSize)] = toSet; }
if (nSafePasses >= 50u) { setPtr[(49u * BlockSize)] = toSet; }
if (nSafePasses >= 51u) { setPtr[(50u * BlockSize)] = toSet; }
if (nSafePasses >= 52u) { setPtr[(51u * BlockSize)] = toSet; }
if (nSafePasses >= 53u) { setPtr[(52u * BlockSize)] = toSet; }
if (nSafePasses >= 54u) { setPtr[(53u * BlockSize)] = toSet; }
if (nSafePasses >= 55u) { setPtr[(54u * BlockSize)] = toSet; }
if (nSafePasses >= 56u) { setPtr[(55u * BlockSize)] = toSet; }
if (nSafePasses >= 57u) { setPtr[(56u * BlockSize)] = toSet; }
if (nSafePasses >= 58u) { setPtr[(57u * BlockSize)] = toSet; }
if (nSafePasses >= 59u) { setPtr[(58u * BlockSize)] = toSet; }
if (nSafePasses >= 60u) { setPtr[(59u * BlockSize)] = toSet; }
if (nSafePasses >= 61u) { setPtr[(60u * BlockSize)] = toSet; }
if (nSafePasses >= 62u) { setPtr[(61u * BlockSize)] = toSet; }
if (nSafePasses >= 63u) { setPtr[(62u * BlockSize)] = toSet; }
if (nSafePasses >= 64u) { setPtr[(63u * BlockSize)] = toSet; }
if (nSafePasses >= 65u) { setPtr[(64u * BlockSize)] = toSet; }
if (nSafePasses >= 66u) { setPtr[(65u * BlockSize)] = toSet; }
// Set any 'left over' values with range checking
if (nLeftOver > 0u)
{
U32 idx = (nSafePasses * BlockSize) + threadIdx.x;
if (idx < maxSize)
{
basePtr[idx] = toSet;
}
}
}
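// Illustrative sketch (not part of the original file): the "Block Sequential" pattern
// above has thread t touch indices t, t + BlockSize, t + 2*BlockSize, ..., so in every
// pass the threads of a block hit consecutive addresses (fully coalesced). Host-side
// emulation of the same pattern for a hypothetical 8-thread block and 20-element array:
static void Demo_BlockSeqPattern()
{
const U32 blockSize = 8u, arraySize = 20u;
U32 arr[arraySize];
for (U32 tid = 0u; tid < blockSize; ++tid) // what each "thread" would write
for (U32 idx = tid; idx < arraySize; idx += blockSize)
arr[idx] = 0u;
for (U32 i = 0u; i < arraySize; ++i) { assert( arr[i] == 0u ); } // every slot was set exactly once
}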
/*---------------------------------------------------------
Name: SetArray_WarpSeq
Desc: Sets elements in array to specified value
Note: Uses "Warp Sequential" access pattern
---------------------------------------------------------*/
template <
typename valT, // Underlying value type
U32 WarpSize, // Threads per Warp
U32 nSafePasses, // Number of safe passes (warps per subsection)
U32 nLeftOver, // Number of left over elements
U32 maxSize // Max Size of array
>
__device__ __forceinline__
void SetArray_WarpSeq
(
valT * basePtr, // IN/OUT - array to set to 'set' value
valT toSet, // IN - value to set array elements 'to'
U32 startIdx // starting index for this thread
)
{
// Get 'per thread' pointer
valT * setPtr = &basePtr[startIdx];
// Initialize as many elements as we
// safely can with no range checking
if (nSafePasses >= 1u) { setPtr[( 0u * WarpSize)] = toSet; }
if (nSafePasses >= 2u) { setPtr[( 1u * WarpSize)] = toSet; }
if (nSafePasses >= 3u) { setPtr[( 2u * WarpSize)] = toSet; }
if (nSafePasses >= 4u) { setPtr[( 3u * WarpSize)] = toSet; }
if (nSafePasses >= 5u) { setPtr[( 4u * WarpSize)] = toSet; }
if (nSafePasses >= 6u) { setPtr[( 5u * WarpSize)] = toSet; }
if (nSafePasses >= 7u) { setPtr[( 6u * WarpSize)] = toSet; }
if (nSafePasses >= 8u) { setPtr[( 7u * WarpSize)] = toSet; }
if (nSafePasses >= 9u) { setPtr[( 8u * WarpSize)] = toSet; }
if (nSafePasses >= 10u) { setPtr[( 9u * WarpSize)] = toSet; }
if (nSafePasses >= 11u) { setPtr[(10u * WarpSize)] = toSet; }
if (nSafePasses >= 12u) { setPtr[(11u * WarpSize)] = toSet; }
if (nSafePasses >= 13u) { setPtr[(12u * WarpSize)] = toSet; }
if (nSafePasses >= 14u) { setPtr[(13u * WarpSize)] = toSet; }
if (nSafePasses >= 15u) { setPtr[(14u * WarpSize)] = toSet; }
if (nSafePasses >= 16u) { setPtr[(15u * WarpSize)] = toSet; }
if (nSafePasses >= 17u) { setPtr[(16u * WarpSize)] = toSet; }
if (nSafePasses >= 18u) { setPtr[(17u * WarpSize)] = toSet; }
if (nSafePasses >= 19u) { setPtr[(18u * WarpSize)] = toSet; }
if (nSafePasses >= 20u) { setPtr[(19u * WarpSize)] = toSet; }
if (nSafePasses >= 21u) { setPtr[(20u * WarpSize)] = toSet; }
if (nSafePasses >= 22u) { setPtr[(21u * WarpSize)] = toSet; }
if (nSafePasses >= 23u) { setPtr[(22u * WarpSize)] = toSet; }
if (nSafePasses >= 24u) { setPtr[(23u * WarpSize)] = toSet; }
if (nSafePasses >= 25u) { setPtr[(24u * WarpSize)] = toSet; }
if (nSafePasses >= 26u) { setPtr[(25u * WarpSize)] = toSet; }
if (nSafePasses >= 27u) { setPtr[(26u * WarpSize)] = toSet; }
if (nSafePasses >= 28u) { setPtr[(27u * WarpSize)] = toSet; }
if (nSafePasses >= 29u) { setPtr[(28u * WarpSize)] = toSet; }
if (nSafePasses >= 30u) { setPtr[(29u * WarpSize)] = toSet; }
if (nSafePasses >= 31u) { setPtr[(30u * WarpSize)] = toSet; }
if (nSafePasses >= 32u) { setPtr[(31u * WarpSize)] = toSet; }
if (nSafePasses >= 33u) { setPtr[(32u * WarpSize)] = toSet; }
if (nSafePasses >= 34u) { setPtr[(33u * WarpSize)] = toSet; }
if (nSafePasses >= 35u) { setPtr[(34u * WarpSize)] = toSet; }
if (nSafePasses >= 36u) { setPtr[(35u * WarpSize)] = toSet; }
if (nSafePasses >= 37u) { setPtr[(36u * WarpSize)] = toSet; }
if (nSafePasses >= 38u) { setPtr[(37u * WarpSize)] = toSet; }
if (nSafePasses >= 39u) { setPtr[(38u * WarpSize)] = toSet; }
if (nSafePasses >= 40u) { setPtr[(39u * WarpSize)] = toSet; }
if (nSafePasses >= 41u) { setPtr[(40u * WarpSize)] = toSet; }
if (nSafePasses >= 42u) { setPtr[(41u * WarpSize)] = toSet; }
if (nSafePasses >= 43u) { setPtr[(42u * WarpSize)] = toSet; }
if (nSafePasses >= 44u) { setPtr[(43u * WarpSize)] = toSet; }
if (nSafePasses >= 45u) { setPtr[(44u * WarpSize)] = toSet; }
if (nSafePasses >= 46u) { setPtr[(45u * WarpSize)] = toSet; }
if (nSafePasses >= 47u) { setPtr[(46u * WarpSize)] = toSet; }
if (nSafePasses >= 48u) { setPtr[(47u * WarpSize)] = toSet; }
if (nSafePasses >= 49u) { setPtr[(48u * WarpSize)] = toSet; }
if (nSafePasses >= 50u) { setPtr[(49u * WarpSize)] = toSet; }
if (nSafePasses >= 51u) { setPtr[(50u * WarpSize)] = toSet; }
if (nSafePasses >= 52u) { setPtr[(51u * WarpSize)] = toSet; }
if (nSafePasses >= 53u) { setPtr[(52u * WarpSize)] = toSet; }
if (nSafePasses >= 54u) { setPtr[(53u * WarpSize)] = toSet; }
if (nSafePasses >= 55u) { setPtr[(54u * WarpSize)] = toSet; }
if (nSafePasses >= 56u) { setPtr[(55u * WarpSize)] = toSet; }
if (nSafePasses >= 57u) { setPtr[(56u * WarpSize)] = toSet; }
if (nSafePasses >= 58u) { setPtr[(57u * WarpSize)] = toSet; }
if (nSafePasses >= 59u) { setPtr[(58u * WarpSize)] = toSet; }
if (nSafePasses >= 60u) { setPtr[(59u * WarpSize)] = toSet; }
if (nSafePasses >= 61u) { setPtr[(60u * WarpSize)] = toSet; }
if (nSafePasses >= 62u) { setPtr[(61u * WarpSize)] = toSet; }
if (nSafePasses >= 63u) { setPtr[(62u * WarpSize)] = toSet; }
if (nSafePasses >= 64u) { setPtr[(63u * WarpSize)] = toSet; }
if (nSafePasses >= 65u) { setPtr[(64u * WarpSize)] = toSet; }
if (nSafePasses >= 66u) { setPtr[(65u * WarpSize)] = toSet; }
// Set any 'left over' values with range checking
if (nLeftOver > 0u)
{
U32 idx = startIdx + (nSafePasses * WarpSize);
if (idx < maxSize)
{
basePtr[idx] = toSet;
}
}
}
/*-------------------------------------------------------------------
Name: SS_Sums_4_Next_V1
Desc: Serial scan on next 4 elements in seq [0..3]
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads per block
U32 BlockMask // Block Mask
>
__device__ __forceinline__
void SS_Sums_4_Next_V1
(
U32 & sum1, // OUT - sum1 .. sum4 (as singletons)
U32 & sum2,
U32 & sum3,
U32 & sum4,
U32 * cntPtr, // IN - 'per thread' counts <horizontal row> to sum up
U32 baseIdx
)
{
// wrap = (idx + [0..3]) % BlockSize
U32 idx1, idx2, idx3, idx4;
idx1 = baseIdx + 0u;
idx2 = baseIdx + 1u;
idx3 = baseIdx + 2u;
idx4 = baseIdx + 3u;
U32 wrap1, wrap2, wrap3, wrap4;
wrap1 = idx1 & BlockMask;
wrap2 = idx2 & BlockMask;
wrap3 = idx3 & BlockMask;
wrap4 = idx4 & BlockMask;
//-
// Grab 4 elements in seq [0..3]
//-
U32 lane1, lane2, lane3, lane4;
lane1 = cntPtr[wrap1];
lane2 = cntPtr[wrap2];
lane3 = cntPtr[wrap3];
lane4 = cntPtr[wrap4];
//-
// Zero out sequence [0..3]
//-
cntPtr[wrap1] = 0u;
cntPtr[wrap2] = 0u;
cntPtr[wrap3] = 0u;
cntPtr[wrap4] = 0u;
//-
// Accumulate all 4 groups in each lane
//-
//-
// Initialize sums from 1st lane (of 4 groups)
//-
U32 s3 = lane1 >> 16u; // 3rd bin (of 4) in lane
U32 s2 = lane1 >> 8u; // 2nd bin (of 4) in lane
U32 cnt4 = lane1 >> 24u;
U32 cnt3 = s3 & 0xFFu;
U32 cnt2 = s2 & 0xFFu;
U32 cnt1 = lane1 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
//-
// Accumulate sums from 2nd lane (of 4 groups)
//-
s3 = lane2 >> 16u; // 3rd bin (of 4) in lane
s2 = lane2 >> 8u; // 2nd bin (of 4) in lane
cnt4 = lane2 >> 24u;
cnt3 = s3 & 0xFFu;
cnt2 = s2 & 0xFFu;
cnt1 = lane2 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
//-
// Accumulate sums from 3rd lane (of 4 groups)
//-
s3 = lane3 >> 16u; // 3rd bin (of 4) in lane
s2 = lane3 >> 8u; // 2nd bin (of 4) in lane
cnt4 = lane3 >> 24u;
cnt3 = s3 & 0xFFu;
cnt2 = s2 & 0xFFu;
cnt1 = lane3 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
//-
// Accumulate sums from 4th lane (of 4 groups)
//-
s3 = lane4 >> 16u; // 3rd bin (of 4) in lane
s2 = lane4 >> 8u; // 2nd bin (of 4) in lane
cnt4 = lane4 >> 24u;
cnt3 = s3 & 0xFFu;
cnt2 = s2 & 0xFFu;
cnt1 = lane4 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
}
/*-------------------------------------------------------------------
Name: SS_Sums_4_Next_V2
Desc: Serial scan on next 4 elements in seq [0..3]
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads Per Block
U32 BlockMask // Block Mask
>
__device__ __forceinline__
void SS_Sums_4_Next_V2
(
U32 & sum13, // OUT - sum1 .. sum4 (as pairs)
U32 & sum24,
U32 * cntPtr, // IN - 'per thread' counts <horizontal row> to sum up
U32 baseIdx
)
{
// wrap = (idx + [0..3]) % BlockSize
U32 idx1, idx2, idx3, idx4;
idx1 = baseIdx + 0u;
idx2 = baseIdx + 1u;
idx3 = baseIdx + 2u;
idx4 = baseIdx + 3u;
U32 wrap1, wrap2, wrap3, wrap4;
wrap1 = idx1 & BlockMask;
wrap2 = idx2 & BlockMask;
wrap3 = idx3 & BlockMask;
wrap4 = idx4 & BlockMask;
//-
// Grab 4 elements in seq [0..3]
//-
U32 lane1, lane2, lane3, lane4;
lane1 = cntPtr[wrap1];
lane2 = cntPtr[wrap2];
lane3 = cntPtr[wrap3];
lane4 = cntPtr[wrap4];
//-
// Zero out sequence [0..3]
//-
cntPtr[wrap1] = 0u;
cntPtr[wrap2] = 0u;
cntPtr[wrap3] = 0u;
cntPtr[wrap4] = 0u;
//-
// Accumulate all 4 groups in each lane
//-
//-
// Initialize sums from 1st lane (of 4 groups)
//-
U32 cnt13, cnt24;
cnt13 = (lane1 >> 0u) & 0x00FF00FFu;
cnt24 = (lane1 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
//-
// Accumulate sums from 2nd lane (of 4 groups)
//-
cnt13 = (lane2 >> 0u) & 0x00FF00FFu;
cnt24 = (lane2 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
//-
// Accumulate sums from 3rd lane (of 4 groups)
//-
cnt13 = (lane3 >> 0u) & 0x00FF00FFu;
cnt24 = (lane3 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
//-
// Accumulate sums from 4th lane (of 4 groups)
//-
cnt13 = (lane4 >> 0u) & 0x00FF00FFu;
cnt24 = (lane4 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
}
/*-------------------------------------------------------------------
Name: AddThreadToRowCounts_V1
Desc: Accumulates 'Per Thread' counts into 'Per Row' Counts
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads per Block
U32 BlockMask // Block Mask
>
__device__ __forceinline__
void AddThreadToRowCounts_V1
(
U32 & rCnt1, // OUT - 4 'per row' counts assigned to this thread
U32 & rCnt2, // ditto
U32 & rCnt3, // ditto
U32 & rCnt4, // ditto
U32 * basePtr, // IN - array of 'per thread' counts
U32 tid
)
{
//-----
// Serial Scan (Scan All 64 elements in sequence)
//-----
// Accumulate [0..63]
// Note: Also zeros out [0..63]
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 0) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 4) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 8) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 12) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 16) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 20) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 24) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 28) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 32) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 36) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 40) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 44) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 48) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 52) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 56) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 60) );
}
/*-------------------------------------------------------------------
Name: AddThreadToRowCounts_V2
Desc: Accumulates 'Per Thread' counts into 'Per Row' Counts
Notes:
1. Vector Parallelism:
We accumulate 2 pairs at a time across each row
instead of 4 singletons for a big savings
in arithmetic operations.
2. Overflow:
We store 2 16-bit row sums per 32-bit number
Which means that the accumulated Row sums need to not
overflow a 16-bit number (65,535).
Since, we assume the maximum possible count per thread is 252
64 threads * 252 = 16,128 <Safe>
128 threads * 252 = 32,256 <Safe>
256 threads * 252 = 64,512 <Safe>
512 threads * 252 = 129,024 *** UNSAFE ***
If this is a problem, revert to *_V1
3. Register Pressure:
*_V2 uses 6 more registers per thread than *_V1
If this is a problem, revert to *_V1
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads per Block
U32 BlockMask // BlockSize - 1
>
__device__ __forceinline__
void AddThreadToRowCounts_V2
(
U32 & rCnt1, // OUT - 4 'per row' counts assigned to this thread
U32 & rCnt2, // ditto
U32 & rCnt3, // ditto
U32 & rCnt4, // ditto
U32 * basePtr, // IN - array of 'per thread' counts
U32 tid // IN - thread ID
)
{
U32 sum13, sum24;
sum13 = 0u;
sum24 = 0u;
//-----
// Serial Scan (Scan All 64 elements in sequence)
//-----
// Accumulate Row Sums [0..63]
// Note: Also zeros out count array while accumulating
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 0) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 4) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 8) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 12) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 16) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 20) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 24) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 28) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 32) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 36) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 40) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 44) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 48) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 52) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 56) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 60) );
// Convert row sums from pairs back into singletons
U32 sum1, sum2, sum3, sum4;
sum1 = sum13 & 0x0000FFFFu;
sum2 = sum24 & 0x0000FFFFu;
sum3 = sum13 >> 16u;
sum4 = sum24 >> 16u;
// Add row sums back into register counts
rCnt1 += sum1;
rCnt2 += sum2;
rCnt3 += sum3;
rCnt4 += sum4;
}
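// Illustrative sketch (not part of the original file): a host-side check of the pair
// packing used by SS_Sums_4_Next_V2 / AddThreadToRowCounts_V2 above. Masking a packed
// lane with 0x00FF00FF pulls out bins {1,3}; shifting by 8 first pulls out bins {2,4};
// each 32-bit add therefore advances two 16-bit row sums at once.
static void Demo_PairPackedSums()
{
U32 lane = (7u << 24) | (5u << 16) | (3u << 8) | 2u; // packed counts: bin1..bin4 = 2,3,5,7
U32 sum13 = 0u, sum24 = 0u;
sum13 += (lane >> 0u) & 0x00FF00FFu; // bin1 in low 16 bits, bin3 in high 16 bits
sum24 += (lane >> 8u) & 0x00FF00FFu; // bin2 in low 16 bits, bin4 in high 16 bits
U32 s1 = sum13 & 0xFFFFu, s3 = sum13 >> 16u;
U32 s2 = sum24 & 0xFFFFu, s4 = sum24 >> 16u;
assert( s1 == 2u && s2 == 3u && s3 == 5u && s4 == 7u );
}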
/*---------------------------------------------------------
Name: K1_TRISH_CountRows_GEN_B1
Desc:
Note: Assumes underlying data is stored as
four 8-bit values (U8,I8) per 32-bit
storage element
---------------------------------------------------------*/
template <
typename valT, // underlying value Type (U8, I8)
typename mapT, // underlying mapper object
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads per Warp )
U32 BlockSize, // Threads Per Block (needs to be a power of 2 & multiple of warpsize)
U32 GridSize, // Blocks Per Grid
U32 K_length // #elements to process per thread before looping
>
__global__
void K1_TRISH_CountRows_GEN_B1
(
U32 * outRowCounts, // OUT - 256-way row-sums array
const U32 * inVals, // IN - values to bin and count
U32 start, // IN - range [start,stop] to check and count
U32 stop, // ditto
valT minVal, // IN - minimum value
valT maxVal, // IN - maximum value
U32 numBins // IN - number of bins (in histogram)
)
{
//-------------------------------------------
// Constant values (computed at compile time)
//-------------------------------------------
// Bank Size (elements per bank)
const U32 BankSize = (1u << logBankSize); // 32 = 2^5 threads per bank
const U32 BankMask = BankSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
const U32 strideBank = BankSize + 1u; // 33 = 32 + 1
// Extra '+1' to help try and avoid bank conflicts
// Warp Size (threads per warp)
const U32 WarpSize = (1u << logWarpSize); // 32 = 2^5 threads per warp
const U32 WarpMask = WarpSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
// Block Size (threads per block)
//const U32 BlockSize = 64u;
const U32 BlockMask = BlockSize - 1u;
// Chunk Size
//const U32 ChunkSize = BlockSize * K_length;
//const U32 IN_WarpSize = K_length * WarpSize;
// K_length
//const U32 K_length = 16u; // 16
const U32 K4_length = K_length * 4u; // 64 = 16 * 4
const U32 K4_stop = 256u - K4_length; // 192 = 256 - 64
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 2 = 64/32
// Bins per Histogram
const U32 nHistBins = 256u; // 256 = 2^8
// Lane Info (Compress 4 'bins' into each 32-bit value)
const U32 nLanes = 64u; // 64, # Lanes = 256 bins / 4 bins per lane
// 'Per Thread' counts array
const U32 nTCounts = nLanes * BlockSize;
const U32 banksTCounts = (nTCounts + BankMask) / BankSize;
const U32 padTCounts = (banksTCounts * BankSize) - nTCounts;
const U32 sizeTCounts = nTCounts + padTCounts;
// Output size
const U32 OutWarpSize = nHistBins / WarpsPerBlock;
const U32 OutLength = OutWarpSize / WarpSize;
const U32 OutStrideSize = OutLength * strideBank;
// Array Initialization
const U32 nPassesThrd = sizeTCounts / BlockSize;
const U32 leftOverThrd = sizeTCounts - (nPassesThrd * BlockSize);
const U32 nThreadsPerGrid = BlockSize * GridSize; // 3,072 = 64 * 48
const U32 rowSize = K_length * nThreadsPerGrid; // 193,536 = 63 * 64 * 48 (for K_length = 63)
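// Worked example of the layout above (added note, not in the original source), for the
// configuration the inline comments assume (BlockSize = 64, GridSize = 48, K_length = 63,
// 32-channel banks):
// nLanes = 256 bins / 4 bins-per-lane = 64 packed U32 counters per thread
// nTCounts = 64 lanes * 64 threads = 4096 U32s -> a whole number of banks, so padTCounts = 0
// sizeTCounts = 4096 U32s = 16 KB of shared memory for the 'per thread' counts
// nThreadsPerGrid = 64 * 48 = 3,072 threads
// rowSize = 63 * 3,072 = 193,536 input elements consumed per full pass over the grid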
//------------------------------------
// Local Typedefs
//------------------------------------
// TRISH types
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
typedef typename ExtractBytes<upscaleType> Extractor;
//------------------------------------
// Local Variables
//------------------------------------
// Local variables (shared memory)
__shared__ U32 s_thrdCounts[sizeTCounts]; // 'per thread' counts
// Local variables (registers)
U32 rowCnt1 = 0u;
U32 rowCnt2 = 0u;
U32 rowCnt3 = 0u;
U32 rowCnt4 = 0u;
//---------------------------
// Compute Indices & Pointers
//---------------------------
U32 tid = threadIdx.x; // Thread ID within Block
U32 * cntPtr;
U32 * basePtr;
{
// Get Warp Row & Column
//U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
//U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute starting 'input' offset (Warp Sequential Layout)
//inIdx = (warpRow * IN_WarpSize) // Move to each warps assigned portion of work
// + warpCol; // Move to warp column (in warp)
// Compute starting serial scan index
U32 baseIdx = (tid * BlockSize);
// Get pointers into shared memory array
// for different views of memory
cntPtr = &s_thrdCounts[threadIdx.x];
basePtr = &s_thrdCounts[baseIdx];
}
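// (cntPtr : this thread's slot in the packed counts array,
// used by BinCount4 when binning values;
// basePtr: this thread's base offset into the same array,
// used by AddThreadToRowCounts_V2 when folding packed
// counts into the four 32-bit row sums)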
//-------------------------------------------
// Zero out arrays
//-------------------------------------------
{
//-
// Zero out 'Per Thread' counts
//-
U32 * ptrTC = (&s_thrdCounts[0]);
SetArray_BlockSeq
<
U32, BlockSize, nPassesThrd, leftOverThrd, sizeTCounts
>
(
ptrTC, 0u
);
}
//-----
// Compute thread, block, & grid indices & sizes
//-----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Block ID within Grid
U32 elemOffset = (bid * K_length * BlockSize) + tid; // Starting offset
U32 nElems32 = stop - start + 1u;
U32 nMaxRows = (nElems32 + (rowSize - 1u)) / rowSize;
U32 nSafeRows = nElems32 / rowSize;
U32 nSafeElems = nSafeRows * rowSize;
U32 nLeftOverElems = nElems32 - nSafeElems;
U32 startIdx = start + elemOffset;
U32 stopIdx = startIdx + (nSafeRows * rowSize);
U32 currIdx = startIdx;
U32 overflow = 0u;
// Initiate
// Initiate Mapping object
// (Transform from values to bin indices)
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
//-----
// Process all safe blocks
//-----
// 'input' pointer for reading from memory
const U32 * inPtr = &inVals[currIdx];
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
while (currIdx < stopIdx)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K4_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// NOTE: the 'K_length' variable below is a static
// hard-coded constant in the range [1..63].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
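// (e.g. with K_length == 8 only the reads and bins guarded by
// 'K_length >= 1u' .. 'K_length >= 8u' survive; the remaining
// compile-time-false branches are discarded by the compiler)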
//-
// Process values [0..3] (bytes 0..15)
//-
// Read in first 'four' values (32-bit)
if (K_length >= 1u) { val1 = inPtr[0u*BlockSize]; }
if (K_length >= 2u) { val2 = inPtr[1u*BlockSize]; }
if (K_length >= 3u) { val3 = inPtr[2u*BlockSize]; }
if (K_length >= 4u) { val4 = inPtr[3u*BlockSize]; }
// Bin first 'four' values into count array
if (K_length >= 1u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 2u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 3u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 4u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [4..7] (bytes 16..31)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 5u) { val1 = inPtr[4u*BlockSize]; }
if (K_length >= 6u) { val2 = inPtr[5u*BlockSize]; }
if (K_length >= 7u) { val3 = inPtr[6u*BlockSize]; }
if (K_length >= 8u) { val4 = inPtr[7u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 5u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 6u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 7u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 8u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [8..11] (bytes 32..47)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 9u) { val1 = inPtr[ 8u*BlockSize]; }
if (K_length >= 10u) { val2 = inPtr[ 9u*BlockSize]; }
if (K_length >= 11u) { val3 = inPtr[10u*BlockSize]; }
if (K_length >= 12u) { val4 = inPtr[11u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 9u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 10u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 11u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 12u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [12..15] (bytes 48..63)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 13u) { val1 = inPtr[12u*BlockSize]; }
if (K_length >= 14u) { val2 = inPtr[13u*BlockSize]; }
if (K_length >= 15u) { val3 = inPtr[14u*BlockSize]; }
if (K_length >= 16u) { val4 = inPtr[15u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 13u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 14u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 15u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 16u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [16..19] (bytes 64..79)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 17u) { val1 = inPtr[16u*BlockSize]; }
if (K_length >= 18u) { val2 = inPtr[17u*BlockSize]; }
if (K_length >= 19u) { val3 = inPtr[18u*BlockSize]; }
if (K_length >= 20u) { val4 = inPtr[19u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 17u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 18u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 19u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 20u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [20..23] (bytes 80..95)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 21u) { val1 = inPtr[20u*BlockSize]; }
if (K_length >= 22u) { val2 = inPtr[21u*BlockSize]; }
if (K_length >= 23u) { val3 = inPtr[22u*BlockSize]; }
if (K_length >= 24u) { val4 = inPtr[23u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 21u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 22u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 23u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 24u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [24..27] (bytes 96..111)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 25u) { val1 = inPtr[24u*BlockSize]; }
if (K_length >= 26u) { val2 = inPtr[25u*BlockSize]; }
if (K_length >= 27u) { val3 = inPtr[26u*BlockSize]; }
if (K_length >= 28u) { val4 = inPtr[27u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 25u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 26u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 27u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 28u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [28..31] (bytes 112..127)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 29u) { val1 = inPtr[28u*BlockSize]; }
if (K_length >= 30u) { val2 = inPtr[29u*BlockSize]; }
if (K_length >= 31u) { val3 = inPtr[30u*BlockSize]; }
if (K_length >= 32u) { val4 = inPtr[31u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 29u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 30u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 31u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 32u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [32..35] (bytes 128..143)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 33u) { val1 = inPtr[32u*BlockSize]; }
if (K_length >= 34u) { val2 = inPtr[33u*BlockSize]; }
if (K_length >= 35u) { val3 = inPtr[34u*BlockSize]; }
if (K_length >= 36u) { val4 = inPtr[35u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 33u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 34u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 35u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 36u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [36..39] (bytes 144..159)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 37u) { val1 = inPtr[36u*BlockSize]; }
if (K_length >= 38u) { val2 = inPtr[37u*BlockSize]; }
if (K_length >= 39u) { val3 = inPtr[38u*BlockSize]; }
if (K_length >= 40u) { val4 = inPtr[39u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 37u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 38u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 39u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 40u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [40..43] (bytes 160-175)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 41u) { val1 = inPtr[40u*BlockSize]; }
if (K_length >= 42u) { val2 = inPtr[41u*BlockSize]; }
if (K_length >= 43u) { val3 = inPtr[42u*BlockSize]; }
if (K_length >= 44u) { val4 = inPtr[43u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 41u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 42u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 43u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 44u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [44..47] (bytes 176-191)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 45u) { val1 = inPtr[44u*BlockSize]; }
if (K_length >= 46u) { val2 = inPtr[45u*BlockSize]; }
if (K_length >= 47u) { val3 = inPtr[46u*BlockSize]; }
if (K_length >= 48u) { val4 = inPtr[47u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 45u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 46u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 47u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 48u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [48-51] (bytes 192-207)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 49u) { val1 = inPtr[48u*BlockSize]; }
if (K_length >= 50u) { val2 = inPtr[49u*BlockSize]; }
if (K_length >= 51u) { val3 = inPtr[50u*BlockSize]; }
if (K_length >= 52u) { val4 = inPtr[51u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 49u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 50u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 51u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 52u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [52-55] (bytes 208-223)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 53u) { val1 = inPtr[52u*BlockSize]; }
if (K_length >= 54u) { val2 = inPtr[53u*BlockSize]; }
if (K_length >= 55u) { val3 = inPtr[54u*BlockSize]; }
if (K_length >= 56u) { val4 = inPtr[55u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 53u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 54u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 55u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 56u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [56-59] (bytes 224-239)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 57u) { val1 = inPtr[56u*BlockSize]; }
if (K_length >= 58u) { val2 = inPtr[57u*BlockSize]; }
if (K_length >= 59u) { val3 = inPtr[58u*BlockSize]; }
if (K_length >= 60u) { val4 = inPtr[59u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 57u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 58u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 59u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 60u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [60-62] (bytes 240-251)
//-
// Note: We deliberately do not support K >= 64 to
// avoid overflow issues during 'binning':
// each 'per thread' bin count can only handle
// 255 increments before it overflows, and 252 is
// the largest multiple of 4 (4 bytes per 32-bit value)
// that stays under that limit, hence
// 63 values = 252 bytes / 4 bytes per value.
// Read in next 'three' values (32-bit)
if (K_length >= 61u) { val1 = inPtr[60u*BlockSize]; }
if (K_length >= 62u) { val2 = inPtr[61u*BlockSize]; }
if (K_length >= 63u) { val3 = inPtr[62u*BlockSize]; }
// Note: Do not uncomment => *OVERFLOW* bug !!!
//if (K_length >= 64u) { val4 = inPtr[63u*BlockSize]; }
// Bin next 'three' values into count array
if (K_length >= 61u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 62u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 63u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
// Note: Do not uncomment => *OVERFLOW* bug !!!
//if (K_length >= 63u)
//{
// Extractor::Extract4( b1, b2, b3, b4, val4 );
// mapper.Transform4( b1, b2, b3, b4,
// bin1, bin2, bin3, bin4 );
// BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//}
//-----
// Move to next row of work
//-----
currIdx += rowSize;
inPtr += rowSize;
// Increment 'overflow' count
overflow += K4_length; // K values * 4 bytes per value
}
__syncthreads();
//--------------------------------------
// LAST: Process last leftover chunk
// with more careful range checking
//--------------------------------------
if (nLeftOverElems)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K4_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
// NOTE #1: the 'K_length' variable below is a static
// hard-coded constant in the range [1..63].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
// NOTE #2: We use a cooperative stride
// across each thread in each block in grid
// ChunkSize = BlockSize * GridSize = 64 * 48 = 3072
// RowSize = WorkPerThread(K) * ChunkSize = 63 * 3072 = 193,536
//
// B0 B1 ... B47 (Blocks in Grid)
// ---- ---- --- ----
// k = 1 => |64| |64| ... |64| (3072 Thread & I/O requests for 1st work item per thread)
// k = 2 => |64| |64| ... |64| ditto (2nd work item per thread)
// ... ... ...
// k = 63 => |64| |64| ... |64| ditto (63rd work item per thread)
// NOTE #3: We use "Divide & Conquer" to avoid as much of the slower range checking as possible
// Try batches of 32, 16, 8, 4, 2, 1, and then a final leftover pass (which must range check)
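// Example (assuming K_length = 63 so all batch sizes are compiled in):
// with nThreadsPerGrid = 3,072 and nLeftOverElems = 50,000:
// batch of 32 needs 98,304 elems -> skipped
// batch of 16 needs 49,152 elems -> processed (848 left)
// batches of 8/4/2/1 need 24,576/12,288/6,144/3,072 -> skipped
// final range-checked pass -> the 848 threads with baseIdx <= stop
// each read & bin one last element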
//----
// Setup Pointers & Indices for cooperative stride
//----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Get block index
U32 nSkip = nSafeRows * rowSize; // Skip past already processed rows
U32 chunkIdx = (bid * BlockSize) + tid; // Get starting index within chunk
U32 baseIdx = start + nSkip + chunkIdx; // Get starting index for left over elements
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
//------
// Try Section of 32
//------
if (K_length >= 32u)
{
// Process 32 chunks safely without range checking
if (nLeftOverElems >= (32u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (32u * nThreadsPerGrid);
nLeftOverElems -= (32u * nThreadsPerGrid);
}
}
//------
// Try Section of 16
//------
if (K_length >= 16u)
{
// Process 16 chunks safely without range checking
if (nLeftOverElems >= (16u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (16u * nThreadsPerGrid);
nLeftOverElems -= (16u * nThreadsPerGrid);
}
}
//------
// Try Section of 8
//------
if (K_length >= 8u)
{
// Process 8 chunks safely without range checking
if (nLeftOverElems >= (8u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (8u * nThreadsPerGrid);
nLeftOverElems -= (8u * nThreadsPerGrid);
}
}
//------
// Try Section of 4
//------
if (K_length >= 4u)
{
// Process 4 chunks safely without range checking
if (nLeftOverElems >= (4u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (4u * nThreadsPerGrid);
nLeftOverElems -= (4u * nThreadsPerGrid);
}
}
//------
// Try Section of 2
//------
if (K_length >= 2u)
{
// Process 2 chunks safely without range checking
if (nLeftOverElems >= (2u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..2]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (2u * nThreadsPerGrid);
nLeftOverElems -= (2u * nThreadsPerGrid);
}
}
//------
// Try Section of 1
//------
if (K_length >= 1u)
{
// Process 1 chunk safely without range checking
if (nLeftOverElems >= (1u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (1u * nThreadsPerGrid);
nLeftOverElems -= (1u * nThreadsPerGrid);
}
}
//------
// Process Last few elements
// with careful RANGE CHECKING !!!
//------
if (nLeftOverElems > 0u)
{
// Make sure we are 'in range' before reading & binning
U32 inRange1 = (baseIdx <= stop);
if (inRange1)
{
// Read in 32-bit element
val1 = inVals[baseIdx];
// Process element
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
}
// Update Accumulation count
overflow += K4_length; // 64 = 16 elems * 4 bytes per elem
}
// Cleanup Mapping object
// (Give mapper a chance to cleanup any resources)
mapper.Finish();
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow > 0u)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
//-------------------------------------------------
// Write out final row 'counts'
//-------------------------------------------------
{
// Compute starting 'row counts' offset
U32 rIdx = threadIdx.x * 4u; // 4 row counts per thread
U32 rRow = rIdx >> logBankSize;
U32 rCol = rIdx & BankMask;
U32 rowIdx = (rRow * strideBank) + (rCol + 1u);
// Extra '+1' to shift past initial pad element
U32 * rowPtr = &s_thrdCounts[rowIdx];
// Store row counts in row array
rowPtr[0] = rowCnt1;
rowPtr[1] = rowCnt2;
rowPtr[2] = rowCnt3;
rowPtr[3] = rowCnt4;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
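// Each warp now copies its OutWarpSize-element slice of this block's
// 256 row counts from the bank-padded staging area to global memory
// (one partial row-count array per block, starting at
// blockIdx.x * nHistBins), in warp-sequential order.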
// Get Warp Row & Column
U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Get local & global indices
U32 outGlobal = (blockIdx.x * nHistBins);
U32 outLocal = (warpRow * OutWarpSize);
U32 rowBase = (warpRow * OutStrideSize);
U32 outBase = outGlobal + outLocal;
U32 rowOff = warpCol + 1u;
U32 outIdx = outBase + warpCol;
rowIdx = rowBase + rowOff;
// Get local & global pointers
U32 * outPtr = &outRowCounts[outIdx];
rowPtr = &s_thrdCounts[rowIdx];
// Write our 'per row' counts in warp sequential order
if (OutLength >= 1u) { outPtr[(0u*WarpSize)] = rowPtr[(0u*strideBank)]; }
if (OutLength >= 2u) { outPtr[(1u*WarpSize)] = rowPtr[(1u*strideBank)]; }
if (OutLength >= 3u) { outPtr[(2u*WarpSize)] = rowPtr[(2u*strideBank)]; }
if (OutLength >= 4u) { outPtr[(3u*WarpSize)] = rowPtr[(3u*strideBank)]; }
if (OutLength >= 5u) { outPtr[(4u*WarpSize)] = rowPtr[(4u*strideBank)]; }
if (OutLength >= 6u) { outPtr[(5u*WarpSize)] = rowPtr[(5u*strideBank)]; }
if (OutLength >= 7u) { outPtr[(6u*WarpSize)] = rowPtr[(6u*strideBank)]; }
if (OutLength >= 8u) { outPtr[(7u*WarpSize)] = rowPtr[(7u*strideBank)]; }
}
}
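/*---------------------------------------------------------
Illustrative launch sketch (not part of the original file):
the template parameters and the mapper type 'MyMapperU8'
below are assumptions chosen only to show the argument
order; the real mapper type, block/grid configuration and
K value are supplied by the calling host code elsewhere,
and d_rowCounts is assumed sized to GridSize * 256 entries.
// K1_TRISH_CountRows_GEN_B1< U8, MyMapperU8,
// 5u, 5u, // log2(bank), log2(warp)
// 64u, 48u, // BlockSize, GridSize
// 63u > // K_length
// <<< dim3(48u,1u), dim3(64u,1u) >>>
// ( d_rowCounts, d_inVals, start, stop,
// (U8)minVal, (U8)maxVal, 256u );
---------------------------------------------------------*/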
/*---------------------------------------------------------
Name: K1_TRISH_CountRows_GEN_B2
Desc:
Note:
1. Assumes underlying data is stored as two 16-bit values
(U16,I16) per 32-bit storage element.
2. This further implies that K = [1,127] to safely
avoid overflowing an 8-bit counter.
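3. With two 16-bit words per 32-bit element, a single pass
adds at most K * 2 <= 254 increments to any one 8-bit
counter, and the K2_stop flush threshold below keeps the
running total at or below 255.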
---------------------------------------------------------*/
template <
typename valT, // underlying value Type (U16, I16)
typename mapT, // underlying mapper object
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads per Warp )
U32 BlockSize, // Threads Per Block (needs to be a power of 2 & multiple of warpsize)
U32 GridSize, // Blocks Per Grid
U32 K_length // #elements to process per thread before looping
>
__global__
void K1_TRISH_CountRows_GEN_B2
(
U32 * outRowCounts, // OUT - 256-way row-sums array
const U32 * inVals, // IN - values to bin and count
U32 start, // IN - range [start,stop] to check and count
U32 stop, // ditto
valT minVal, // IN - minimum value
valT maxVal, // IN - maximum value
U32 numBins // IN - number of bins (in histogram)
)
{
//-------------------------------------------
// Constant values (computed at compile time)
//-------------------------------------------
// Bank Size (elements per bank)
const U32 BankSize = (1u << logBankSize); // 32 = 2^5 threads per bank
const U32 BankMask = BankSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
const U32 strideBank = BankSize + 1u; // 33 = 32 + 1
// Extra '+1' to help try and avoid bank conflicts
// Warp Size (threads per warp)
const U32 WarpSize = (1u << logWarpSize); // 32 = 2^5 threads per warp
const U32 WarpMask = WarpSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
// Block Size (threads per block)
//const U32 BlockSize = 64u;
const U32 BlockMask = BlockSize - 1u;
// Chunk Size
//const U32 ChunkSize = BlockSize * K_length;
//const U32 IN_WarpSize = K_length * WarpSize;
// K_length
//const U32 K_length = 16u; // 16
const U32 K2_length = K_length * 2u; // 32 = 16 * 2 (2 words per 32-bit input value)
const U32 K2_stop = 256u - K2_length; // 224 = 256 - 32 (conservative test)
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 2 = 64/32
// Bins per Histogram
const U32 nHistBins = 256u; // 256 = 2^8
// Lane Info (Compress 4 'bins' into each 32-bit value)
const U32 nLanes = 64u; // 64, # Lanes = 256 bins / 4 bins per lane
// 'Per Thread' counts array
const U32 nTCounts = nLanes * BlockSize;
const U32 banksTCounts = (nTCounts + BankMask) / BankSize;
const U32 padTCounts = (banksTCounts * BankSize) - nTCounts;
const U32 sizeTCounts = nTCounts + padTCounts;
// Output size
const U32 OutWarpSize = nHistBins / WarpsPerBlock;
const U32 OutLength = OutWarpSize / WarpSize;
const U32 OutStrideSize = OutLength * strideBank;
// Array Initialization
const U32 nPassesThrd = sizeTCounts / BlockSize;
const U32 leftOverThrd = sizeTCounts - (nPassesThrd * BlockSize);
const U32 nThreadsPerGrid = BlockSize * GridSize; // 3,072 = 64 * 48
const U32 rowSize = K_length * nThreadsPerGrid; // 193,536 = 63 * 64 * 48
//------------------------------------
// Local Typedefs
//------------------------------------
// TRISH types
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
typedef typename ExtractWords<upscaleType> Extractor;
//------------------------------------
// Local Variables
//------------------------------------
// Local variables (shared memory)
__shared__ U32 s_thrdCounts[sizeTCounts]; // 'per thread' counts
// Local variables (registers)
U32 rowCnt1 = 0u;
U32 rowCnt2 = 0u;
U32 rowCnt3 = 0u;
U32 rowCnt4 = 0u;
//---------------------------
// Compute Indices & Pointers
//---------------------------
U32 tid = threadIdx.x; // Thread ID within Block
U32 * cntPtr;
U32 * basePtr;
{
// Get Warp Row & Column
//U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
//U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute starting 'input' offset (Warp Sequential Layout)
//inIdx = (warpRow * IN_WarpSize) // Move to each warps assigned portion of work
// + warpCol; // Move to warp column (in warp)
// Compute starting serial scan index
U32 baseIdx = (tid * BlockSize);
// Get pointers into shared memory array
// for different views of memory
cntPtr = &s_thrdCounts[threadIdx.x];
basePtr = &s_thrdCounts[baseIdx];
}
//-------------------------------------------
// Zero out arrays
//-------------------------------------------
{
//-
// Zero out 'Per Thread' counts
//-
U32 * ptrTC = (&s_thrdCounts[0]);
SetArray_BlockSeq
<
U32, BlockSize, nPassesThrd, leftOverThrd, sizeTCounts
>
(
ptrTC, 0u
);
}
//-----
// Compute thread, block, & grid indices & sizes
//-----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Block ID within Grid
U32 elemOffset = (bid * K_length * BlockSize) + tid; // Starting offset
U32 nElems32 = stop - start + 1u;
U32 nMaxRows = (nElems32 + (rowSize - 1u)) / rowSize;
U32 nSafeRows = nElems32 / rowSize;
U32 nSafeElems = nSafeRows * rowSize;
U32 nLeftOverElems = nElems32 - nSafeElems;
U32 startIdx = start + elemOffset;
U32 stopIdx = startIdx + (nSafeRows * rowSize);
U32 currIdx = startIdx;
U32 overflow = 0u;
// Initiate
// Initiate Mapping object
// (Transform from values to bin indices)
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
//-----
// Process all safe blocks
//-----
// 'input' pointer for reading from memory
const U32 * inPtr = &inVals[currIdx];
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
while (currIdx < stopIdx)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K2_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// NOTE: the 'K_length' variable below is a static
// hard-coded constant in the range [1..127] for this 16-bit variant.
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
//-
// Process values [0..3] (bytes 0..15)
//-
// Read in first 'four' values (32-bit)
if (K_length >= 1u) { val1 = inPtr[0u*BlockSize]; }
if (K_length >= 2u) { val2 = inPtr[1u*BlockSize]; }
if (K_length >= 3u) { val3 = inPtr[2u*BlockSize]; }
if (K_length >= 4u) { val4 = inPtr[3u*BlockSize]; }
// Bin first 'four' values into count array
if (K_length >= 4u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 3u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 2u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 1u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
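// Note: each 32-bit input element packs two 16-bit words, so pairs of
// elements are binned together via Extract4/Transform4 (four words),
// while a trailing odd element is handled with Extract2/Transform2
// (two words). The same pattern repeats for every group of four
// elements below.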
//-
// Process values [4..7] (bytes 16..31)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 5u) { val1 = inPtr[4u*BlockSize]; }
if (K_length >= 6u) { val2 = inPtr[5u*BlockSize]; }
if (K_length >= 7u) { val3 = inPtr[6u*BlockSize]; }
if (K_length >= 8u) { val4 = inPtr[7u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 8u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 7u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 6u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 5u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [8..11] (bytes 32..47)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 9u) { val1 = inPtr[ 8u*BlockSize]; }
if (K_length >= 10u) { val2 = inPtr[ 9u*BlockSize]; }
if (K_length >= 11u) { val3 = inPtr[10u*BlockSize]; }
if (K_length >= 12u) { val4 = inPtr[11u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 12u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 11u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 10u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 9u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [12..15] (bytes 48..63)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 13u) { val1 = inPtr[12u*BlockSize]; }
if (K_length >= 14u) { val2 = inPtr[13u*BlockSize]; }
if (K_length >= 15u) { val3 = inPtr[14u*BlockSize]; }
if (K_length >= 16u) { val4 = inPtr[15u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 16u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 15u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 14u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 13u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [16..19] (bytes 64..79)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 17u) { val1 = inPtr[16u*BlockSize]; }
if (K_length >= 18u) { val2 = inPtr[17u*BlockSize]; }
if (K_length >= 19u) { val3 = inPtr[18u*BlockSize]; }
if (K_length >= 20u) { val4 = inPtr[19u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 20u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 19u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 18u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 17u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [20..23] (bytes 80..95)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 21u) { val1 = inPtr[20u*BlockSize]; }
if (K_length >= 22u) { val2 = inPtr[21u*BlockSize]; }
if (K_length >= 23u) { val3 = inPtr[22u*BlockSize]; }
if (K_length >= 24u) { val4 = inPtr[23u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 24u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 23u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 22u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 21u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [24..27] (bytes 96..111)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 25u) { val1 = inPtr[24u*BlockSize]; }
if (K_length >= 26u) { val2 = inPtr[25u*BlockSize]; }
if (K_length >= 27u) { val3 = inPtr[26u*BlockSize]; }
if (K_length >= 28u) { val4 = inPtr[27u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 28u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 27u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 26u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 25u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [28..31] (bytes 112..127)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 29u) { val1 = inPtr[28u*BlockSize]; }
if (K_length >= 30u) { val2 = inPtr[29u*BlockSize]; }
if (K_length >= 31u) { val3 = inPtr[30u*BlockSize]; }
if (K_length >= 32u) { val4 = inPtr[31u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 32u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 31u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 30u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 29u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [32..35] (bytes 128..143)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 33u) { val1 = inPtr[32u*BlockSize]; }
if (K_length >= 34u) { val2 = inPtr[33u*BlockSize]; }
if (K_length >= 35u) { val3 = inPtr[34u*BlockSize]; }
if (K_length >= 36u) { val4 = inPtr[35u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 36u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 35u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 34u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 33u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [36..39] (bytes 144..159)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 37u) { val1 = inPtr[36u*BlockSize]; }
if (K_length >= 38u) { val2 = inPtr[37u*BlockSize]; }
if (K_length >= 39u) { val3 = inPtr[38u*BlockSize]; }
if (K_length >= 40u) { val4 = inPtr[39u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 40u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 39u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 38u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 37u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [40..43] (bytes 160..175)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 41u) { val1 = inPtr[40u*BlockSize]; }
if (K_length >= 42u) { val2 = inPtr[41u*BlockSize]; }
if (K_length >= 43u) { val3 = inPtr[42u*BlockSize]; }
if (K_length >= 44u) { val4 = inPtr[43u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 44u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 43u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 42u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 41u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [44..47] (bytes 176..191)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 45u) { val1 = inPtr[44u*BlockSize]; }
if (K_length >= 46u) { val2 = inPtr[45u*BlockSize]; }
if (K_length >= 47u) { val3 = inPtr[46u*BlockSize]; }
if (K_length >= 48u) { val4 = inPtr[47u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 48u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 47u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 46u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 45u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [48-51] (bytes 192-207)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 49u) { val1 = inPtr[48u*BlockSize]; }
if (K_length >= 50u) { val2 = inPtr[49u*BlockSize]; }
if (K_length >= 51u) { val3 = inPtr[50u*BlockSize]; }
if (K_length >= 52u) { val4 = inPtr[51u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 52u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 51u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 50u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 49u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [52-55] (bytes 208-223)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 53u) { val1 = inPtr[52u*BlockSize]; }
if (K_length >= 54u) { val2 = inPtr[53u*BlockSize]; }
if (K_length >= 55u) { val3 = inPtr[54u*BlockSize]; }
if (K_length >= 56u) { val4 = inPtr[55u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 56u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 55u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 54u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 53u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [56-59] (bytes 224-239)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 57u) { val1 = inPtr[56u*BlockSize]; }
if (K_length >= 58u) { val2 = inPtr[57u*BlockSize]; }
if (K_length >= 59u) { val3 = inPtr[58u*BlockSize]; }
if (K_length >= 60u) { val4 = inPtr[59u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 60u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 59u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 58u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 57u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [60-63]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 61u) { val1 = inPtr[60u*BlockSize]; }
if (K_length >= 62u) { val2 = inPtr[61u*BlockSize]; }
if (K_length >= 63u) { val3 = inPtr[62u*BlockSize]; }
if (K_length >= 64u) { val4 = inPtr[63u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 64u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 63u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 62u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 61u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [64-67]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 65u) { val1 = inPtr[64u*BlockSize]; }
if (K_length >= 66u) { val2 = inPtr[65u*BlockSize]; }
if (K_length >= 67u) { val3 = inPtr[66u*BlockSize]; }
if (K_length >= 68u) { val4 = inPtr[67u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 68u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 67u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 66u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 65u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [68-71]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 69u) { val1 = inPtr[68u*BlockSize]; }
if (K_length >= 70u) { val2 = inPtr[69u*BlockSize]; }
if (K_length >= 71u) { val3 = inPtr[70u*BlockSize]; }
if (K_length >= 72u) { val4 = inPtr[71u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 72u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 71u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 70u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 69u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [72-75]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 73u) { val1 = inPtr[72u*BlockSize]; }
if (K_length >= 74u) { val2 = inPtr[73u*BlockSize]; }
if (K_length >= 75u) { val3 = inPtr[74u*BlockSize]; }
if (K_length >= 76u) { val4 = inPtr[75u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 76u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 75u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 74u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 73u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [76-79]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 77u) { val1 = inPtr[76u*BlockSize]; }
if (K_length >= 78u) { val2 = inPtr[77u*BlockSize]; }
if (K_length >= 79u) { val3 = inPtr[78u*BlockSize]; }
if (K_length >= 80u) { val4 = inPtr[79u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 80u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 79u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 78u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 77u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [80-83]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 81u) { val1 = inPtr[80u*BlockSize]; }
if (K_length >= 82u) { val2 = inPtr[81u*BlockSize]; }
if (K_length >= 83u) { val3 = inPtr[82u*BlockSize]; }
if (K_length >= 84u) { val4 = inPtr[83u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 84u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 83u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 82u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 81u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [84-87]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 85u) { val1 = inPtr[84u*BlockSize]; }
if (K_length >= 86u) { val2 = inPtr[85u*BlockSize]; }
if (K_length >= 87u) { val3 = inPtr[86u*BlockSize]; }
if (K_length >= 88u) { val4 = inPtr[87u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 88u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 87u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 86u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 85u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [88-91]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 89u) { val1 = inPtr[88u*BlockSize]; }
if (K_length >= 90u) { val2 = inPtr[89u*BlockSize]; }
if (K_length >= 91u) { val3 = inPtr[90u*BlockSize]; }
if (K_length >= 92u) { val4 = inPtr[91u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 92u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 91u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 90u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 89u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [92-95]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 93u) { val1 = inPtr[92u*BlockSize]; }
if (K_length >= 94u) { val2 = inPtr[93u*BlockSize]; }
if (K_length >= 95u) { val3 = inPtr[94u*BlockSize]; }
if (K_length >= 96u) { val4 = inPtr[95u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 96u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 95u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 94u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 93u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [96-99]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 97u) { val1 = inPtr[96u*BlockSize]; }
if (K_length >= 98u) { val2 = inPtr[97u*BlockSize]; }
if (K_length >= 99u) { val3 = inPtr[98u*BlockSize]; }
if (K_length >= 100u) { val4 = inPtr[99u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 100u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 99u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 98u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 97u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [100-103]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 101u) { val1 = inPtr[100u*BlockSize]; }
if (K_length >= 102u) { val2 = inPtr[101u*BlockSize]; }
if (K_length >= 103u) { val3 = inPtr[102u*BlockSize]; }
if (K_length >= 104u) { val4 = inPtr[103u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 104u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 103u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 102u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 101u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [104-107]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 105u) { val1 = inPtr[104u*BlockSize]; }
if (K_length >= 106u) { val2 = inPtr[105u*BlockSize]; }
if (K_length >= 107u) { val3 = inPtr[106u*BlockSize]; }
if (K_length >= 108u) { val4 = inPtr[107u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 108u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 107u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 106u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 105u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [108-111]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 109u) { val1 = inPtr[108u*BlockSize]; }
if (K_length >= 110u) { val2 = inPtr[109u*BlockSize]; }
if (K_length >= 111u) { val3 = inPtr[110u*BlockSize]; }
if (K_length >= 112u) { val4 = inPtr[111u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 112u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 111u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 110u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 109u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [112-115]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 113u) { val1 = inPtr[112u*BlockSize]; }
if (K_length >= 114u) { val2 = inPtr[113u*BlockSize]; }
if (K_length >= 115u) { val3 = inPtr[114u*BlockSize]; }
if (K_length >= 116u) { val4 = inPtr[115u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 116u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 115u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 114u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 113u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [116-119]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 117u) { val1 = inPtr[116u*BlockSize]; }
if (K_length >= 118u) { val2 = inPtr[117u*BlockSize]; }
if (K_length >= 119u) { val3 = inPtr[118u*BlockSize]; }
if (K_length >= 120u) { val4 = inPtr[119u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 120u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 119u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 118u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 117u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [120-123]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 121u) { val1 = inPtr[120u*BlockSize]; }
if (K_length >= 122u) { val2 = inPtr[121u*BlockSize]; }
if (K_length >= 123u) { val3 = inPtr[122u*BlockSize]; }
if (K_length >= 124u) { val4 = inPtr[123u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 124u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 123u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 122u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 121u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [124-127]
//-
// Read in next 'three' values (32-bit)
if (K_length >= 125u) { val1 = inPtr[124u*BlockSize]; }
if (K_length >= 126u) { val2 = inPtr[125u*BlockSize]; }
if (K_length >= 127u) { val3 = inPtr[126u*BlockSize]; }
// NOTE: Do not uncomment the line below => *OVERFLOW* BUG !!!
//if (K_length >= 128u) { val4 = inPtr[127u*BlockSize]; }
// Bin next 'three' values into count array
// NOTE: Do not uncomment the section below => *OVERFLOW* BUG !!!
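// (Likely reason, inferred from the 'overflow' / K2_stop bookkeeping in this kernel
//  rather than stated here: the per-thread counts appear to be packed 8-bit lanes
//  that are only flushed into the row counts once 'overflow' reaches K2_stop. With
//  two extracted elements per 32-bit value, K_length = 127 allows at most 254
//  increments to a single lane between flushes, which still fits in 8 bits; a 128th
//  value would allow 256 and could silently wrap a lane, i.e. the overflow bug
//  warned about above.)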
/*
if (K_length >= 128u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
*/
{
if (K_length == 127u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 126u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 125u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-----
// Move to next row of work
//-----
currIdx += rowSize;
inPtr += rowSize;
// Increment 'overflow' count
overflow += K2_length; // K values * 2 words per value
}
__syncthreads();
//--------------------------------------
// LAST: Process last leftover chunk
// with more careful range checking
//--------------------------------------
if (nLeftOverElems)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K2_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
// NOTE #1: the 'K_length' variable below is a static
// hard-coded constant in the range [1..127].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
// NOTE #2: We use a cooperative stride
//          across all threads in all blocks in the grid
//          ChunkSize = BlockSize * GridSize = 64 * 48 = 3072
//          RowSize   = WorkPerThread(K) * ChunkSize = 63 * 3072 = 193,536
//
// B0 B1 ... B47 (Blocks in Grid)
// ---- ---- --- ----
// k = 1 => |64| |64| ... |64| (3072 Thread & I/O requests for 1st work item per thread)
// k = 2 => |64| |64| ... |64| ditto (2nd work item per thread)
// ... ... ...
// k = 63 => |64| |64| ... |64| ditto (63rd work item per thread)
// NOTE #3: We use "Divide & Conquer" to avoid as much of the slower range checking as possible:
//          try batches of 64, 32, 16, 8, 4, 2, 1, and finally the leftover elements (on which we finally must range check)
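// Sketch of the pattern used below (illustrative comments only, not executed;
// 'stride' stands for nThreadsPerGrid):
//
//   if (K_length >= 64 && nLeftOverElems >= 64*stride) { 64 strided reads, no range checks; baseIdx += 64*stride; nLeftOverElems -= 64*stride; }
//   if (K_length >= 32 && nLeftOverElems >= 32*stride) { 32 strided reads, no range checks; ... }
//   ...same for 16, 8, 4, 2, 1...
//   finally, each thread range-checks its single remaining index before reading.
//
// Since the batch sizes halve each time, one pass per batch size is enough to bring
// the remainder below the next threshold, and the outer K_length guard lets the
// compiler drop batch sizes that can never apply for the chosen K (see NOTE #1).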
//----
// Setup Pointers & Indices for cooperative stride
//----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Get block index
U32 nSkip = nSafeRows * rowSize; // Skip past already processed rows
U32 chunkIdx = (bid * BlockSize) + tid; // Get starting index within chunk
U32 baseIdx = start + nSkip + chunkIdx; // Get starting index for left over elements
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
//------
// Try Section of 64
//------
if (K_length >= 64u)
{
// Process 64 chunks safely without range checking
if (nLeftOverElems >= (64u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [33..36]
//-----
val1 = inPtr[(32u*nThreadsPerGrid)];
val2 = inPtr[(33u*nThreadsPerGrid)];
val3 = inPtr[(34u*nThreadsPerGrid)];
val4 = inPtr[(35u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [37..40]
//-----
val1 = inPtr[(36u*nThreadsPerGrid)];
val2 = inPtr[(37u*nThreadsPerGrid)];
val3 = inPtr[(38u*nThreadsPerGrid)];
val4 = inPtr[(39u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [41..44]
//-----
val1 = inPtr[(40u*nThreadsPerGrid)];
val2 = inPtr[(41u*nThreadsPerGrid)];
val3 = inPtr[(42u*nThreadsPerGrid)];
val4 = inPtr[(43u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [45..48]
//-----
val1 = inPtr[(44u*nThreadsPerGrid)];
val2 = inPtr[(45u*nThreadsPerGrid)];
val3 = inPtr[(46u*nThreadsPerGrid)];
val4 = inPtr[(47u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [49..52]
//-----
val1 = inPtr[(48u*nThreadsPerGrid)];
val2 = inPtr[(49u*nThreadsPerGrid)];
val3 = inPtr[(50u*nThreadsPerGrid)];
val4 = inPtr[(51u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [53..56]
//-----
val1 = inPtr[(52u*nThreadsPerGrid)];
val2 = inPtr[(53u*nThreadsPerGrid)];
val3 = inPtr[(54u*nThreadsPerGrid)];
val4 = inPtr[(55u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [57..60]
//-----
val1 = inPtr[(56u*nThreadsPerGrid)];
val2 = inPtr[(57u*nThreadsPerGrid)];
val3 = inPtr[(58u*nThreadsPerGrid)];
val4 = inPtr[(59u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [60..63]
//-----
val1 = inPtr[(60u*nThreadsPerGrid)];
val2 = inPtr[(61u*nThreadsPerGrid)];
val3 = inPtr[(62u*nThreadsPerGrid)];
val4 = inPtr[(63u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (64u * nThreadsPerGrid);
nLeftOverElems -= (64u * nThreadsPerGrid);
}
}
//------
// Try Section of 32
//------
if (K_length >= 32u)
{
// Process 32 chunks safely without range checking
if (nLeftOverElems >= (32u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
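			// Note on the access pattern (illustrative): assuming 'baseIdx'
			// was derived from the global thread ID (as in the main loop),
			// adjacent threads hold adjacent addresses, so each of the
			// strided reads below (stride = nThreadsPerGrid elements)
			// is a fully coalesced grid-wide load.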
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (32u * nThreadsPerGrid);
nLeftOverElems -= (32u * nThreadsPerGrid);
}
}
//------
// Try Section of 16
//------
if (K_length >= 16u)
{
// Process 16 chunks safely without range checking
if (nLeftOverElems >= (16u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (16u * nThreadsPerGrid);
nLeftOverElems -= (16u * nThreadsPerGrid);
}
}
//------
// Try Section of 8
//------
if (K_length >= 8u)
{
// Process 8 chunks safely without range checking
if (nLeftOverElems >= (8u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (8u * nThreadsPerGrid);
nLeftOverElems -= (8u * nThreadsPerGrid);
}
}
//------
// Try Section of 4
//------
if (K_length >= 4u)
{
// Process 4 chunks safely without range checking
if (nLeftOverElems >= (4u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (4u * nThreadsPerGrid);
nLeftOverElems -= (4u * nThreadsPerGrid);
}
}
//------
// Try Section of 2
//------
if (K_length >= 2u)
{
// Process 2 chunks safely without range checking
if (nLeftOverElems >= (2u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..2]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (2u * nThreadsPerGrid);
nLeftOverElems -= (2u * nThreadsPerGrid);
}
}
//------
// Try Section of 1
//------
if (K_length >= 1u)
{
// Process 1 chunk safely without range checking
if (nLeftOverElems >= (1u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
// Move to next section
baseIdx += (1u * nThreadsPerGrid);
nLeftOverElems -= (1u * nThreadsPerGrid);
}
}
//------
// Process Last few elements
// with careful RANGE CHECKING !!!
//------
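		// Why a single range check suffices (illustrative reasoning):
		// every 'section' above consumes a multiple of nThreadsPerGrid
		// elements, so by the time we get here fewer than nThreadsPerGrid
		// elements remain -- at most one per thread -- and only threads
		// whose baseIdx still lies inside [start,stop] have work left.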
if (nLeftOverElems > 0u)
{
// Make sure we are 'in range' before reading & binning
U32 inRange1 = (baseIdx <= stop);
if (inRange1)
{
// Read in 32-bit element
val1 = inVals[baseIdx];
// Process element
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
// Update Accumulation count
		overflow += K2_length;	// overflow += K 32-bit words * 2 values per word
}
// Cleanup Mapping object
// (Give mapper a chance to cleanup any resources)
mapper.Finish();
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow > 0u)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
//-------------------------------------------------
// Write out final row 'counts'
//-------------------------------------------------
{
// Compute starting 'row counts' offset
		U32 rIdx = threadIdx.x * 4u;			// 4 row counts per thread
U32 rRow = rIdx >> logBankSize;
U32 rCol = rIdx & BankMask;
U32 rowIdx = (rRow * strideBank) + (rCol + 1u);
// Extra '+1' to shift past initial pad element
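		// Worked example of the padded layout (illustrative): with
		// BankSize = 32 and strideBank = 33, thread 9 gets
		// rIdx = 9*4 = 36, rRow = 36 >> 5 = 1, rCol = 36 & 31 = 4,
		// so rowIdx = 1*33 + (4+1) = 38; each logical row of 32 counts
		// occupies 33 words, with word 0 of every row left as padding.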
U32 * rowPtr = &s_thrdCounts[rowIdx];
// Store row counts in row array
rowPtr[0] = rowCnt1;
rowPtr[1] = rowCnt2;
rowPtr[2] = rowCnt3;
rowPtr[3] = rowCnt4;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
// Get Warp Row & Column
U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Get local & global indices
U32 outGlobal = (blockIdx.x * nHistBins);
U32 outLocal = (warpRow * OutWarpSize);
U32 rowBase = (warpRow * OutStrideSize);
U32 outBase = outGlobal + outLocal;
U32 rowOff = warpCol + 1u;
U32 outIdx = outBase + warpCol;
rowIdx = rowBase + rowOff;
// Get local & global pointers
U32 * outPtr = &outRowCounts[outIdx];
rowPtr = &s_thrdCounts[rowIdx];
// Write our 'per row' counts in warp sequential order
if (OutLength >= 1u) { outPtr[(0u*WarpSize)] = rowPtr[(0u*strideBank)]; }
if (OutLength >= 2u) { outPtr[(1u*WarpSize)] = rowPtr[(1u*strideBank)]; }
if (OutLength >= 3u) { outPtr[(2u*WarpSize)] = rowPtr[(2u*strideBank)]; }
if (OutLength >= 4u) { outPtr[(3u*WarpSize)] = rowPtr[(3u*strideBank)]; }
if (OutLength >= 5u) { outPtr[(4u*WarpSize)] = rowPtr[(4u*strideBank)]; }
if (OutLength >= 6u) { outPtr[(5u*WarpSize)] = rowPtr[(5u*strideBank)]; }
if (OutLength >= 7u) { outPtr[(6u*WarpSize)] = rowPtr[(6u*strideBank)]; }
if (OutLength >= 8u) { outPtr[(7u*WarpSize)] = rowPtr[(7u*strideBank)]; }
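		// Illustrative mapping (assuming BlockSize = 64, so WarpsPerBlock = 2,
		// OutWarpSize = 128 and OutLength = 4): warp 0 of each block writes
		// this block's counts for bins [0..127] and warp 1 for bins [128..255],
		// each thread storing OutLength counts at a stride of WarpSize into
		// outRowCounts[blockIdx.x * nHistBins + ...].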
}
}
/*---------------------------------------------------------
  Name:	K1_TRISH_CountRows_GEN_B4
  Desc:	Bins 32-bit values into 256-way 'per row' counts
		(one partial row of counts per thread block)
  Note:
  1. Assumes underlying data is stored as 32-bit values
     (U32,I32,F32) per 32-bit storage element.
  2. This further implies that K = [1,255] to safely
     avoid overflowing an 8-bit counter.
  3. However, K >= 104 impacts performance negatively,
     as the program appears to grow too large to fit
     into the hardware code cache, so we restrict K
     to the range K=[1..127]
---------------------------------------------------------*/
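/*---------------------------------------------------------
   Background for Note 2 (illustrative): each thread keeps its
   256 per-bin tallies as 8-bit fields packed 4 per 32-bit lane
   (64 lanes, see 'nLanes' below), and an 8-bit field saturates
   at 255. Every binned value increments exactly one field, so
   the per-thread counters must be flushed into the 32-bit row
   counts before more than 255 values could land in a single
   field -- hence the K <= 255 ceiling and the 'K1_stop'
   flush threshold inside the main loop.
---------------------------------------------------------*/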
template <
typename valT, // underlying value Type (U8, I8)
typename mapT, // underlying mapper object
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads per Warp )
U32 BlockSize, // Threads Per Block (needs to be a power of 2 & multiple of warpsize)
U32 GridSize, // Blocks Per Grid
U32 K_length // #elements to process per thread before looping
>
__global__
void K1_TRISH_CountRows_GEN_B4
(
U32 * outRowCounts, // OUT - 256-way row-sums array
const valT * inVals, // IN - values to bin and count
U32 start, // IN - range [start,stop] to check and count
U32 stop, // ditto
valT minVal, // IN - minimum value
valT maxVal, // IN - maximum value
U32 numBins // IN - number of bins (in histogram)
)
{
//-------------------------------------------
// Constant values (computed at compile time)
//-------------------------------------------
// Bank Size (elements per bank)
const U32 BankSize = (1u << logBankSize); // 32 = 2^5 threads per bank
const U32 BankMask = BankSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
const U32 strideBank = BankSize + 1u; // 33 = 32 + 1
		// Extra '+1' to help avoid shared-memory bank conflicts
// Warp Size (threads per warp)
const U32 WarpSize = (1u << logWarpSize); // 32 = 2^5 threads per warp
const U32 WarpMask = WarpSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
// Block Size (threads per block)
//const U32 BlockSize = 64u;
const U32 BlockMask = BlockSize - 1u;
// Chunk Size
//const U32 ChunkSize = BlockSize * K_length;
//const U32 IN_WarpSize = K_length * WarpSize;
// K_length
//const U32 K_length = 16u; // 16
const U32 K1_length = K_length; // 16 = 16 (1 storage value per input value)
const U32 K1_stop = 256u - K1_length; // 240 = 256 - 16 (conservative test)
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 2 = 64/32
// Bins per Histogram
const U32 nHistBins = 256u; // 256 = 2^8
// Lane Info (Compress 4 'bins' into each 32-bit value)
const U32 nLanes = 64u; // 64, # Lanes = 256 bins / 4 bins per lane
// 'Per Thread' counts array
const U32 nTCounts = nLanes * BlockSize;
const U32 banksTCounts = (nTCounts + BankMask) / BankSize;
const U32 padTCounts = (banksTCounts * BankSize) - nTCounts;
const U32 sizeTCounts = nTCounts + padTCounts;
// Output size
const U32 OutWarpSize = nHistBins / WarpsPerBlock;
const U32 OutLength = OutWarpSize / WarpSize;
const U32 OutStrideSize = OutLength * strideBank;
// Array Initialization
const U32 nPassesThrd = sizeTCounts / BlockSize;
const U32 leftOverThrd = sizeTCounts - (nPassesThrd * BlockSize);
const U32 nThreadsPerGrid = BlockSize * GridSize; // 3,072 = 64 * 48
	const U32 rowSize = K_length * nThreadsPerGrid;	// 193,536 = 63 * 64 * 48
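	// Illustrative instantiation (using the values suggested by the
	// inline comments: BlockSize = 64, GridSize = 48, K_length = 63):
	// WarpsPerBlock = 2, nTCounts = 64 * 64 = 4096,
	// banksTCounts = 128, padTCounts = 0, sizeTCounts = 4096,
	// OutWarpSize = 128, OutLength = 4, OutStrideSize = 4 * 33 = 132,
	// nPassesThrd = 64, leftOverThrd = 0,
	// nThreadsPerGrid = 3,072 and rowSize = 63 * 3,072 = 193,536.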
//------------------------------------
// Local Typedefs
//------------------------------------
// TRISH types
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//------------------------------------
// Local Variables
//------------------------------------
// Local variables (shared memory)
__shared__ U32 s_thrdCounts[sizeTCounts]; // 'per thread' counts
// Local variables (registers)
U32 rowCnt1 = 0u;
U32 rowCnt2 = 0u;
U32 rowCnt3 = 0u;
U32 rowCnt4 = 0u;
//---------------------------
// Compute Indices & Pointers
//---------------------------
U32 tid = threadIdx.x; // Thread ID within Block
U32 * cntPtr;
U32 * basePtr;
{
// Get Warp Row & Column
//U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
//U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute starting 'input' offset (Warp Sequential Layout)
//inIdx = (warpRow * IN_WarpSize) // Move to each warps assigned portion of work
// + warpCol; // Move to warp column (in warp)
// Compute starting serial scan index
U32 baseIdx = (tid * BlockSize);
// Get pointers into shared memory array
// for different views of memory
cntPtr = &s_thrdCounts[threadIdx.x];
basePtr = &s_thrdCounts[baseIdx];
}
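	// Two views of the same shared array (illustrative): 'cntPtr' is a
	// per-thread column view -- the BinCount* helpers index it with a
	// stride of BlockSize words per lane -- while 'basePtr' points at a
	// contiguous run of BlockSize words starting at tid * BlockSize.
	// With BlockSize == nLanes == 64 (the values suggested above) that
	// run covers one lane across all threads, which is the granularity
	// the row-accumulation step below works at.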
//-------------------------------------------
// Zero out arrays
//-------------------------------------------
{
//-
// Zero out 'Per Thread' counts
//-
U32 * ptrTC = (&s_thrdCounts[0]);
SetArray_BlockSeq
<
U32, BlockSize, nPassesThrd, leftOverThrd, sizeTCounts
>
(
ptrTC, 0u
);
}
//-----
// Compute thread, block, & grid indices & sizes
//-----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Block ID within Grid
U32 elemOffset = (bid * K_length * BlockSize) + tid; // Starting offset
U32 nElems32 = stop - start + 1u;
U32 nMaxRows = (nElems32 + (rowSize - 1u)) / rowSize;
U32 nSafeRows = nElems32 / rowSize;
U32 nSafeElems = nSafeRows * rowSize;
U32 nLeftOverElems = nElems32 - nSafeElems;
U32 startIdx = start + elemOffset;
U32 stopIdx = startIdx + (nSafeRows * rowSize);
U32 currIdx = startIdx;
U32 overflow = 0u;
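	// Illustrative numbers (with rowSize = 193,536 as above and, say,
	// nElems32 = 1,000,000): nSafeRows = 5, nSafeElems = 967,680 and
	// nLeftOverElems = 32,320; each thread then makes 5 passes through
	// the unrolled loop below, and the remaining 32,320 elements are
	// consumed by the power-of-two 'sections' plus one final
	// range-checked read.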
	// Initiate Mapping object
// (Transform from values to bin indices)
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
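	// For orientation only -- a minimal sketch of the shape a 'mapT'
	// functor takes, assuming a simple linear binning (the real mapper
	// types are defined elsewhere and may use a different transform):
	//
	//    struct LinearMapperSketch
	//    {
	//       float lo, scale; U32 maxBin;
	//       __device__ void Initiate( float minVal, float maxVal, U32 numBins )
	//          { lo = minVal; scale = (float)numBins / (maxVal - minVal); maxBin = numBins - 1u; }
	//       __device__ void Transform1( U32 & bin, float val )
	//          { U32 raw = (U32)((val - lo) * scale); bin = (raw > maxBin) ? maxBin : raw; }
	//       // Transform2/3/4 apply Transform1 per value; Finish() is a no-op here.
	//       __device__ void Finish() {}
	//    };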
//-----
// Process all safe blocks
//-----
// 'input' pointer for reading from memory
const valT * inPtr = &inVals[currIdx];
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
while (currIdx < stopIdx)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K1_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
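		// Numeric instance (illustrative, K_length = 63, with 'overflow'
		// counting values binned since the last flush): K1_stop = 193,
		// so we only proceed un-flushed while overflow <= 192; every
		// packed 8-bit field is <= overflow, and this pass adds at most
		// 63 more to any one field, keeping it <= 192 + 63 = 255.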
valT val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// NOTE: the 'K_length' variable below is a static
		// hard-coded constant in the range [1..127].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
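		// Example: instantiated with K_length = 48, every
		// "if (K_length >= 49u)" block below folds away at compile time,
		// leaving exactly 48 strided reads per pass.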
//-
// Process values [0..3]
//-
// Read in first 'four' values (32-bit)
if (K_length >= 1u) { val1 = inPtr[0u*BlockSize]; }
if (K_length >= 2u) { val2 = inPtr[1u*BlockSize]; }
if (K_length >= 3u) { val3 = inPtr[2u*BlockSize]; }
if (K_length >= 4u) { val4 = inPtr[3u*BlockSize]; }
// Convert to upscale type
if (K_length >= 1u) { b1 = upscaleType(val1); }
if (K_length >= 2u) { b2 = upscaleType(val2); }
if (K_length >= 3u) { b3 = upscaleType(val3); }
if (K_length >= 4u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 4u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 3u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 2u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 1u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [4..7]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 5u) { val1 = inPtr[4u*BlockSize]; }
if (K_length >= 6u) { val2 = inPtr[5u*BlockSize]; }
if (K_length >= 7u) { val3 = inPtr[6u*BlockSize]; }
if (K_length >= 8u) { val4 = inPtr[7u*BlockSize]; }
// Convert to upscale type
if (K_length >= 5u) { b1 = upscaleType(val1); }
if (K_length >= 6u) { b2 = upscaleType(val2); }
if (K_length >= 7u) { b3 = upscaleType(val3); }
if (K_length >= 8u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 8u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 7u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 6u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 5u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [8..11]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 9u) { val1 = inPtr[ 8u*BlockSize]; }
if (K_length >= 10u) { val2 = inPtr[ 9u*BlockSize]; }
if (K_length >= 11u) { val3 = inPtr[10u*BlockSize]; }
if (K_length >= 12u) { val4 = inPtr[11u*BlockSize]; }
// Convert to upscale type
if (K_length >= 9u) { b1 = upscaleType(val1); }
if (K_length >= 10u) { b2 = upscaleType(val2); }
if (K_length >= 11u) { b3 = upscaleType(val3); }
if (K_length >= 12u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 12u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 11u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 10u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 9u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [12..15]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 13u) { val1 = inPtr[12u*BlockSize]; }
if (K_length >= 14u) { val2 = inPtr[13u*BlockSize]; }
if (K_length >= 15u) { val3 = inPtr[14u*BlockSize]; }
if (K_length >= 16u) { val4 = inPtr[15u*BlockSize]; }
// Convert to upscale type
if (K_length >= 13u) { b1 = upscaleType(val1); }
if (K_length >= 14u) { b2 = upscaleType(val2); }
if (K_length >= 15u) { b3 = upscaleType(val3); }
if (K_length >= 16u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 16u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 15u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 14u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 13u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [16..19]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 17u) { val1 = inPtr[16u*BlockSize]; }
if (K_length >= 18u) { val2 = inPtr[17u*BlockSize]; }
if (K_length >= 19u) { val3 = inPtr[18u*BlockSize]; }
if (K_length >= 20u) { val4 = inPtr[19u*BlockSize]; }
// Convert to upscale type
if (K_length >= 17u) { b1 = upscaleType(val1); }
if (K_length >= 18u) { b2 = upscaleType(val2); }
if (K_length >= 19u) { b3 = upscaleType(val3); }
if (K_length >= 20u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 20u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 19u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 18u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 17u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [20..23]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 21u) { val1 = inPtr[20u*BlockSize]; }
if (K_length >= 22u) { val2 = inPtr[21u*BlockSize]; }
if (K_length >= 23u) { val3 = inPtr[22u*BlockSize]; }
if (K_length >= 24u) { val4 = inPtr[23u*BlockSize]; }
// Convert to upscale type
if (K_length >= 21u) { b1 = upscaleType(val1); }
if (K_length >= 22u) { b2 = upscaleType(val2); }
if (K_length >= 23u) { b3 = upscaleType(val3); }
if (K_length >= 24u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 24u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 23u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 22u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 21u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [24..27]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 25u) { val1 = inPtr[24u*BlockSize]; }
if (K_length >= 26u) { val2 = inPtr[25u*BlockSize]; }
if (K_length >= 27u) { val3 = inPtr[26u*BlockSize]; }
if (K_length >= 28u) { val4 = inPtr[27u*BlockSize]; }
// Convert to upscale type
if (K_length >= 25u) { b1 = upscaleType(val1); }
if (K_length >= 26u) { b2 = upscaleType(val2); }
if (K_length >= 27u) { b3 = upscaleType(val3); }
if (K_length >= 28u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 28u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 27u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 26u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 25u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [28..31]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 29u) { val1 = inPtr[28u*BlockSize]; }
if (K_length >= 30u) { val2 = inPtr[29u*BlockSize]; }
if (K_length >= 31u) { val3 = inPtr[30u*BlockSize]; }
if (K_length >= 32u) { val4 = inPtr[31u*BlockSize]; }
// Convert to upscale type
if (K_length >= 29u) { b1 = upscaleType(val1); }
if (K_length >= 30u) { b2 = upscaleType(val2); }
if (K_length >= 31u) { b3 = upscaleType(val3); }
if (K_length >= 32u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 32u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 31u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 30u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 29u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [32..35]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 33u) { val1 = inPtr[32u*BlockSize]; }
if (K_length >= 34u) { val2 = inPtr[33u*BlockSize]; }
if (K_length >= 35u) { val3 = inPtr[34u*BlockSize]; }
if (K_length >= 36u) { val4 = inPtr[35u*BlockSize]; }
// Convert to upscale type
if (K_length >= 33u) { b1 = upscaleType(val1); }
if (K_length >= 34u) { b2 = upscaleType(val2); }
if (K_length >= 35u) { b3 = upscaleType(val3); }
if (K_length >= 36u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 36u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 35u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 34u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 33u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [36..39]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 37u) { val1 = inPtr[36u*BlockSize]; }
if (K_length >= 38u) { val2 = inPtr[37u*BlockSize]; }
if (K_length >= 39u) { val3 = inPtr[38u*BlockSize]; }
if (K_length >= 40u) { val4 = inPtr[39u*BlockSize]; }
// Convert to upscale type
if (K_length >= 37u) { b1 = upscaleType(val1); }
if (K_length >= 38u) { b2 = upscaleType(val2); }
if (K_length >= 39u) { b3 = upscaleType(val3); }
if (K_length >= 40u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 40u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 39u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 38u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 37u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [40..43]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 41u) { val1 = inPtr[40u*BlockSize]; }
if (K_length >= 42u) { val2 = inPtr[41u*BlockSize]; }
if (K_length >= 43u) { val3 = inPtr[42u*BlockSize]; }
if (K_length >= 44u) { val4 = inPtr[43u*BlockSize]; }
// Convert to upscale type
if (K_length >= 41u) { b1 = upscaleType(val1); }
if (K_length >= 42u) { b2 = upscaleType(val2); }
if (K_length >= 43u) { b3 = upscaleType(val3); }
if (K_length >= 44u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 44u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 43u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 42u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 41u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [44..47]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 45u) { val1 = inPtr[44u*BlockSize]; }
if (K_length >= 46u) { val2 = inPtr[45u*BlockSize]; }
if (K_length >= 47u) { val3 = inPtr[46u*BlockSize]; }
if (K_length >= 48u) { val4 = inPtr[47u*BlockSize]; }
// Convert to upscale type
if (K_length >= 45u) { b1 = upscaleType(val1); }
if (K_length >= 46u) { b2 = upscaleType(val2); }
if (K_length >= 47u) { b3 = upscaleType(val3); }
if (K_length >= 48u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 48u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 47u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 46u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 45u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [48..51]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 49u) { val1 = inPtr[48u*BlockSize]; }
if (K_length >= 50u) { val2 = inPtr[49u*BlockSize]; }
if (K_length >= 51u) { val3 = inPtr[50u*BlockSize]; }
if (K_length >= 52u) { val4 = inPtr[51u*BlockSize]; }
// Convert to upscale type
if (K_length >= 49u) { b1 = upscaleType(val1); }
if (K_length >= 50u) { b2 = upscaleType(val2); }
if (K_length >= 51u) { b3 = upscaleType(val3); }
if (K_length >= 52u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 52u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 51u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 50u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 49u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [52..55]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 53u) { val1 = inPtr[52u*BlockSize]; }
if (K_length >= 54u) { val2 = inPtr[53u*BlockSize]; }
if (K_length >= 55u) { val3 = inPtr[54u*BlockSize]; }
if (K_length >= 56u) { val4 = inPtr[55u*BlockSize]; }
// Convert to upscale type
if (K_length >= 53u) { b1 = upscaleType(val1); }
if (K_length >= 54u) { b2 = upscaleType(val2); }
if (K_length >= 55u) { b3 = upscaleType(val3); }
if (K_length >= 56u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 56u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 55u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 54u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 53u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [56..59]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 57u) { val1 = inPtr[56u*BlockSize]; }
if (K_length >= 58u) { val2 = inPtr[57u*BlockSize]; }
if (K_length >= 59u) { val3 = inPtr[58u*BlockSize]; }
if (K_length >= 60u) { val4 = inPtr[59u*BlockSize]; }
// Convert to upscale type
if (K_length >= 57u) { b1 = upscaleType(val1); }
if (K_length >= 58u) { b2 = upscaleType(val2); }
if (K_length >= 59u) { b3 = upscaleType(val3); }
if (K_length >= 60u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 60u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 59u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 58u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 57u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [60..63]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 61u) { val1 = inPtr[60u*BlockSize]; }
if (K_length >= 62u) { val2 = inPtr[61u*BlockSize]; }
if (K_length >= 63u) { val3 = inPtr[62u*BlockSize]; }
if (K_length >= 64u) { val4 = inPtr[63u*BlockSize]; }
// Convert to upscale type
if (K_length >= 61u) { b1 = upscaleType(val1); }
if (K_length >= 62u) { b2 = upscaleType(val2); }
if (K_length >= 63u) { b3 = upscaleType(val3); }
if (K_length >= 64u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 64u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 63u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 62u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 61u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [64..67]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 65u) { val1 = inPtr[64u*BlockSize]; }
if (K_length >= 66u) { val2 = inPtr[65u*BlockSize]; }
if (K_length >= 67u) { val3 = inPtr[66u*BlockSize]; }
if (K_length >= 68u) { val4 = inPtr[67u*BlockSize]; }
// Convert to upscale type
if (K_length >= 65u) { b1 = upscaleType(val1); }
if (K_length >= 66u) { b2 = upscaleType(val2); }
if (K_length >= 67u) { b3 = upscaleType(val3); }
if (K_length >= 68u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 68u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 67u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 66u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 65u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [68..71]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 69u) { val1 = inPtr[68u*BlockSize]; }
if (K_length >= 70u) { val2 = inPtr[69u*BlockSize]; }
if (K_length >= 71u) { val3 = inPtr[70u*BlockSize]; }
if (K_length >= 72u) { val4 = inPtr[71u*BlockSize]; }
// Convert to upscale type
if (K_length >= 69u) { b1 = upscaleType(val1); }
if (K_length >= 70u) { b2 = upscaleType(val2); }
if (K_length >= 71u) { b3 = upscaleType(val3); }
if (K_length >= 72u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 72u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 71u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 70u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 69u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [72..75]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 73u) { val1 = inPtr[72u*BlockSize]; }
if (K_length >= 74u) { val2 = inPtr[73u*BlockSize]; }
if (K_length >= 75u) { val3 = inPtr[74u*BlockSize]; }
if (K_length >= 76u) { val4 = inPtr[75u*BlockSize]; }
// Convert to upscale type
if (K_length >= 73u) { b1 = upscaleType(val1); }
if (K_length >= 74u) { b2 = upscaleType(val2); }
if (K_length >= 75u) { b3 = upscaleType(val3); }
if (K_length >= 76u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 76u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 75u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 74u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 73u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [76..79]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 77u) { val1 = inPtr[76u*BlockSize]; }
if (K_length >= 78u) { val2 = inPtr[77u*BlockSize]; }
if (K_length >= 79u) { val3 = inPtr[78u*BlockSize]; }
if (K_length >= 80u) { val4 = inPtr[79u*BlockSize]; }
// Convert to upscale type
if (K_length >= 77u) { b1 = upscaleType(val1); }
if (K_length >= 78u) { b2 = upscaleType(val2); }
if (K_length >= 79u) { b3 = upscaleType(val3); }
if (K_length >= 80u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 80u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 79u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 78u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 77u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [80..83]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 81u) { val1 = inPtr[80u*BlockSize]; }
if (K_length >= 82u) { val2 = inPtr[81u*BlockSize]; }
if (K_length >= 83u) { val3 = inPtr[82u*BlockSize]; }
if (K_length >= 84u) { val4 = inPtr[83u*BlockSize]; }
// Convert to upscale type
if (K_length >= 81u) { b1 = upscaleType(val1); }
if (K_length >= 82u) { b2 = upscaleType(val2); }
if (K_length >= 83u) { b3 = upscaleType(val3); }
if (K_length >= 84u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 84u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 83u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 82u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 81u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [84..87]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 85u) { val1 = inPtr[84u*BlockSize]; }
if (K_length >= 86u) { val2 = inPtr[85u*BlockSize]; }
if (K_length >= 87u) { val3 = inPtr[86u*BlockSize]; }
if (K_length >= 88u) { val4 = inPtr[87u*BlockSize]; }
// Convert to upscale type
if (K_length >= 85u) { b1 = upscaleType(val1); }
if (K_length >= 86u) { b2 = upscaleType(val2); }
if (K_length >= 87u) { b3 = upscaleType(val3); }
if (K_length >= 88u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 88u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 87u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 86u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 85u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [88..91]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 89u) { val1 = inPtr[88u*BlockSize]; }
if (K_length >= 90u) { val2 = inPtr[89u*BlockSize]; }
if (K_length >= 91u) { val3 = inPtr[90u*BlockSize]; }
if (K_length >= 92u) { val4 = inPtr[91u*BlockSize]; }
// Convert to upscale type
if (K_length >= 89u) { b1 = upscaleType(val1); }
if (K_length >= 90u) { b2 = upscaleType(val2); }
if (K_length >= 91u) { b3 = upscaleType(val3); }
if (K_length >= 92u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 92u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 91u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 90u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 89u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [92..95]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 93u) { val1 = inPtr[92u*BlockSize]; }
if (K_length >= 94u) { val2 = inPtr[93u*BlockSize]; }
if (K_length >= 95u) { val3 = inPtr[94u*BlockSize]; }
if (K_length >= 96u) { val4 = inPtr[95u*BlockSize]; }
// Convert to upscale type
if (K_length >= 93u) { b1 = upscaleType(val1); }
if (K_length >= 94u) { b2 = upscaleType(val2); }
if (K_length >= 95u) { b3 = upscaleType(val3); }
if (K_length >= 96u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 96u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 95u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 94u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 93u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [96..99]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 97u) { val1 = inPtr[96u*BlockSize]; }
if (K_length >= 98u) { val2 = inPtr[97u*BlockSize]; }
if (K_length >= 99u) { val3 = inPtr[98u*BlockSize]; }
if (K_length >= 100u) { val4 = inPtr[99u*BlockSize]; }
// Convert to upscale type
if (K_length >= 97u) { b1 = upscaleType(val1); }
if (K_length >= 98u) { b2 = upscaleType(val2); }
if (K_length >= 99u) { b3 = upscaleType(val3); }
if (K_length >= 100u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 100u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 99u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 98u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 97u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [100..103]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 101u) { val1 = inPtr[100u*BlockSize]; }
if (K_length >= 102u) { val2 = inPtr[101u*BlockSize]; }
if (K_length >= 103u) { val3 = inPtr[102u*BlockSize]; }
if (K_length >= 104u) { val4 = inPtr[103u*BlockSize]; }
// Convert to upscale type
if (K_length >= 101u) { b1 = upscaleType(val1); }
if (K_length >= 102u) { b2 = upscaleType(val2); }
if (K_length >= 103u) { b3 = upscaleType(val3); }
if (K_length >= 104u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 104u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 103u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 102u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 101u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [104..107]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 105u) { val1 = inPtr[104u*BlockSize]; }
if (K_length >= 106u) { val2 = inPtr[105u*BlockSize]; }
if (K_length >= 107u) { val3 = inPtr[106u*BlockSize]; }
if (K_length >= 108u) { val4 = inPtr[107u*BlockSize]; }
// Convert to upscale type
if (K_length >= 105u) { b1 = upscaleType(val1); }
if (K_length >= 106u) { b2 = upscaleType(val2); }
if (K_length >= 107u) { b3 = upscaleType(val3); }
if (K_length >= 108u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 108u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 107u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 106u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 105u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [108..111]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 109u) { val1 = inPtr[108u*BlockSize]; }
if (K_length >= 110u) { val2 = inPtr[109u*BlockSize]; }
if (K_length >= 111u) { val3 = inPtr[110u*BlockSize]; }
if (K_length >= 112u) { val4 = inPtr[111u*BlockSize]; }
// Convert to upscale type
if (K_length >= 109u) { b1 = upscaleType(val1); }
if (K_length >= 110u) { b2 = upscaleType(val2); }
if (K_length >= 111u) { b3 = upscaleType(val3); }
if (K_length >= 112u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 112u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 111u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 110u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 109u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [112..115]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 113u) { val1 = inPtr[112u*BlockSize]; }
if (K_length >= 114u) { val2 = inPtr[113u*BlockSize]; }
if (K_length >= 115u) { val3 = inPtr[114u*BlockSize]; }
if (K_length >= 116u) { val4 = inPtr[115u*BlockSize]; }
// Convert to upscale type
if (K_length >= 113u) { b1 = upscaleType(val1); }
if (K_length >= 114u) { b2 = upscaleType(val2); }
if (K_length >= 115u) { b3 = upscaleType(val3); }
if (K_length >= 116u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 116u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 115u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 114u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 113u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [116..119]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 117u) { val1 = inPtr[116u*BlockSize]; }
if (K_length >= 118u) { val2 = inPtr[117u*BlockSize]; }
if (K_length >= 119u) { val3 = inPtr[118u*BlockSize]; }
if (K_length >= 120u) { val4 = inPtr[119u*BlockSize]; }
// Convert to upscale type
if (K_length >= 117u) { b1 = upscaleType(val1); }
if (K_length >= 118u) { b2 = upscaleType(val2); }
if (K_length >= 119u) { b3 = upscaleType(val3); }
if (K_length >= 120u) { b4 = upscaleType(val4); }
// Bin next 'four' values into count array
if (K_length >= 120u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 119u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 118u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 117u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [120..123]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 121u) { val1 = inPtr[120u*BlockSize]; }
if (K_length >= 122u) { val2 = inPtr[121u*BlockSize]; }
if (K_length >= 123u) { val3 = inPtr[122u*BlockSize]; }
if (K_length >= 124u) { val4 = inPtr[123u*BlockSize]; }
// Convert to upscale type
if (K_length >= 121u) { b1 = upscaleType(val1); }
if (K_length >= 122u) { b2 = upscaleType(val2); }
if (K_length >= 123u) { b3 = upscaleType(val3); }
if (K_length >= 124u) { b4 = upscaleType(val4); }
// Bin next 'four' values into count array
if (K_length >= 124u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 123u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 122u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 121u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [124..127]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 125u) { val1 = inPtr[124u*BlockSize]; }
if (K_length >= 126u) { val2 = inPtr[125u*BlockSize]; }
if (K_length >= 127u) { val3 = inPtr[126u*BlockSize]; }
if (K_length >= 128u) { val4 = inPtr[127u*BlockSize]; }
// Convert to upscale type
if (K_length >= 125u) { b1 = upscaleType(val1); }
if (K_length >= 126u) { b2 = upscaleType(val2); }
if (K_length >= 127u) { b3 = upscaleType(val3); }
if (K_length >= 128u) { b4 = upscaleType(val4); }
// Bin next 'four' values into count array
if (K_length >= 128u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 127u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 126u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 125u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//
// Note: We could repeat the above pattern all the way up to
//       K = [252..255], making sure to cap K at 255 (i.e. deliberately
//       skipping the 256th value) to avoid overflow.
//       However, somewhere around K = 104 we appear to overflow the
//       hardware code cache anyway, which negatively impacts
//       performance, so there is no need to go all the way...
//
//-----
// Move to next row of work
//-----
currIdx += rowSize;
inPtr += rowSize;
// Increment 'overflow' count
overflow += K1_length; // K values
}
__syncthreads();
//--------------------------------------
// LAST: Process last leftover chunk
// with more careful range checking
//--------------------------------------
if (nLeftOverElems)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K1_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
// NOTE #1: the 'K_length' variable below is a static
// hard-coded constant in the range [1..255].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
// NOTE #2: We use a cooperative stride
// across each thread in each block in grid
// ChunkSize = BlockSize * GridSize = 64 * 48 = 3072
// RowSize = WorkPerThread(K) * ChunkSize = 63 * 3072 = 193,536
//
// B0 B1 ... B47 (Blocks in Grid)
// ---- ---- --- ----
// k = 1 => |64| |64| ... |64| (3072 Thread & I/O requests for 1st work item per thread)
// k = 2 => |64| |64| ... |64| ditto (2nd work item per thread)
// ... ... ...
// k = 63 => |64| |64| ... |64| ditto (63rd work item per thread)
// NOTE #3: We use a "Divide & Conquer" approach
// to avoid as much slower range checking as possible
// We try batches of 128, 64, 32, 16, 8, 4, 2, 1,
// and then finally a leftover chunk (on which we must carefully range check)
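// Worked example (illustrative only, using the nThreadsPerGrid = 3072 figure above):
//   suppose nLeftOverElems = 10,000 after the safe rows are done.
//     batch of 64 => 64*3072 = 196,608 > 10,000 -> skip (likewise 32, 16, 8, 4)
//     batch of 2  =>  2*3072 =   6,144 <= 10,000 -> process, 3,856 remain
//     batch of 1  =>  1*3072 =   3,072 <=  3,856 -> process,   784 remain
//   the final 784 elements are then binned one per thread with an explicit
//   range check against 'stop'. Each batch is only attempted when K_length
//   is at least that batch size.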
//----
// Setup Pointers & Indices for cooperative stride
//----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Get block index
U32 nSkip = nSafeRows * rowSize; // Skip past already processed rows
U32 chunkIdx = (bid * BlockSize) + tid; // Get starting index within chunk
U32 baseIdx = start + nSkip + chunkIdx; // Get starting index for left over elements
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
//------
// Try Section of 128
//------
//
// Note: We didn't bother to insert this code due to the "code cache" performance problem
// for K >= 104.
//
// If desired, repeat the pattern for the section of 64 below
// while doubling the # of elements processed.
//------
// Try Section of 64
//------
if (K_length >= 64u)
{
// Process 64 chunks safely without range checking
if (nLeftOverElems >= (64u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [33..36]
//-----
val1 = inPtr[(32u*nThreadsPerGrid)];
val2 = inPtr[(33u*nThreadsPerGrid)];
val3 = inPtr[(34u*nThreadsPerGrid)];
val4 = inPtr[(35u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [36..39]
//-----
val1 = inPtr[(36u*nThreadsPerGrid)];
val2 = inPtr[(37u*nThreadsPerGrid)];
val3 = inPtr[(38u*nThreadsPerGrid)];
val4 = inPtr[(39u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [40..43]
//-----
val1 = inPtr[(40u*nThreadsPerGrid)];
val2 = inPtr[(41u*nThreadsPerGrid)];
val3 = inPtr[(42u*nThreadsPerGrid)];
val4 = inPtr[(43u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [44..47]
//-----
val1 = inPtr[(44u*nThreadsPerGrid)];
val2 = inPtr[(45u*nThreadsPerGrid)];
val3 = inPtr[(46u*nThreadsPerGrid)];
val4 = inPtr[(47u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [48..51]
//-----
val1 = inPtr[(48u*nThreadsPerGrid)];
val2 = inPtr[(49u*nThreadsPerGrid)];
val3 = inPtr[(50u*nThreadsPerGrid)];
val4 = inPtr[(51u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [52..55]
//-----
val1 = inPtr[(52u*nThreadsPerGrid)];
val2 = inPtr[(53u*nThreadsPerGrid)];
val3 = inPtr[(54u*nThreadsPerGrid)];
val4 = inPtr[(55u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [56..59]
//-----
val1 = inPtr[(56u*nThreadsPerGrid)];
val2 = inPtr[(57u*nThreadsPerGrid)];
val3 = inPtr[(58u*nThreadsPerGrid)];
val4 = inPtr[(59u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [60..63]
//-----
val1 = inPtr[(60u*nThreadsPerGrid)];
val2 = inPtr[(61u*nThreadsPerGrid)];
val3 = inPtr[(62u*nThreadsPerGrid)];
val4 = inPtr[(63u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (64u * nThreadsPerGrid);
nLeftOverElems -= (64u * nThreadsPerGrid);
}
}
//------
// Try Section of 32
//------
if (K_length >= 32u)
{
// Process 32 chunks safely without range checking
if (nLeftOverElems >= (32u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (32u * nThreadsPerGrid);
nLeftOverElems -= (32u * nThreadsPerGrid);
}
}
//------
// Try Section of 16
//------
if (K_length >= 16u)
{
// Process 16 chunks safely without range checking
if (nLeftOverElems >= (16u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (16u * nThreadsPerGrid);
nLeftOverElems -= (16u * nThreadsPerGrid);
}
}
//------
// Try Section of 8
//------
if (K_length >= 8u)
{
// Process 8 chunks safely without range checking
if (nLeftOverElems >= (8u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (8u * nThreadsPerGrid);
nLeftOverElems -= (8u * nThreadsPerGrid);
}
}
//------
// Try Section of 4
//------
if (K_length >= 4u)
{
// Process 4 chunks safely without range checking
if (nLeftOverElems >= (4u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (4u * nThreadsPerGrid);
nLeftOverElems -= (4u * nThreadsPerGrid);
}
}
//------
// Try Section of 2
//------
if (K_length >= 2u)
{
// Process 2 chunks safely without range checking
if (nLeftOverElems >= (2u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..2]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
// Process v1, v2
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
// Move to next section
baseIdx += (2u * nThreadsPerGrid);
nLeftOverElems -= (2u * nThreadsPerGrid);
}
}
//------
// Try Section of 1
//------
if (K_length >= 1u)
{
// Process 1 chunk safely without range checking
if (nLeftOverElems >= (1u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
// Move to next section
baseIdx += (1u * nThreadsPerGrid);
nLeftOverElems -= (1u * nThreadsPerGrid);
}
}
//------
// Process Last few elements
// with careful RANGE CHECKING !!!
//------
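// Note: by this point nLeftOverElems < nThreadsPerGrid (the batch-of-1 pass
//       above consumed every full grid-sized stride), so each thread owns at
//       most one trailing element and the single 'baseIdx <= stop' test below
//       is sufficient.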
if (nLeftOverElems > 0u)
{
// Make sure we are 'in range' before reading & binning
U32 inRange1 = (baseIdx <= stop);
if (inRange1)
{
// Read in 32-bit element
val1 = inVals[baseIdx];
// Process single element
b1 = upscaleType(val1);
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
// Update Accumulation count
overflow += K1_length; // overflow += K elements
}
// Cleanup Mapping object
// (Give mapper a chance to cleanup any resources)
mapper.Finish();
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow > 0u)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
//-------------------------------------------------
// Write out final row 'counts'
//-------------------------------------------------
{
// Compute starting 'row counts' offset
U32 rIdx = threadIdx.x * 4u; // 4 groups per lane
U32 rRow = rIdx >> logBankSize;
U32 rCol = rIdx & BankMask;
U32 rowIdx = (rRow * strideBank) + (rCol + 1u);
// Extra '+1' to shift past initial pad element
U32 * rowPtr = &s_thrdCounts[rowIdx];
// Store row counts in row array
rowPtr[0] = rowCnt1;
rowPtr[1] = rowCnt2;
rowPtr[2] = rowCnt3;
rowPtr[3] = rowCnt4;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
// Get Warp Row & Column
U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Get local & global indices
U32 outGlobal = (blockIdx.x * nHistBins);
U32 outLocal = (warpRow * OutWarpSize);
U32 rowBase = (warpRow * OutStrideSize);
U32 outBase = outGlobal + outLocal;
U32 rowOff = warpCol + 1u;
U32 outIdx = outBase + warpCol;
rowIdx = rowBase + rowOff;
// Get local & global pointers
U32 * outPtr = &outRowCounts[outIdx];
rowPtr = &s_thrdCounts[rowIdx];
// Write our 'per row' counts in warp sequential order
if (OutLength >= 1u) { outPtr[(0u*WarpSize)] = rowPtr[(0u*strideBank)]; }
if (OutLength >= 2u) { outPtr[(1u*WarpSize)] = rowPtr[(1u*strideBank)]; }
if (OutLength >= 3u) { outPtr[(2u*WarpSize)] = rowPtr[(2u*strideBank)]; }
if (OutLength >= 4u) { outPtr[(3u*WarpSize)] = rowPtr[(3u*strideBank)]; }
if (OutLength >= 5u) { outPtr[(4u*WarpSize)] = rowPtr[(4u*strideBank)]; }
if (OutLength >= 6u) { outPtr[(5u*WarpSize)] = rowPtr[(5u*strideBank)]; }
if (OutLength >= 7u) { outPtr[(6u*WarpSize)] = rowPtr[(6u*strideBank)]; }
if (OutLength >= 8u) { outPtr[(7u*WarpSize)] = rowPtr[(7u*strideBank)]; }
}
}
//-----------------------------------------------
// Name: K2_TRISH_RowCounts_To_RowStarts
// Desc: Sum 256-way 'per row' counts into
// total 256-way counts using prefix-sum
//------------------------------------------------
template <
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads Per Warp )
U32 BlockSize // Threads Per Block
>
__global__
void K2_TRISH_RowCounts_To_RowStarts
(
U32 * outTotalCounts, // OUT - total counts
U32 * outTotalStarts, // OUT - total starts
U32 * outRowStarts, // OUT - row starts
const U32 * inRowCounts, // IN - 'per row' counts to accumulate
U32 nRows // IN - number of rows to accumulate
)
{
//------------------------------------
// Constant values
//------------------------------------
// Memory Channels Per Bank
const U32 BankSize = 1u << logBankSize; // 32 (or 16)
const U32 BankMask = BankSize - 1u; // 31 (or 15)
// Threads Per Warp
const U32 WarpSize = 1u << logWarpSize; // 32
const U32 WarpMask = WarpSize - 1u; // 31
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 8 = 256 / 32
// Size of 'Row Counts' and 'Row Starts' array
//const U32 nElemsCounts = 256;
//const U32 banksCounts = (nElemsCounts + BankMask) / BankSize;
//const U32 padCounts = ((banksCounts * BankSize) - nElemsCounts);
//const U32 sizeCounts = nElemsCounts + padCounts;
// Stride for padded bank of elements
const U32 strideBank = 1u + BankSize;
// Serial Scan Array
const U32 nSS1 = 256u + 2u;
const U32 nRowsSS1 = (nSS1 + BankMask) / BankSize;
const U32 nElemsSS1 = nRowsSS1 * strideBank;
const U32 banksSS1 = (nElemsSS1 + BankMask) / BankSize;
const U32 padSS1 = ((banksSS1 * BankSize) - nElemsSS1);
const U32 sizeSS1 = nElemsSS1 + padSS1;
// WarpScan array
const U32 strideWS2 = WarpSize
+ (WarpSize >> 1u)
+ 1u; // 49 = (32 + 16 + 1)
const U32 nWarpsWS2 = 1u;
const U32 nElemsWS2 = nWarpsWS2 * strideWS2;
const U32 banksWS2 = (nElemsWS2 + BankMask) / BankSize;
const U32 padWS2 = ((banksWS2 * BankSize) - nElemsWS2);
const U32 sizeWS2 = nElemsWS2 + padWS2;
//const U32 nSafePassesCnts = sizeCounts / BlockSize;
//const U32 leftOverCnts = sizeCounts - (nSafePassesCnts * BlockSize);
const U32 nSafePassesSS1 = sizeSS1 / BlockSize;
const U32 leftOverSS1 = sizeSS1 - (nSafePassesSS1 * BlockSize);
const U32 nSafePassesWS2 = sizeWS2 / BlockSize;
const U32 leftOverWS2 = sizeWS2 - (nSafePassesWS2 * BlockSize);
//------------------------------------
// Local variables
//------------------------------------
// shared memory
//__shared__ U32 s_rowStarts[sizeCounts]; // 'Row Starts' one chunk at a time
__shared__ U32 s_ss1[sizeSS1]; // Used for serial scan
__shared__ U32 s_ws2[sizeWS2]; // Used for parallel warp scan
// Registers
U32 tSum; // Per thread accumulator
//------------------------------------
// Compute Indices & Pointers
//------------------------------------
U32 warpRow, warpCol;
U32 storeIdx, prevIdx, ss1Idx, ws2Idx;
{
// Compute Bank Offsets
//U32 bankRow = threadIdx.x >> logBankSize; // tid / 32
U32 bankCol = threadIdx.x & BankMask; // tid % 32
// Compute warp offsets
warpRow = threadIdx.x >> logWarpSize; // tid / 32
warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute Store index (for storing final counts before prefix sum)
U32 sIdx = threadIdx.x;
U32 storeRow = sIdx >> logBankSize; // tid / 32
U32 storeCol = sIdx & BankMask; // tid % 32
storeIdx = (storeRow * strideBank)
+ storeCol
+ 2u; // Pad for 'reach back'
//--
// Previous Column (Serial Scan 1)
// 1.) Reach back one column
// 2.) But, we need to skip over extra padding before the first
// thread in every bank, so reach back two columns
// However, the very first thread in the very first bank needs
// to be able to reach back safely 2 columns without going 'out of range'.
//
// We work around this by pre-padding the 's_ss1' array with
// an extra 2 elements and shifting indices over by two as needed to skip over padding.
//--
U32 prevCol = ((bankCol == 0u) ? 2u : 1u);
prevIdx = storeIdx - prevCol;
// Compute Serial Scan index
U32 ssIdx = threadIdx.x * 8u;
U32 ss1Row = ssIdx >> logBankSize; // (tid*8) / 32
U32 ss1Col = ssIdx & BankMask; // (tid*8) % 32
ss1Idx = (ss1Row * strideBank)
+ ss1Col
+ 2u; // pad for 'reach back'
// Compute Warp Scan Index
ws2Idx = (warpRow * strideWS2)
+ (WarpSize >> 1u)
+ warpCol;
}
//------------------------------------
// Zero out 'arrays'
//------------------------------------
U32 * setPtr = NULL;
//-
// Zero out 'row starts' array
//-
//setPtr = (&s_rowStarts[0]);
//SetArray_BlockSeq
// <
// U32, BlockSize, nSafePassesCnts,
// leftOverCnts, sizeCounts
// >
// (
// setPtr, 0u
// );
//-
// Zero out 'Serial Scan' array
//-
setPtr = (&s_ss1[0]);
SetArray_BlockSeq
<
U32, BlockSize, nSafePassesSS1,
leftOverSS1, sizeSS1
>
(
setPtr, 0u
);
//-
// Zero out 'Warp Scan' array
//-
setPtr = (&s_ws2[0]);
SetArray_BlockSeq
<
U32, BlockSize, nSafePassesWS2,
leftOverWS2, sizeWS2
>
(
setPtr, 0u
);
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
//-------------------------------------------------
// Phase 1:
// Serial Reduction of all rows of 'per row' counts
// down to single set of 'total' counts
//-------------------------------------------------
{
const U32 * inPtr = &inRowCounts[threadIdx.x];
// Initialize 'Thread Sum' to identity value
tSum = 0;
// Loop over row counts
#pragma unroll
for (U32 currPass = 0u; currPass < nRows; currPass++)
{
// Grab count from global array
U32 currCnt = inPtr[0];
// Accumulate 'per row' counts into a 'total' count
tSum = tSum + currCnt;
// Move to next set of 'row counts' to process
inPtr += BlockSize;
}
// Store the 'total counts'
outTotalCounts[threadIdx.x] = tSum;
// Also store 'total counts' into 'Serial Scan' array
s_ss1[storeIdx] = tSum;
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
}
//--------------------------------------
// Phase 2:
// convert 'total counts' into 'total starts'
// using prefix sum
//--------------------------------------
if (warpRow == 0)
{
volatile U32 * wsPtr = (U32 *)&(s_ws2[0]);
U32 * SS1_ptr = &s_ss1[ss1Idx];
// For higher performance, we use registers instead of shared memory
// Tradeoff - lots of register pressure (8 registers per thread)
U32 ss01, ss02, ss03, ss04;
U32 ss05, ss06, ss07, ss08;
//-----
// Serial Scan (on short sequence of 8 values)
//-----
// Grab short sequence of 8 values from ss1 array
ss01 = SS1_ptr[0];
ss02 = SS1_ptr[1];
ss03 = SS1_ptr[2];
ss04 = SS1_ptr[3];
ss05 = SS1_ptr[4];
ss06 = SS1_ptr[5];
ss07 = SS1_ptr[6];
ss08 = SS1_ptr[7];
// Serial scan short sequence (in registers)
//ss01 = <identity> + ss01;
ss02 = ss01 + ss02;
ss03 = ss02 + ss03;
ss04 = ss03 + ss04;
ss05 = ss04 + ss05;
ss06 = ss05 + ss06;
ss07 = ss06 + ss07;
ss08 = ss07 + ss08;
//-
// Store final serial scan result into warp scan array
//-
U32 wi = ws2Idx;
tSum = ss08;
wsPtr[wi] = tSum;
//-----
// Warp Scan (on 32 threads in parallel)
//-----
wsPtr[wi] = tSum = wsPtr[wi - 1u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 2u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 4u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 8u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 16u] + tSum;
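// Note: the five dependent adds above form a Kogge-Stone style inclusive
//       warp scan over the 32 partial sums. The (WarpSize >> 1) pad slots
//       in front of the warp's region of s_ws2 were zero-initialized
//       earlier, so the 'wi - 1/2/4/8/16' reach-backs need no lane guard;
//       out-of-range lanes simply add zero. As written this relies on
//       implicit warp-synchronous execution (a pre-Volta assumption).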
//-----
// Serial Update (on short sequence of 8 values)
//-----
//-
// Grab update (prefix) value from Warp Array
//-
// Note: Need to reach back 'one column' to get exclusive result
U32 prevWI = wi - 1u;
tSum = wsPtr[prevWI];
//-
// Update each element short sequence with prefix (in registers)
//-
ss01 = tSum + ss01;
ss02 = tSum + ss02;
ss03 = tSum + ss03;
ss04 = tSum + ss04;
ss05 = tSum + ss05;
ss06 = tSum + ss06;
ss07 = tSum + ss07;
ss08 = tSum + ss08;
// Store 'prefix sum' results back in 'serial scan' array
SS1_ptr[0] = ss01;
SS1_ptr[1] = ss02;
SS1_ptr[2] = ss03;
SS1_ptr[3] = ss04;
SS1_ptr[4] = ss05;
SS1_ptr[5] = ss06;
SS1_ptr[6] = ss07;
SS1_ptr[7] = ss08;
} // end warpRow == 0
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
//-----
// Grab starting 'row start' (total sum) for this thread
// Note #1: Need to 'reach back' one column for exclusive results
// Note #2: This will result in an unavoidable '2-way' bank conflict
//-----
U32 rowSum = s_ss1[prevIdx];
// Store total starts (from previous column)
outTotalStarts[threadIdx.x] = rowSum;
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
//-------------------------------------------------
// Phase 3:
// Accumulate and write out 'per row' starts
//-------------------------------------------------
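// Example (illustrative): if this thread's bin has 'per row' counts {4, 1, 2}
// across three rows and its exclusive total start is S, the loop below writes
// row starts {S, S+4, S+5} and leaves rowSum == S+7.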
{
const U32 * inPtr = &inRowCounts[threadIdx.x];
U32 * outPtr = &outRowStarts[threadIdx.x];
// Initialize 'Thread Sum' to identity value
// Loop over row counts
#pragma unroll
for (U32 currPass = 0u; currPass < nRows; currPass++)
{
// Read 'in' current count from global array
U32 currCnt = inPtr[0];
// Write 'out' current row sum to global array
outPtr[0] = rowSum;
// Accumulate 'per row' count into running 'row sum' start
rowSum = rowSum + currCnt;
//-
// Move to next row
//-
inPtr += BlockSize;
outPtr += BlockSize;
}
// Sync all threads in block
//if (WarpsPerBlock > 2u) { __syncthreads(); }
}
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU TRISH histogram
////////////////////////////////////////////////////////////////////////////////
/*-----------------
Local Defines
-----------------*/
// Number of SM's per GPU
#if (GPU_GTX_560M == GPU_PLATFORM)
#define NUM_GPU_SMs (4u)
#elif (GPU_TELSA_M2050 == GPU_PLATFORM)
#define NUM_GPU_SMs (14u)
#elif (GPU_GTX_480 == GPU_PLATFORM)
#define NUM_GPU_SMs (15u)
#elif (GPU_GTX_580 == GPU_PLATFORM)
#define NUM_GPU_SMs (16u)
#elif (GPU_GTX_680 == GPU_PLATFORM)
#define NUM_GPU_SMs (8u)
#else
// Unknown GPU - assume 16 SM's for now...
#define NUM_GPU_SMs (16u)
#endif
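// Optional sketch (not used below): rather than hard-coding the SM count per GPU
// model, it could be queried from the runtime at startup. This is only a hedged
// illustration; the device index 0 and the fallback to NUM_GPU_SMs are assumptions.
static U32 queryNumSMs( void )
{
	hipDeviceProp_t props;
	if (hipGetDeviceProperties( &props, 0 ) == hipSuccess)
	{
		// SM count as reported by the driver for device 0
		return (U32)props.multiProcessorCount;
	}
	// Fall back to the compile-time guess above
	return NUM_GPU_SMs;
}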
// Intermediate CUDA buffers
static U32 * d_rowCounts = NULL;
static U32 * d_rowStarts = NULL;
static U32 * d_totalStarts = NULL;
//-----------------------------------------------
// Name: initTrish256
// Desc: Initialize intermediate GPU Buffers
//-----------------------------------------------
extern "C"
void initTrish256( void )
{
// Local Constants
const U32 nHistBins256 = 256u;
const U32 nGPU_SMs = NUM_GPU_SMs;
const U32 nGPU_ConcurrentBlocks = 3u;
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks;
const U32 K1_nRows = K1_GridSize;
const U32 sizeRowCounts = K1_nRows * nHistBins256 * sizeof(U32);
const U32 sizeTotal = nHistBins256 * sizeof(U32);
// Create intermediate GPU buffers
cutilSafeCall( hipMalloc( (void **)&d_rowCounts, sizeRowCounts ) );
cutilSafeCall( hipMalloc( (void **)&d_rowStarts, sizeRowCounts ) );
cutilSafeCall( hipMalloc( (void **)&d_totalStarts, sizeTotal ) );
}
//-----------------------------------------------
// Name: closeTrish256
// Desc: cleanup intermediate GPU buffers
//-----------------------------------------------
extern "C"
void closeTrish256( void )
{
// Destroy Intermediate GPU buffers
cutilSafeCall( hipFree( d_totalStarts ) );
cutilSafeCall( hipFree( d_rowStarts ) );
cutilSafeCall( hipFree( d_rowCounts ) );
}
//---------------------------------------------------------
// Name: genTrishByteU8
// Desc: CPU Wrapper function around
// generalized TRISH histogram for byte data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishByteU8
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 byteCount, // IN - length of input data array
U32 minVal, // IN - minVal
U32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
const U32 K1_Length = 31u; // 31 = Work Per thread (loop unrolling)
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // ?? = Number of rows (blocks) that are cooperatively striding across input data set
//-----
// Get number of elements
//-----
assert( byteCount > 0u );
assert( byteCount % sizeof(U32) == 0u );
U32 nElems = byteCount >> 2u; // byteCount/4
U32 in_stop = nElems - 1u;
const U32 * d_inVals = (const U32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
U32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values <= min
1u // Range check for values >= max
> MapperU8;
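	// Illustrative arithmetic for Formula #1 (assuming the mapper truncates the
	// F32 result to an integer bin): with minVal = 0, maxVal = 255, numBins = 256
	// we get Mu = -0.5 and Alpha = 256/256 = 1.0, so a value A = 37 maps to
	// B = (37 - (-0.5)) * 1.0 = 37.5 -> bin 37.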
hipLaunchKernelGGL(( K1_TRISH_CountRows_GEN_B1
<
// Template Parameters
U32, // underlying value type
MapperU8, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>)
,
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B1() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
hipLaunchKernelGGL(( K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>)
,
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B1< U32, MapperU8 >
(
nElems, (U32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
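//---------------------------------------------------------
// Usage sketch (illustrative only, never compiled or called):
// one hedged way a host application might drive genTrishByteU8().
// The hypothetical h_data/h_hist buffers and the [0..255] range
// are assumptions, not part of the demo itself.
//---------------------------------------------------------
#if 0
static void exampleTrishByteU8( const unsigned char * h_data, U32 byteCount, U32 * h_hist )
{
	const U32 nBins = 256u;
	void * d_data = NULL;
	U32 * d_hist = NULL;
	// Allocate device buffers and copy the input up
	// (byteCount must be a multiple of 4; see the assert in genTrishByteU8)
	cutilSafeCall( hipMalloc( &d_data, byteCount ) );
	cutilSafeCall( hipMalloc( (void **)&d_hist, nBins * sizeof(U32) ) );
	cutilSafeCall( hipMemcpy( d_data, h_data, byteCount, hipMemcpyHostToDevice ) );
	// Create intermediate buffers, run the histogram, then tear down
	initTrish256();
	genTrishByteU8( d_hist, d_data, byteCount, 0u, 255u, nBins );
	closeTrish256();
	// Copy the 256 final counts back to the host
	cutilSafeCall( hipMemcpy( h_hist, d_hist, nBins * sizeof(U32), hipMemcpyDeviceToHost ) );
	cutilSafeCall( hipFree( d_hist ) );
	cutilSafeCall( hipFree( d_data ) );
}
#endif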
//---------------------------------------------------------
// Name: genTrishWordU16
// Desc: CPU Wrapper function around
// generalized TRISH histogram for word data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishWordU16
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 wordCount, // IN - length of input data array
U32 minVal, // IN - minVal
U32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
// Efficiency Formula = (Floor(127/k)*k)/127
// Ideal k-values = 1, 127 (IE efficiency = 1)
// Best k-values otherwise = 2,3,6,7,9,14,18,21,42,63
// Also try 25 & 31 (Local Maxima)
// Worst k-values = 64 (0.504) and 43 (0.677) and 32 (0.756)
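// e.g. k = 63: (Floor(127/63)*63)/127 = 126/127 = 0.992; k = 64: 64/127 = 0.504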
//const U32 K1_Length = 1u; // 1, Efficiency = 1.0, Throughput = 17.75 GB/s (480), *POOR ILP*
//const U32 K1_Length = 2u; // 2, Efficiency = 0.992, Throughput = 27.20 GB/s (480), *POOR ILP*
//const U32 K1_Length = 3u; // 3, Efficiency = 0.992, Throughput = 32.71 GB/s (480), *POOR ILP*
//const U32 K1_Length = 6u; // 6, Efficiency = 0.992, Throughput = 40.02 GB/s (480)
//const U32 K1_Length = 7u; // 7, Efficiency = 0.992, Throughput = 41.50 GB/s (480)
//const U32 K1_Length = 9u; // 9, Efficiency = 0.992, Throughput = 40.21 GB/s (480)
//const U32 K1_Length = 14u; // 14, Efficiency = 0.992, Throughput = 43.56 GB/s (480)
//const U32 K1_Length = 18u; // 18, Efficiency = 0.992, Throughput = 44.08 GB/s (480)
//const U32 K1_Length = 21u; // 21, Efficiency = 0.992, Throughput = 43.74 GB/s (480)
//const U32 K1_Length = 25u; // 25, Efficiency = 0.984, Throughput = 44.21 GB/s (480)
//const U32 K1_Length = 31u; // 31, Efficiency = 0.976, Throughput = 44.29 GB/s (480)
//const U32 K1_Length = 42u; // 42, Efficiency = 0.992, Throughput = 44.43 GB/s (480)
const U32 K1_Length = 63u; // 63, Efficiency = 0.992, Throughput = 45.66 GB/s (480), *BEST* result
//const U32 K1_Length = 64u; // 64, Efficiency = 0.504, Throughput = 41.10 GB/s (480), *WORST* Efficiency
//const U32 K1_Length = 105u; // 105, Efficiency = 0.827, Throughput = 44.86 GB/s (480), Good result, Program probably still fits in code cache...
//const U32 K1_Length = 106u; // 106, Efficiency = 0.835, Throughput = 42.60 GB/s (480), Starts declining, ??? Program too large to fit in code cache ???
//const U32 K1_Length = 127u; // 127, Efficiency = 1.0, Throughput = 26.16 GB/s (480), *POOR* performance, ??? Program too large to fit in code cache ???
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // ?? = Number of rows (blocks) that are cooperatively striding across input data set
//-----
// Get number of elements
//-----
assert( wordCount > 0u );
assert( wordCount % 4 == 0u );
U32 nElems = wordCount >> 1u; // wordCount/2
U32 in_stop = nElems - 1u;
const U32 * d_inVals = (const U32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
U32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values < min
1u // Range check for values > max
> MapperU16;
hipLaunchKernelGGL(( K1_TRISH_CountRows_GEN_B2
<
// Template Parameters
U32, // underlying value type
MapperU16, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>)
,
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B2() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
hipLaunchKernelGGL(( K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>)
,
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B2< U32, MapperU16 >
(
nElems, (U32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
//---------------------------------------------------------
// Name: genTrishDWordU32
// Desc: CPU Wrapper function around
// generalized TRISH histogram for DWORD data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishDWordU32
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 dwordCount, // IN - length of input data array
U32 minVal, // IN - minVal
U32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
// Efficiency Formula = (Floor(255/k)*k)/255
// Ideal k-values = 1, 3, 5, 15, 17, 51, 85, 255
// Best k-values otherwise = 2, 4, 6, 7, 9, 10, 11, 12, 14, 18, 21, 23, 25, 28, 36, 42, 50, 63, 84, 125, 126, 127, 253, 254
// Worst k-values = 128 (0.502) & 86 (0.675) & 64 (0.753)
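// e.g. k = 15: (Floor(255/15)*15)/255 = 255/255 = 1.0; k = 16: 240/255 = 0.941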
// K >= 105 => code won't fit in cache
//const U32 K1_Length = 1u; // 1, Efficiency = 1.0, Throughput = 19.66 GB/s (480), *POOR ILP*
//const U32 K1_Length = 2u; // 2, Efficiency = 0.996, Throughput = 34.16 GB/s (480), *POOR ILP*
//const U32 K1_Length = 3u; // 3, Efficiency = 1.0, Throughput = 44.90 GB/s (480), *POOR ILP*
//const U32 K1_Length = 4u; // 4, Efficiency = 0.988, Throughput = 52.03 GB/s (480), *POOR ILP*
//const U32 K1_Length = 5u; // 5, Efficiency = 1.0, Throughput = 56.56 GB/s (480),
//const U32 K1_Length = 6u; // 6, Efficiency = 0.988, Throughput = 60.32 GB/s (480)
//const U32 K1_Length = 7u; // 7, Efficiency = 0.988, Throughput = 53.07 GB/s (480)
//const U32 K1_Length = 9u; // 9, Efficiency = 0.988, Throughput = 59.97 GB/s (480)
//const U32 K1_Length = 10u; // 10, Efficiency = 0.980, Throughput = 61.61 GB/s (480)
//const U32 K1_Length = 11u; // 11, Efficiency = 0.992, Throughput = 62.57 GB/s (480)
//const U32 K1_Length = 12u; // 12, Efficiency = 0.988, Throughput = 62.00 GB/s (480)
//const U32 K1_Length = 14u; // 14, Efficiency = 0.988, Throughput = 64.24 GB/s (480)
const U32 K1_Length = 15u; // 15, Efficiency = 1.0, Throughput = 65.05 GB/s (480) *BEST*
//const U32 K1_Length = 16u; // 16, Efficiency = 0.941, Throughput = 63.14 GB/s (480)
//const U32 K1_Length = 17u; // 17, Efficiency = 1.0, Throughput = 63.06 GB/s (480)
//const U32 K1_Length = 18u; // 18, Efficiency = 0.988, Throughput = 58.58 GB/s (480)
//const U32 K1_Length = 21u; // 21, Efficiency = 0.988, Throughput = 59.07 GB/s (480)
//const U32 K1_Length = 23u; // 23, Efficiency = 0.992, Throughput = 59.99 GB/s (480)
//const U32 K1_Length = 25u; // 25, Efficiency = 0.980, Throughput = 61.24 GB/s (480)
//const U32 K1_Length = 28u; // 28, Efficiency = 0.988, Throughput = 62.17 GB/s (480)
//const U32 K1_Length = 36u; // 36, Efficiency = 0.988, Throughput = 58.93 GB/s (480)
//const U32 K1_Length = 42u; // 42, Efficiency = 0.988, Throughput = 60.09 GB/s (480)
//const U32 K1_Length = 50u; // 50, Efficiency = 0.980, Throughput = 62.01 GB/s (480)
//const U32 K1_Length = 51u; // 51, Efficiency = 1.0, Throughput = 62.46 GB/s (480)
//const U32 K1_Length = 63u; // 63, Efficiency = 0.988, Throughput = 62.88 GB/s (480),
//const U32 K1_Length = 84u; // 84, Efficiency = 0.988, Throughput = 64.62 GB/s (480),
//const U32 K1_Length = 85u; // 85, Efficiency = 1.0, Throughput = 64.17 GB/s (480),
//const U32 K1_Length = 86u; // 86, Efficiency = 0.675, Throughput = 60.61 GB/s (480), *POOR EFFICIENCY*
//const U32 K1_Length = 125u; // 125, Efficiency = 0.980, Throughput = 65.41 GB/s (480), *BEST*
//const U32 K1_Length = 126u; // 126, Efficiency = 0.988, Throughput = 65.55 GB/s (480), *BEST of the BEST*
//const U32 K1_Length = 127u; // 127, Efficiency = 0.996, Throughput = 65.13 GB/s (480), *BEST*
//const U32 K1_Length = 128u; // 128, Efficiency = 0.502, Throughput = 59.59 GB/s (480), *WORST EFFICIENCY*
// K=[105..255], code probably won't fit in cache
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // ?? = Number of rows (blocks) that are cooperatively striding across input data set
//-----
// Get number of elements
//-----
assert( dwordCount > 0u );
assert( dwordCount % 4 == 0u );
U32 nElems = dwordCount;
U32 in_stop = nElems - 1u;
const U32 * d_inVals = (const U32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
U32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values < min
1u // Range check for values > max
> MapperU32;
hipLaunchKernelGGL(( K1_TRISH_CountRows_GEN_B4
<
// Template Parameters
U32, // underlying value type
MapperU32, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>)
,
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B2() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
hipLaunchKernelGGL(( K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>)
,
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B4< U32, MapperU32 >
(
nElems, (U32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
//---------------------------------------------------------
// Name: genTrishFloatF32
// Desc: CPU Wrapper function around
// generalized TRISH histogram for FLOAT data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishFloatF32
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 floatCount, // IN - length of input data array
F32 minVal, // IN - minVal
F32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// the # of SMs on the card times the number of concurrent blocks per SM.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
// Efficiency Formula = (Floor(255/k)*k)/255
// Ideal k-values = 1, 3, 5, 15, 17, 51, 85, 255
// Best k-values otherwise = 2, 4, 6, 7, 9, 10, 11, 12, 14, 18, 21, 23, 25, 28, 36, 42, 50, 63, 84, 125, 126, 127, 253, 254
// Worst k-values = 128 (0.502) & 86 (0.675) & 64 (0.753)
// K >= 105 => code won't fit in cache
//const U32 K1_Length = 1u; // 1, Efficiency = 1.0, Throughput = 19.66 GB/s (480), *POOR ILP*
//const U32 K1_Length = 2u; // 2, Efficiency = 0.996, Throughput = 34.16 GB/s (480), *POOR ILP*
//const U32 K1_Length = 3u; // 3, Efficiency = 1.0, Throughput = 44.90 GB/s (480), *POOR ILP*
//const U32 K1_Length = 4u; // 4, Efficiency = 0.988, Throughput = 52.03 GB/s (480), *POOR ILP*
//const U32 K1_Length = 5u; // 5, Efficiency = 1.0, Throughput = 56.56 GB/s (480),
//const U32 K1_Length = 6u; // 6, Efficiency = 0.988, Throughput = 60.32 GB/s (480)
//const U32 K1_Length = 7u; // 7, Efficiency = 0.988, Throughput = 53.07 GB/s (480)
//const U32 K1_Length = 9u; // 9, Efficiency = 0.988, Throughput = 59.97 GB/s (480)
//const U32 K1_Length = 10u; // 10, Efficiency = 0.980, Throughput = 61.61 GB/s (480)
//const U32 K1_Length = 11u; // 11, Efficiency = 0.992, Throughput = 62.57 GB/s (480)
//const U32 K1_Length = 12u; // 12, Efficiency = 0.988, Throughput = 62.00 GB/s (480)
//const U32 K1_Length = 14u; // 14, Efficiency = 0.988, Throughput = 64.24 GB/s (480)
const U32 K1_Length = 15u; // 15, Efficiency = 1.0, Throughput = 65.05 GB/s (480) *BEST*
//const U32 K1_Length = 16u; // 16, Efficiency = 0.941, Throughput = 63.14 GB/s (480)
//const U32 K1_Length = 17u; // 17, Efficiency = 1.0, Throughput = 63.06 GB/s (480)
//const U32 K1_Length = 18u; // 18, Efficiency = 0.988, Throughput = 58.58 GB/s (480)
//const U32 K1_Length = 21u; // 21, Efficiency = 0.988, Throughput = 59.07 GB/s (480)
//const U32 K1_Length = 23u; // 23, Efficiency = 0.992, Throughput = 59.99 GB/s (480)
//const U32 K1_Length = 25u; // 25, Efficiency = 0.980, Throughput = 61.24 GB/s (480)
//const U32 K1_Length = 28u; // 28, Efficiency = 0.988, Throughput = 62.17 GB/s (480)
//const U32 K1_Length = 36u; // 36, Efficiency = 0.988, Throughput = 58.93 GB/s (480)
//const U32 K1_Length = 42u; // 42, Efficiency = 0.988, Throughput = 60.09 GB/s (480)
//const U32 K1_Length = 50u; // 50, Efficiency = 0.980, Throughput = 62.01 GB/s (480)
//const U32 K1_Length = 51u; // 51, Efficiency = 1.0, Throughput = 62.46 GB/s (480)
//const U32 K1_Length = 63u; // 63, Efficiency = 0.988, Throughput = 62.88 GB/s (480),
//const U32 K1_Length = 84u; // 84, Efficiency = 0.988, Throughput = 64.62 GB/s (480),
//const U32 K1_Length = 85u; // 85, Efficiency = 1.0, Throughput = 64.17 GB/s (480),
//const U32 K1_Length = 86u; // 86, Efficiency = 0.675, Throughput = 60.61 GB/s (480), *POOR EFFICIENCY*
//const U32 K1_Length = 125u; // 125, Efficiency = 0.980, Throughput = 65.41 GB/s (480), *BEST*
//const U32 K1_Length = 126u; // 126, Efficiency = 0.988, Throughput = 65.55 GB/s (480), *BEST of the BEST*
//const U32 K1_Length = 127u; // 127, Efficiency = 0.996, Throughput = 65.13 GB/s (480), *BEST*
//const U32 K1_Length = 128u; // 128, Efficiency = 0.502, Throughput = 59.59 GB/s (480), *WORST EFFICIENCY*
// K=[105..255], code probably won't fit in cache
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // ?? = Number of rows (blocks) that are cooperatively striding across input data set
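// Illustrative sketch (not part of the original TRISH code):
// a runtime variant of the grid-size heuristic described above,
// assuming the HIP runtime header is included in this translation
// unit. The kernel launches below still use the compile-time
// K1_GridSize constant; this block is informational only.
{
    int sketchSMs = 0;
    hipDeviceProp_t sketchProps;
    if (hipGetDeviceProperties( &sketchProps, 0 ) == hipSuccess)
    {
        sketchSMs = sketchProps.multiProcessorCount;   // e.g. 15 on a GTX 480
    }
    U32 sketchGridSize = (sketchSMs > 0)
                       ? ((U32)sketchSMs * nGPU_ConcurrentBlocks)
                       : K1_GridSize;                  // fall back to the #define-based value
    (void)sketchGridSize;                              // illustrative only
}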
//-----
// Get number of elements
//-----
assert( floatCount > 0u );
assert( floatCount % 4 == 0u );
U32 nElems = floatCount;
U32 in_stop = nElems - 1u;
const F32 * d_inVals = (const F32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
F32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values < min
1u // Range check for values > max
> MapperF32;
hipLaunchKernelGGL(( K1_TRISH_CountRows_GEN_B4
<
// Template Parameters
F32, // underlying value type
MapperF32, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>)
,
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_rowCounts, // OUT - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B4() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
hipLaunchKernelGGL(( K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>)
,
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
, 0, 0,
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B4< F32, MapperF32 >
(
nElems, (F32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
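//---------------------------------------------------------
// Name: Example_BinFloats
// Desc: Illustrative sketch (not part of the original TRISH code):
// a hypothetical call site for genTrishFloatF32 above, assuming
// initTrish256() has already been run and the caller owns a
// device buffer of F32 samples whose length is a multiple of 4.
// The buffer names and the [0,1] value range are assumptions.
//---------------------------------------------------------
static void Example_BinFloats
(
U32 * d_hist256, // OUT - device buffer of 256 U32 bin counts
F32 * d_samples, // IN - device buffer of F32 samples
U32 nSamples // IN - number of samples (multiple of 4)
)
{
genTrishFloatF32
(
d_hist256, // OUT - final 256-way histogram counts
(void *)d_samples, // IN - data to bin & count
nSamples, // IN - length of input data array
0.0f, // IN - minVal
1.0f, // IN - maxVal
256u // IN - number of bins
);
}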
| a73dc073b8f374d37a68840f1d7799d5b8d49010.cu | /*-----------------------------------------------------------------------------
Name: histTRISH_Gen.cu
Desc: Implements generic binning histograms on GPU
Disclaimer:
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-----------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
// System Includes
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// CUDA Includes
#include <cutil_inline.h>
// Local Includes
#include "Platform.h"
#include "BaseDefs.h"
#include "TRISH_traits.h"
#include "MapToBin.h"
#include "Extract.h"
#include "histogram_common.h"
/*-----------------------------------------------------------------------------
Compiler Settings
-----------------------------------------------------------------------------*/
//#define INTERLEAVE 1
//#define INTERLEAVE 2
#define INTERLEAVE 4
//#define TRISH_VERIFY_HISTOGRAM 1
#define TRISH_VERIFY_HISTOGRAM 0
/*-----------------------------------------------------------------------------
Helper Templates
-----------------------------------------------------------------------------*/
/*-------------------------------------
Name: TRISH_VerifyHistogram
-------------------------------------*/
#if 1 == TRISH_VERIFY_HISTOGRAM
// Verify single byte integers (I8, U8)
template <
typename valT, // Underlying value type
typename mapT // Mapper Type
>
__host__
void TRISH_VerifyHistogram_B1
(
U32 nElems, // IN - number of 32-bit elements to bin & count
U32 * d_gpuElems, // IN - array of elements to bin & count
U32 numBins, // IN - number of bins in histogram
U32 * d_gpuCounts, // IN - GPU histogram counts
valT minVal, // IN - [min,max] values for histogram
valT maxVal // ditto
)
{
assert( numBins > 0u );
assert( numBins <= 256u );
assert( nElems > 0u );
U32 mem_size_elems = nElems * sizeof( U32 );
U32 mem_size_counts = 256u * sizeof( U32 );
U32 * h_cpuElems = NULL;
U32 * h_gpuCounts = NULL;
U32 * h_cpuCounts = NULL;
//-----
// Allocate memory resources
//-----
h_cpuElems = (U32 *)malloc( mem_size_elems );
h_gpuCounts = (U32 *)malloc( mem_size_counts );
h_cpuCounts = (U32 *)malloc( mem_size_counts );
//-----
// Transfer arrays from GPU to CPU
//-----
cutilSafeCall( cudaMemcpy( h_cpuElems, d_gpuElems, mem_size_elems, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( h_gpuCounts, d_gpuCounts, mem_size_counts, cudaMemcpyDeviceToHost) );
// Zero CPU counts
for (U32 idx = 0; idx < 256u; idx++)
{
h_cpuCounts[idx] = 0u;
}
// Get TRISH types
typedef ExtractorBytes<U32> Extractor;
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//-----
// Compute CPU row counts
//-----
// Initialize Mapper
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
U32 val1;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
for (U32 idx = 0; idx < nElems; idx+=1u)
{
// Get current values
val1 = h_cpuElems[idx];
// Extract 4 bytes from the single 32-bit value
Extractor::Extract4( b1, b2, b3, b4, val1 );
// Transform values into bins
mapper.Transform4( bin1, bin2, bin3, bin4, // Out => transformed bins
b1, b2, b3, b4 ); // In => values to transform into bins
// Bin results
h_cpuCounts[bin1] += 1u;
h_cpuCounts[bin2] += 1u;
h_cpuCounts[bin3] += 1u;
h_cpuCounts[bin4] += 1u;
}
// Cleanup Mapper
mapper.Finish();
//-----
// Compare CPU vs. GPU totals
//-----
U64 totalCPU = 0ull;
U64 totalGPU = 0ull;
for (U32 idx = 0; idx < numBins; idx++)
{
U32 cpuCount = h_cpuCounts[idx];
U32 gpuCount = h_gpuCounts[idx];
totalCPU += (U64)cpuCount;
totalGPU += (U64)gpuCount;
if (cpuCount != gpuCount)
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) != GPU (%u) !!! ERROR !!!\n",
idx, cpuCount, gpuCount );
}
else
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) == GPU (%u) Success\n",
idx, cpuCount, gpuCount );
}
}
// Get items below range
U32 minCPU, minGPU;
minCPU = h_cpuCounts[numBins+1];
minGPU = h_gpuCounts[numBins+1];
if (minCPU != minGPU)
{
fprintf( stdout, "For < min (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
minVal, minCPU, minGPU );
}
else
{
fprintf( stdout, "For < min (%d), CPU count (%d) == GPU count (%d) Success\n",
minVal, minCPU, minGPU );
}
totalCPU += (U64)minCPU;
totalGPU += (U64)minGPU;
// Get items above range
U32 maxCPU, maxGPU;
maxCPU = h_cpuCounts[numBins+2];
maxGPU = h_gpuCounts[numBins+2];
if (maxCPU != maxGPU)
{
fprintf( stdout, "For > max (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
maxVal, maxCPU, maxGPU );
}
else
{
fprintf( stdout, "For > max (%d), CPU count (%d) == GPU count (%d) Success\n",
maxVal, maxCPU, maxGPU );
}
totalCPU += (U64)maxCPU;
totalGPU += (U64)maxGPU;
// Verify final counts
if (totalCPU != totalGPU)
{
fprintf( stdout, "\nTotal CPU (%I64u) != Total GPU (%I64u) !!! ERROR !!!\n\n\n",
totalCPU, totalGPU );
}
else
{
fprintf( stdout, "\nTotal CPU (%I64u) == Total GPU (%I64u) Success\n\n\n",
totalCPU, totalGPU );
}
//-----
// Free memory resources
//-----
free( h_cpuCounts );
free( h_gpuCounts );
free( h_cpuElems );
}
// Verify 2 byte integers (I16, U16)
template <
typename valT, // Underlying value type
typename mapT // Mapper Type
>
__host__
void TRISH_VerifyHistogram_B2
(
U32 nElems, // IN - number of 32-bit elements to bin & count
U32 * d_gpuElems, // IN - array of elements to bin & count
U32 numBins, // IN - number of bins in histogram
U32 * d_gpuCounts, // IN - GPU histogram counts
valT minVal, // IN - [min,max] values for histogram
valT maxVal // ditto
)
{
assert( numBins > 0u );
assert( numBins <= 256u );
assert( nElems > 0u );
U32 mem_size_elems = nElems * sizeof( U32 );
U32 mem_size_counts = 256u * sizeof( U32 );
U32 * h_cpuElems = NULL;
U32 * h_gpuCounts = NULL;
U32 * h_cpuCounts = NULL;
//-----
// Allocate memory resources
//-----
h_cpuElems = (U32 *)malloc( mem_size_elems );
h_gpuCounts = (U32 *)malloc( mem_size_counts );
h_cpuCounts = (U32 *)malloc( mem_size_counts );
//-----
// Transfer arrays from GPU to CPU
//-----
cutilSafeCall( cudaMemcpy( h_cpuElems, d_gpuElems, mem_size_elems, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( h_gpuCounts, d_gpuCounts, mem_size_counts, cudaMemcpyDeviceToHost) );
// Zero CPU counts
for (U32 idx = 0; idx < 256u; idx++)
{
h_cpuCounts[idx] = 0u;
}
// Get TRISH types
typedef ExtractorWords<U32> Extractor;
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//-----
// Compute CPU row counts
//-----
// Initialize Mapper
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
U32 val1, val2;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
for (U32 idx = 0u; idx < nElems; idx+=2u)
{
// Get current value
val1 = h_cpuElems[idx];
val2 = h_cpuElems[idx+1u];
// Extract 4 words from 2 values
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
// Transform
mapper.Transform4( bin1, bin2, bin3, bin4, // Out => transformed bins
b1, b2, b3, b4 ); // In => values to transform into bins
// Bin results
h_cpuCounts[bin1] += 1u;
h_cpuCounts[bin2] += 1u;
h_cpuCounts[bin3] += 1u;
h_cpuCounts[bin4] += 1u;
}
// Cleanup Mapper
mapper.Finish();
//-----
// Compare CPU vs. GPU totals
//-----
U64 totalCPU = 0ull;
U64 totalGPU = 0ull;
for (U32 idx = 0; idx < numBins; idx++)
{
U32 cpuCount = h_cpuCounts[idx];
U32 gpuCount = h_gpuCounts[idx];
totalCPU += (U64)cpuCount;
totalGPU += (U64)gpuCount;
if (cpuCount != gpuCount)
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) != GPU (%u) !!! ERROR !!!\n",
idx, cpuCount, gpuCount );
}
else
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) == GPU (%u) Success\n",
idx, cpuCount, gpuCount );
}
}
// Get items below range
U32 minCPU, minGPU;
minCPU = h_cpuCounts[numBins+1];
minGPU = h_gpuCounts[numBins+1];
if (minCPU != minGPU)
{
fprintf( stdout, "For < min (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
minVal, minCPU, minGPU );
}
else
{
fprintf( stdout, "For < min (%d), CPU count (%d) == GPU count (%d) Success\n",
minVal, minCPU, minGPU );
}
totalCPU += (U64)minCPU;
totalGPU += (U64)minGPU;
// Get items above range
U32 maxCPU, maxGPU;
maxCPU = h_cpuCounts[numBins+2];
maxGPU = h_gpuCounts[numBins+2];
if (maxCPU != maxGPU)
{
fprintf( stdout, "For > max (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
maxVal, maxCPU, maxGPU );
}
else
{
fprintf( stdout, "For > max (%d), CPU count (%d) == GPU count (%d) Success\n",
maxVal, maxCPU, maxGPU );
}
totalCPU += (U64)maxCPU;
totalGPU += (U64)maxGPU;
// Verify final counts
if (totalCPU != totalGPU)
{
fprintf( stdout, "\nTotal CPU (%I64u) != Total GPU (%I64u) !!! ERROR !!!\n\n\n",
totalCPU, totalGPU );
}
else
{
fprintf( stdout, "\nTotal CPU (%I64u) == Total GPU (%I64u) Success\n\n\n",
totalCPU, totalGPU );
}
//-----
// Free memory resources
//-----
free( h_cpuCounts );
free( h_gpuCounts );
free( h_cpuElems );
}
// Verify 4 byte values (I32, U32, F32)
template <
typename valT, // Underlying value type
typename mapT // Mapper Type
>
__host__
void TRISH_VerifyHistogram_B4
(
U32 nElems, // IN - number of 32-bit elements to bin & count
valT * d_gpuElems, // IN - array of elements to bin & count
U32 numBins, // IN - number of bins in histogram
U32 * d_gpuCounts, // IN - GPU histogram counts
valT minVal, // IN - [min,max] values for histogram
valT maxVal // ditto
)
{
assert( numBins > 0u );
assert( numBins <= 256u );
assert( nElems > 0u );
U32 mem_size_elems = nElems * sizeof( valT );
U32 mem_size_counts = 256u * sizeof( U32 );
valT * h_cpuElems = NULL;
U32 * h_gpuCounts = NULL;
U32 * h_cpuCounts = NULL;
//-----
// Allocate memory resources
//-----
h_cpuElems = (valT *)malloc( mem_size_elems );
h_gpuCounts = (U32 *)malloc( mem_size_counts );
h_cpuCounts = (U32 *)malloc( mem_size_counts );
//-----
// Transfer arrays from GPU to CPU
//-----
cutilSafeCall( cudaMemcpy( h_cpuElems, d_gpuElems, mem_size_elems, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( h_gpuCounts, d_gpuCounts, mem_size_counts, cudaMemcpyDeviceToHost) );
// Zero CPU counts
for (U32 idx = 0; idx < 256u; idx++)
{
h_cpuCounts[idx] = 0u;
}
// Get TRISH types
typedef ExtractorWords<valT> Extractor;
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//-----
// Compute CPU row counts
//-----
// Initialize Mapper
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
valT val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// nElems is asserted to be a multiple of 4 by the callers,
// so we can safely consume 4 values per iteration
for (U32 idx = 0u; idx < nElems; idx+=4u)
{
// Get current value
val1 = h_cpuElems[idx];
val2 = h_cpuElems[idx+1u];
val3 = h_cpuElems[idx+2u];
val4 = h_cpuElems[idx+3u];
b1 = (upscaleType)val1;
b2 = (upscaleType)val2;
b3 = (upscaleType)val3;
b4 = (upscaleType)val4;
// Transform
mapper.Transform4( bin1, bin2, bin3, bin4, // Out => transformed bins
b1, b2, b3, b4 ); // In => values to transform into bins
// Bin results
h_cpuCounts[bin1] += 1u;
h_cpuCounts[bin2] += 1u;
h_cpuCounts[bin3] += 1u;
h_cpuCounts[bin4] += 1u;
}
// Cleanup Mapper
mapper.Finish();
//-----
// Compare CPU vs. GPU totals
//-----
U64 totalCPU = 0ull;
U64 totalGPU = 0ull;
for (U32 idx = 0; idx < numBins; idx++)
{
U32 cpuCount = h_cpuCounts[idx];
U32 gpuCount = h_gpuCounts[idx];
totalCPU += (U64)cpuCount;
totalGPU += (U64)gpuCount;
if (cpuCount != gpuCount)
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) != GPU (%u) !!! ERROR !!!\n",
idx, cpuCount, gpuCount );
}
else
{
fprintf( stdout, "Total Counts[%u] : CPU (%u) == GPU (%u) Success\n",
idx, cpuCount, gpuCount );
}
}
// Get items below range
U32 minCPU, minGPU;
minCPU = h_cpuCounts[numBins+1];
minGPU = h_gpuCounts[numBins+1];
if (minCPU != minGPU)
{
fprintf( stdout, "For < min (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
minVal, minCPU, minGPU );
}
else
{
fprintf( stdout, "For < min (%d), CPU count (%d) == GPU count (%d) Success\n",
minVal, minCPU, minGPU );
}
totalCPU += (U64)minCPU;
totalGPU += (U64)minGPU;
// Get items above range
U32 maxCPU, maxGPU;
maxCPU = h_cpuCounts[numBins+2];
maxGPU = h_gpuCounts[numBins+2];
if (maxCPU != maxGPU)
{
fprintf( stdout, "For > max (%d), CPU count (%d) != GPU count (%d) !!! ERROR !!!\n",
maxVal, maxCPU, maxGPU );
}
else
{
fprintf( stdout, "For > max (%d), CPU count (%d) == GPU count (%d) Success\n",
maxVal, maxCPU, maxGPU );
}
totalCPU += (U64)maxCPU;
totalGPU += (U64)maxGPU;
// Verify final counts
if (totalCPU != totalGPU)
{
fprintf( stdout, "\nTotal CPU (%I64u) != Total GPU (%I64u) !!! ERROR !!!\n\n\n",
totalCPU, totalGPU );
}
else
{
fprintf( stdout, "\nTotal CPU (%I64u) == Total GPU (%I64u) Success\n\n\n",
totalCPU, totalGPU );
}
//-----
// Free memory resources
//-----
free( h_cpuCounts );
free( h_gpuCounts );
free( h_cpuElems );
}
#endif
/*-----------------------------------------------
Name: BinCounts
Desc: Adds Bins into count array
-----------------------------------------------*/
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount1
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1 // IN - input 'bins' to count
)
{
// Lane Row[0..63] = bin / 4
U32 LI_1;
LI_1 = bin1 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
// Lane Col[0,1,2,3] = bin % 4
U32 col1;
col1 = bin1 & 0x3u;
// Shift[0,8,16,24] = Lane Col [0,1,2,3] * 8
U32 s1;
s1 = col1 << 3u;
// Get Increments
U32 inc1;
inc1 = 1u << s1;
U32 oldCnt, newCnt;
//-----
// Add bin counts into count array
//-----
// Increment 1st bin count
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
}
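/*-----------------------------------------------
Name: LaneWordOffset
Desc: Illustrative sketch (not part of the original sample):
the shared-memory index math the BinCount* helpers above and
below rely on, pulled out for clarity. Bin 'b' of a given
thread lives (b/4)*BlockSize 32-bit words past that thread's
cntPtr, in byte lane (b%4). The helper name is invented here.
-----------------------------------------------*/
template <U32 BlockSize>
__host__ __device__ __forceinline__
U32 LaneWordOffset
(
U32 bin // IN - bin index [0..255]
)
{
// lane row = bin / 4, one row = BlockSize words
return (bin >> 2u) * BlockSize;
}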
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount2
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1, // IN - input 'bins' to count
U32 bin2
)
{
// Lane Row = bin / 4
U32 LI_1, LI_2;
LI_1 = bin1 >> 2u;
LI_2 = bin2 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
LI_2 = LI_2 * BlockSize;
// Lane Col = bin % 4
U32 col1, col2;
col1 = bin1 & 0x3u;
col2 = bin2 & 0x3u;
// Shift = Lane Col [0,1,2,3] * 8
U32 s1, s2;
s1 = col1 << 3u;
s2 = col2 << 3u;
// Get Increments
U32 inc1, inc2;
inc1 = 1u << s1;
inc2 = 1u << s2;
//-----
// Add bin counts into count array
//-----
U32 oldCnt, newCnt;
// Increment 1st bin
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
// Increment 2nd bin
oldCnt = cntPtr[LI_2];
newCnt = oldCnt + inc2;
cntPtr[LI_2] = newCnt;
}
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount3
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1, // IN - input 'bins' to count
U32 bin2,
U32 bin3
)
{
// Lane Row = bin / 4
U32 LI_1, LI_2, LI_3;
LI_1 = bin1 >> 2u;
LI_2 = bin2 >> 2u;
LI_3 = bin3 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
LI_2 = LI_2 * BlockSize;
LI_3 = LI_3 * BlockSize;
// Lane Col = bin % 4
U32 col1, col2, col3;
col1 = bin1 & 0x3u;
col2 = bin2 & 0x3u;
col3 = bin3 & 0x3u;
// Shift = Lane Col [0,1,2,3] * 8
U32 s1, s2, s3;
s1 = col1 << 3u;
s2 = col2 << 3u;
s3 = col3 << 3u;
// Get Increments
U32 inc1, inc2, inc3;
inc1 = 1u << s1;
inc2 = 1u << s2;
inc3 = 1u << s3;
//-----
// Add bin counts into count array
//-----
U32 oldCnt, newCnt;
// Increment 1st bin
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
// Increment 2nd bin
oldCnt = cntPtr[LI_2];
newCnt = oldCnt + inc2;
cntPtr[LI_2] = newCnt;
// Increment 3rd bin
oldCnt = cntPtr[LI_3];
newCnt = oldCnt + inc3;
cntPtr[LI_3] = newCnt;
}
template <U32 BlockSize>
__host__ __device__ __forceinline__
void BinCount4
(
U32 * cntPtr, // OUT - count array (to store bin results in)
U32 bin1, // IN - input 'bins' to count
U32 bin2,
U32 bin3,
U32 bin4
)
{
// Lane Row = bin / 4
U32 LI_1, LI_2, LI_3, LI_4;
LI_1 = bin1 >> 2u;
LI_2 = bin2 >> 2u;
LI_3 = bin3 >> 2u;
LI_4 = bin4 >> 2u;
// Multiply by block Size;
LI_1 = LI_1 * BlockSize;
LI_2 = LI_2 * BlockSize;
LI_3 = LI_3 * BlockSize;
LI_4 = LI_4 * BlockSize;
// Lane Col = bin % 4
U32 col1, col2, col3, col4;
col1 = bin1 & 0x3u;
col2 = bin2 & 0x3u;
col3 = bin3 & 0x3u;
col4 = bin4 & 0x3u;
// Shift = Lane Col [0,1,2,3] * 8
U32 s1, s2, s3, s4;
s1 = col1 << 3u;
s2 = col2 << 3u;
s3 = col3 << 3u;
s4 = col4 << 3u;
// Get Increments
U32 inc1, inc2, inc3, inc4;
inc1 = 1u << s1;
inc2 = 1u << s2;
inc3 = 1u << s3;
inc4 = 1u << s4;
//-----
// Add bin counts into count array
//-----
U32 oldCnt, newCnt;
// Increment 1st bin
oldCnt = cntPtr[LI_1];
newCnt = oldCnt + inc1;
cntPtr[LI_1] = newCnt;
// Increment 2nd bin
oldCnt = cntPtr[LI_2];
newCnt = oldCnt + inc2;
cntPtr[LI_2] = newCnt;
// Increment 3rd bin
oldCnt = cntPtr[LI_3];
newCnt = oldCnt + inc3;
cntPtr[LI_3] = newCnt;
// Increment 4th bin
oldCnt = cntPtr[LI_4];
newCnt = oldCnt + inc4;
cntPtr[LI_4] = newCnt;
}
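/*-----------------------------------------------
Name: UnpackLaneCounts
Desc: Illustrative sketch (not part of the original sample):
decodes one packed 32-bit lane produced by the BinCount*
helpers above back into its four 8-bit counters
(bins 4r+0 .. 4r+3). Handy for host-side inspection;
the name is invented for this sketch.
-----------------------------------------------*/
__host__ __device__ __forceinline__
void UnpackLaneCounts
(
U32 packed, // IN - one packed lane (4 x 8-bit counts)
U32 & c0, // OUT - count for bin (4r + 0)
U32 & c1, // OUT - count for bin (4r + 1)
U32 & c2, // OUT - count for bin (4r + 2)
U32 & c3 // OUT - count for bin (4r + 3)
)
{
c0 = (packed >> 0u) & 0xFFu;
c1 = (packed >> 8u) & 0xFFu;
c2 = (packed >> 16u) & 0xFFu;
c3 = (packed >> 24u) & 0xFFu;
}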
/*---------------------------------------------------------
Name: SetArray_BlockSeq
Desc: Sets elements in array to specified value
Note: Uses "Block Sequential" access pattern
---------------------------------------------------------*/
template <
typename valT, // Underlying value type
U32 BlockSize, // Threads Per Block
U32 nSafePasses, // Number of safe passes
U32 nLeftOver, // Number of left over elements
U32 maxSize // Max Size of array
>
__device__ __forceinline__
void SetArray_BlockSeq
(
valT * basePtr, // IN/OUT - array to set to 'set' value
valT toSet // IN - value to set array elements 'to'
)
{
// Get 'per thread' pointer
valT * setPtr = basePtr + threadIdx.x;
// Initialize as many elements as we
// safely can with no range checking
if (nSafePasses >= 1u) { setPtr[( 0u * BlockSize)] = toSet; }
if (nSafePasses >= 2u) { setPtr[( 1u * BlockSize)] = toSet; }
if (nSafePasses >= 3u) { setPtr[( 2u * BlockSize)] = toSet; }
if (nSafePasses >= 4u) { setPtr[( 3u * BlockSize)] = toSet; }
if (nSafePasses >= 5u) { setPtr[( 4u * BlockSize)] = toSet; }
if (nSafePasses >= 6u) { setPtr[( 5u * BlockSize)] = toSet; }
if (nSafePasses >= 7u) { setPtr[( 6u * BlockSize)] = toSet; }
if (nSafePasses >= 8u) { setPtr[( 7u * BlockSize)] = toSet; }
if (nSafePasses >= 9u) { setPtr[( 8u * BlockSize)] = toSet; }
if (nSafePasses >= 10u) { setPtr[( 9u * BlockSize)] = toSet; }
if (nSafePasses >= 11u) { setPtr[(10u * BlockSize)] = toSet; }
if (nSafePasses >= 12u) { setPtr[(11u * BlockSize)] = toSet; }
if (nSafePasses >= 13u) { setPtr[(12u * BlockSize)] = toSet; }
if (nSafePasses >= 14u) { setPtr[(13u * BlockSize)] = toSet; }
if (nSafePasses >= 15u) { setPtr[(14u * BlockSize)] = toSet; }
if (nSafePasses >= 16u) { setPtr[(15u * BlockSize)] = toSet; }
if (nSafePasses >= 17u) { setPtr[(16u * BlockSize)] = toSet; }
if (nSafePasses >= 18u) { setPtr[(17u * BlockSize)] = toSet; }
if (nSafePasses >= 19u) { setPtr[(18u * BlockSize)] = toSet; }
if (nSafePasses >= 20u) { setPtr[(19u * BlockSize)] = toSet; }
if (nSafePasses >= 21u) { setPtr[(20u * BlockSize)] = toSet; }
if (nSafePasses >= 22u) { setPtr[(21u * BlockSize)] = toSet; }
if (nSafePasses >= 23u) { setPtr[(22u * BlockSize)] = toSet; }
if (nSafePasses >= 24u) { setPtr[(23u * BlockSize)] = toSet; }
if (nSafePasses >= 25u) { setPtr[(24u * BlockSize)] = toSet; }
if (nSafePasses >= 26u) { setPtr[(25u * BlockSize)] = toSet; }
if (nSafePasses >= 27u) { setPtr[(26u * BlockSize)] = toSet; }
if (nSafePasses >= 28u) { setPtr[(27u * BlockSize)] = toSet; }
if (nSafePasses >= 29u) { setPtr[(28u * BlockSize)] = toSet; }
if (nSafePasses >= 30u) { setPtr[(29u * BlockSize)] = toSet; }
if (nSafePasses >= 31u) { setPtr[(30u * BlockSize)] = toSet; }
if (nSafePasses >= 32u) { setPtr[(31u * BlockSize)] = toSet; }
if (nSafePasses >= 33u) { setPtr[(32u * BlockSize)] = toSet; }
if (nSafePasses >= 34u) { setPtr[(33u * BlockSize)] = toSet; }
if (nSafePasses >= 35u) { setPtr[(34u * BlockSize)] = toSet; }
if (nSafePasses >= 36u) { setPtr[(35u * BlockSize)] = toSet; }
if (nSafePasses >= 37u) { setPtr[(36u * BlockSize)] = toSet; }
if (nSafePasses >= 38u) { setPtr[(37u * BlockSize)] = toSet; }
if (nSafePasses >= 39u) { setPtr[(38u * BlockSize)] = toSet; }
if (nSafePasses >= 40u) { setPtr[(39u * BlockSize)] = toSet; }
if (nSafePasses >= 41u) { setPtr[(40u * BlockSize)] = toSet; }
if (nSafePasses >= 42u) { setPtr[(41u * BlockSize)] = toSet; }
if (nSafePasses >= 43u) { setPtr[(42u * BlockSize)] = toSet; }
if (nSafePasses >= 44u) { setPtr[(43u * BlockSize)] = toSet; }
if (nSafePasses >= 45u) { setPtr[(44u * BlockSize)] = toSet; }
if (nSafePasses >= 46u) { setPtr[(45u * BlockSize)] = toSet; }
if (nSafePasses >= 47u) { setPtr[(46u * BlockSize)] = toSet; }
if (nSafePasses >= 48u) { setPtr[(47u * BlockSize)] = toSet; }
if (nSafePasses >= 49u) { setPtr[(48u * BlockSize)] = toSet; }
if (nSafePasses >= 50u) { setPtr[(49u * BlockSize)] = toSet; }
if (nSafePasses >= 51u) { setPtr[(50u * BlockSize)] = toSet; }
if (nSafePasses >= 52u) { setPtr[(51u * BlockSize)] = toSet; }
if (nSafePasses >= 53u) { setPtr[(52u * BlockSize)] = toSet; }
if (nSafePasses >= 54u) { setPtr[(53u * BlockSize)] = toSet; }
if (nSafePasses >= 55u) { setPtr[(54u * BlockSize)] = toSet; }
if (nSafePasses >= 56u) { setPtr[(55u * BlockSize)] = toSet; }
if (nSafePasses >= 57u) { setPtr[(56u * BlockSize)] = toSet; }
if (nSafePasses >= 58u) { setPtr[(57u * BlockSize)] = toSet; }
if (nSafePasses >= 59u) { setPtr[(58u * BlockSize)] = toSet; }
if (nSafePasses >= 60u) { setPtr[(59u * BlockSize)] = toSet; }
if (nSafePasses >= 61u) { setPtr[(60u * BlockSize)] = toSet; }
if (nSafePasses >= 62u) { setPtr[(61u * BlockSize)] = toSet; }
if (nSafePasses >= 63u) { setPtr[(62u * BlockSize)] = toSet; }
if (nSafePasses >= 64u) { setPtr[(63u * BlockSize)] = toSet; }
if (nSafePasses >= 65u) { setPtr[(64u * BlockSize)] = toSet; }
if (nSafePasses >= 66u) { setPtr[(65u * BlockSize)] = toSet; }
// Set any 'left over' values with range checking
if (nLeftOver > 0u)
{
U32 idx = (nSafePasses * BlockSize) + threadIdx.x;
if (idx < maxSize)
{
basePtr[idx] = toSet;
}
}
}
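/*---------------------------------------------------------
Name: SetArray_BlockSeq_Looped
Desc: Illustrative sketch (not part of the original sample):
a loop-based equivalent of SetArray_BlockSeq above, for
reference only. The original spells out the passes as an
if-chain so that, with nSafePasses known at compile time,
exactly that many unguarded stores are emitted; the sketch
below relies on '#pragma unroll' to achieve the same effect.
---------------------------------------------------------*/
template <
typename valT, // Underlying value type
U32 BlockSize, // Threads Per Block
U32 nSafePasses, // Number of safe passes
U32 nLeftOver, // Number of left over elements
U32 maxSize // Max Size of array
>
__device__ __forceinline__
void SetArray_BlockSeq_Looped
(
valT * basePtr, // IN/OUT - array to set to 'set' value
valT toSet // IN - value to set array elements 'to'
)
{
valT * setPtr = basePtr + threadIdx.x;
#pragma unroll
for (U32 pass = 0u; pass < nSafePasses; pass++) // 'safe' passes, no range check
{
setPtr[pass * BlockSize] = toSet;
}
if (nLeftOver > 0u) // guarded tail
{
U32 idx = (nSafePasses * BlockSize) + threadIdx.x;
if (idx < maxSize) { basePtr[idx] = toSet; }
}
}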
/*---------------------------------------------------------
Name: SetArray_WarpSeq
Desc: Sets elements in array to specified value
Note: Uses "Warp Sequential" access pattern
---------------------------------------------------------*/
template <
typename valT, // Underlying value type
U32 WarpSize, // Threads per Warp
U32 nSafePasses, // Number of safe passes (warps per subsection)
U32 nLeftOver, // Number of left over elements
U32 maxSize // Max Size of array
>
__device__ __forceinline__
void SetArray_WarpSeq
(
valT * basePtr, // IN/OUT - array to set to 'set' value
valT toSet, // IN - value to set array elements 'to'
U32 startIdx // starting index for this thread
)
{
// Get 'per thread' pointer
valT * setPtr = &basePtr[startIdx];
// Initialize as many elements as we
// safely can with no range checking
if (nSafePasses >= 1u) { setPtr[( 0u * WarpSize)] = toSet; }
if (nSafePasses >= 2u) { setPtr[( 1u * WarpSize)] = toSet; }
if (nSafePasses >= 3u) { setPtr[( 2u * WarpSize)] = toSet; }
if (nSafePasses >= 4u) { setPtr[( 3u * WarpSize)] = toSet; }
if (nSafePasses >= 5u) { setPtr[( 4u * WarpSize)] = toSet; }
if (nSafePasses >= 6u) { setPtr[( 5u * WarpSize)] = toSet; }
if (nSafePasses >= 7u) { setPtr[( 6u * WarpSize)] = toSet; }
if (nSafePasses >= 8u) { setPtr[( 7u * WarpSize)] = toSet; }
if (nSafePasses >= 9u) { setPtr[( 8u * WarpSize)] = toSet; }
if (nSafePasses >= 10u) { setPtr[( 9u * WarpSize)] = toSet; }
if (nSafePasses >= 11u) { setPtr[(10u * WarpSize)] = toSet; }
if (nSafePasses >= 12u) { setPtr[(11u * WarpSize)] = toSet; }
if (nSafePasses >= 13u) { setPtr[(12u * WarpSize)] = toSet; }
if (nSafePasses >= 14u) { setPtr[(13u * WarpSize)] = toSet; }
if (nSafePasses >= 15u) { setPtr[(14u * WarpSize)] = toSet; }
if (nSafePasses >= 16u) { setPtr[(15u * WarpSize)] = toSet; }
if (nSafePasses >= 17u) { setPtr[(16u * WarpSize)] = toSet; }
if (nSafePasses >= 18u) { setPtr[(17u * WarpSize)] = toSet; }
if (nSafePasses >= 19u) { setPtr[(18u * WarpSize)] = toSet; }
if (nSafePasses >= 20u) { setPtr[(19u * WarpSize)] = toSet; }
if (nSafePasses >= 21u) { setPtr[(20u * WarpSize)] = toSet; }
if (nSafePasses >= 22u) { setPtr[(21u * WarpSize)] = toSet; }
if (nSafePasses >= 23u) { setPtr[(22u * WarpSize)] = toSet; }
if (nSafePasses >= 24u) { setPtr[(23u * WarpSize)] = toSet; }
if (nSafePasses >= 25u) { setPtr[(24u * WarpSize)] = toSet; }
if (nSafePasses >= 26u) { setPtr[(25u * WarpSize)] = toSet; }
if (nSafePasses >= 27u) { setPtr[(26u * WarpSize)] = toSet; }
if (nSafePasses >= 28u) { setPtr[(27u * WarpSize)] = toSet; }
if (nSafePasses >= 29u) { setPtr[(28u * WarpSize)] = toSet; }
if (nSafePasses >= 30u) { setPtr[(29u * WarpSize)] = toSet; }
if (nSafePasses >= 31u) { setPtr[(30u * WarpSize)] = toSet; }
if (nSafePasses >= 32u) { setPtr[(31u * WarpSize)] = toSet; }
if (nSafePasses >= 33u) { setPtr[(32u * WarpSize)] = toSet; }
if (nSafePasses >= 34u) { setPtr[(33u * WarpSize)] = toSet; }
if (nSafePasses >= 35u) { setPtr[(34u * WarpSize)] = toSet; }
if (nSafePasses >= 36u) { setPtr[(35u * WarpSize)] = toSet; }
if (nSafePasses >= 37u) { setPtr[(36u * WarpSize)] = toSet; }
if (nSafePasses >= 38u) { setPtr[(37u * WarpSize)] = toSet; }
if (nSafePasses >= 39u) { setPtr[(38u * WarpSize)] = toSet; }
if (nSafePasses >= 40u) { setPtr[(39u * WarpSize)] = toSet; }
if (nSafePasses >= 41u) { setPtr[(40u * WarpSize)] = toSet; }
if (nSafePasses >= 42u) { setPtr[(41u * WarpSize)] = toSet; }
if (nSafePasses >= 43u) { setPtr[(42u * WarpSize)] = toSet; }
if (nSafePasses >= 44u) { setPtr[(43u * WarpSize)] = toSet; }
if (nSafePasses >= 45u) { setPtr[(44u * WarpSize)] = toSet; }
if (nSafePasses >= 46u) { setPtr[(45u * WarpSize)] = toSet; }
if (nSafePasses >= 47u) { setPtr[(46u * WarpSize)] = toSet; }
if (nSafePasses >= 48u) { setPtr[(47u * WarpSize)] = toSet; }
if (nSafePasses >= 49u) { setPtr[(48u * WarpSize)] = toSet; }
if (nSafePasses >= 50u) { setPtr[(49u * WarpSize)] = toSet; }
if (nSafePasses >= 51u) { setPtr[(50u * WarpSize)] = toSet; }
if (nSafePasses >= 52u) { setPtr[(51u * WarpSize)] = toSet; }
if (nSafePasses >= 53u) { setPtr[(52u * WarpSize)] = toSet; }
if (nSafePasses >= 54u) { setPtr[(53u * WarpSize)] = toSet; }
if (nSafePasses >= 55u) { setPtr[(54u * WarpSize)] = toSet; }
if (nSafePasses >= 56u) { setPtr[(55u * WarpSize)] = toSet; }
if (nSafePasses >= 57u) { setPtr[(56u * WarpSize)] = toSet; }
if (nSafePasses >= 58u) { setPtr[(57u * WarpSize)] = toSet; }
if (nSafePasses >= 59u) { setPtr[(58u * WarpSize)] = toSet; }
if (nSafePasses >= 60u) { setPtr[(59u * WarpSize)] = toSet; }
if (nSafePasses >= 61u) { setPtr[(60u * WarpSize)] = toSet; }
if (nSafePasses >= 62u) { setPtr[(61u * WarpSize)] = toSet; }
if (nSafePasses >= 63u) { setPtr[(62u * WarpSize)] = toSet; }
if (nSafePasses >= 64u) { setPtr[(63u * WarpSize)] = toSet; }
if (nSafePasses >= 65u) { setPtr[(64u * WarpSize)] = toSet; }
if (nSafePasses >= 66u) { setPtr[(65u * WarpSize)] = toSet; }
// Set any 'left over' values with range checking
if (nLeftOver > 0u)
{
U32 idx = startIdx + (nSafePasses * WarpSize);
if (idx < maxSize)
{
basePtr[idx] = toSet;
}
}
}
/*-------------------------------------------------------------------
Name: SS_Sums_4_Next_V1
Desc: Serial scan on next 4 elements in seq [0..3]
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads per block
U32 BlockMask // Block Mask
>
__device__ __forceinline__
void SS_Sums_4_Next_V1
(
U32 & sum1, // OUT - sum1 .. sum4 (as singletons)
U32 & sum2,
U32 & sum3,
U32 & sum4,
U32 * cntPtr, // IN - 'per thread' counts <horizontal row> to sum up
U32 baseIdx
)
{
// wrap = (idx + [0..3]) % BlockSize
U32 idx1, idx2, idx3, idx4;
idx1 = baseIdx + 0u;
idx2 = baseIdx + 1u;
idx3 = baseIdx + 2u;
idx4 = baseIdx + 3u;
U32 wrap1, wrap2, wrap3, wrap4;
wrap1 = idx1 & BlockMask;
wrap2 = idx2 & BlockMask;
wrap3 = idx3 & BlockMask;
wrap4 = idx4 & BlockMask;
//-
// Grab 4 elements in seq [0..3]
//-
U32 lane1, lane2, lane3, lane4;
lane1 = cntPtr[wrap1];
lane2 = cntPtr[wrap2];
lane3 = cntPtr[wrap3];
lane4 = cntPtr[wrap4];
//-
// Zero out sequence [0..3]
//-
cntPtr[wrap1] = 0u;
cntPtr[wrap2] = 0u;
cntPtr[wrap3] = 0u;
cntPtr[wrap4] = 0u;
//-
// Accumulate all 4 groups in each lane
//-
//-
// Initialize sums from 1st lane (of 4 groups)
//-
U32 s3 = lane1 >> 16u; // 3rd bin (of 4) in lane
U32 s2 = lane1 >> 8u; // 2nd bin (of 4) in lane
U32 cnt4 = lane1 >> 24u;
U32 cnt3 = s3 & 0xFFu;
U32 cnt2 = s2 & 0xFFu;
U32 cnt1 = lane1 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
//-
// Accumulate sums from 2nd lane (of 4 groups)
//-
s3 = lane2 >> 16u; // 3rd bin (of 4) in lane
s2 = lane2 >> 8u; // 2nd bin (of 4) in lane
cnt4 = lane2 >> 24u;
cnt3 = s3 & 0xFFu;
cnt2 = s2 & 0xFFu;
cnt1 = lane2 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
//-
// Accumulate sums from 3rd lane (of 4 groups)
//-
s3 = lane3 >> 16u; // 3rd bin (of 4) in lane
s2 = lane3 >> 8u; // 2nd bin (of 4) in lane
cnt4 = lane3 >> 24u;
cnt3 = s3 & 0xFFu;
cnt2 = s2 & 0xFFu;
cnt1 = lane3 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
//-
// Accumulate sums from 4th lane (of 4 groups)
//-
s3 = lane4 >> 16u; // 3rd bin (of 4) in lane
s2 = lane4 >> 8u; // 2nd bin (of 4) in lane
cnt4 = lane4 >> 24u;
cnt3 = s3 & 0xFFu;
cnt2 = s2 & 0xFFu;
cnt1 = lane4 & 0xFFu;
sum4 = sum4 + cnt4;
sum3 = sum3 + cnt3;
sum2 = sum2 + cnt2;
sum1 = sum1 + cnt1;
}
/*-------------------------------------------------------------------
Name: SS_Sums_4_Next_V2
Desc: Serial scan on next 4 elements in seq [0..3]
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads Per Block
U32 BlockMask // Block Mask
>
__device__ __forceinline__
void SS_Sums_4_Next_V2
(
U32 & sum13, // OUT - sum1 .. sum4 (as pairs)
U32 & sum24,
U32 * cntPtr, // IN - 'per thread' counts <horizontal row> to sum up
U32 baseIdx
)
{
// wrap = (idx + [0..3]) % BlockSize
U32 idx1, idx2, idx3, idx4;
idx1 = baseIdx + 0u;
idx2 = baseIdx + 1u;
idx3 = baseIdx + 2u;
idx4 = baseIdx + 3u;
U32 wrap1, wrap2, wrap3, wrap4;
wrap1 = idx1 & BlockMask;
wrap2 = idx2 & BlockMask;
wrap3 = idx3 & BlockMask;
wrap4 = idx4 & BlockMask;
//-
// Grab 4 elements in seq [0..3]
//-
U32 lane1, lane2, lane3, lane4;
lane1 = cntPtr[wrap1];
lane2 = cntPtr[wrap2];
lane3 = cntPtr[wrap3];
lane4 = cntPtr[wrap4];
//-
// Zero out sequence [0..3]
//-
cntPtr[wrap1] = 0u;
cntPtr[wrap2] = 0u;
cntPtr[wrap3] = 0u;
cntPtr[wrap4] = 0u;
//-
// Accumulate all 4 groups in each lane
//-
//-
// Initialize sums from 1st lane (of 4 groups)
//-
U32 cnt13, cnt24;
cnt13 = (lane1 >> 0u) & 0x00FF00FFu;
cnt24 = (lane1 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
//-
// Accumulate sums from 2nd lane (of 4 groups)
//-
cnt13 = (lane2 >> 0u) & 0x00FF00FFu;
cnt24 = (lane2 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
//-
// Accumulate sums from 3rd lane (of 4 groups)
//-
cnt13 = (lane3 >> 0u) & 0x00FF00FFu;
cnt24 = (lane3 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
//-
// Accumulate sums from 4th lane (of 4 groups)
//-
cnt13 = (lane4 >> 0u) & 0x00FF00FFu;
cnt24 = (lane4 >> 8u) & 0x00FF00FFu;
sum13 += cnt13;
sum24 += cnt24;
}
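/*-------------------------------------------------------------------
Name: LaneSumSchemesAgree
Desc: Illustrative sketch (not part of the original sample):
host-side cross-check showing that the two accumulation schemes
above agree on a single packed lane. Summing the four 8-bit
fields as singletons (V1 style) gives the same per-bin values
as summing them as two 16-bit pairs and then splitting the
pairs (V2 style). The function name is invented for this sketch.
------------------------------------------------------------------*/
__host__ __device__ __forceinline__
bool LaneSumSchemesAgree( U32 lane )
{
// V1 style: four separate singletons
U32 v1_s1 = (lane >> 0u) & 0xFFu;
U32 v1_s2 = (lane >> 8u) & 0xFFu;
U32 v1_s3 = (lane >> 16u) & 0xFFu;
U32 v1_s4 = (lane >> 24u) & 0xFFu;
// V2 style: two packed 16-bit pairs, then split back into singletons
U32 sum13 = (lane >> 0u) & 0x00FF00FFu;
U32 sum24 = (lane >> 8u) & 0x00FF00FFu;
U32 v2_s1 = sum13 & 0x0000FFFFu;
U32 v2_s2 = sum24 & 0x0000FFFFu;
U32 v2_s3 = sum13 >> 16u;
U32 v2_s4 = sum24 >> 16u;
return (v1_s1 == v2_s1) && (v1_s2 == v2_s2) &&
       (v1_s3 == v2_s3) && (v1_s4 == v2_s4);
}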
/*-------------------------------------------------------------------
Name: AddThreadToRowCounts_V1
Desc: Accumulates 'Per Thread' counts into 'Per Row' Counts
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads per Block
U32 BlockMask // Block Mask
>
__device__ __forceinline__
void AddThreadToRowCounts_V1
(
U32 & rCnt1, // OUT - 4 'per row' counts assigned to this thread
U32 & rCnt2, // ditto
U32 & rCnt3, // ditto
U32 & rCnt4, // ditto
U32 * basePtr, // IN - array of 'per thread' counts
U32 tid
)
{
//-----
// Serial Scan (Scan All 64 elements in sequence)
//-----
// Accumulate [0..63]
// Note: Also zeros out [0..63]
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 0) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 4) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 8) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 12) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 16) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 20) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 24) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 28) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 32) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 36) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 40) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 44) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 48) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 52) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 56) );
SS_Sums_4_Next_V1< BlockSize, BlockMask >( rCnt1, rCnt2, rCnt3, rCnt4, basePtr, (tid + 60) );
}
/*-------------------------------------------------------------------
Name: AddThreadToRowCounts_V2
Desc: Accumulates 'Per Thread' counts into 'Per Row' Counts
Notes:
1. Vector Parallelism:
We accumulate 2 pairs at a time across each row
instead of 4 singletons for a big savings
in arithmetic operations.
2. Overflow:
We store 2 16-bit row sums per 32-bit register,
which means each accumulated row sum must not
overflow a 16-bit number (65,535).
Since the maximum possible count per thread is 252:
64 threads * 252 = 16,128 <Safe>
128 threads * 252 = 32,256 <Safe>
256 threads * 252 = 64,512 <Safe>
512 threads * 252 = 129,024 *** UNSAFE ***
If this is a problem, revert to *_V1
3. Register Pressure:
*_V2 uses 6 more registers per thread than *_V1
If this is a problem, revert to *_V1
------------------------------------------------------------------*/
template <
U32 BlockSize, // Threads per Block
U32 BlockMask // BlockSize - 1
>
__device__ __forceinline__
void AddThreadToRowCounts_V2
(
U32 & rCnt1, // OUT - 4 'per row' counts assigned to this thread
U32 & rCnt2, // ditto
U32 & rCnt3, // ditto
U32 & rCnt4, // ditto
U32 * basePtr, // IN - array of 'per thread' counts
U32 tid // IN - thread ID
)
{
U32 sum13, sum24;
sum13 = 0u;
sum24 = 0u;
//-----
// Serial Scan (Scan All 64 elements in sequence)
//-----
// Accumulate Row Sums [0..63]
// Note: Also zeros out count array while accumulating
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 0) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 4) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 8) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 12) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 16) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 20) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 24) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 28) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 32) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 36) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 40) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 44) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 48) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 52) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 56) );
SS_Sums_4_Next_V2< BlockSize, BlockMask >( sum13, sum24, basePtr, (tid + 60) );
// Convert row sums from pairs back into singletons
U32 sum1, sum2, sum3, sum4;
sum1 = sum13 & 0x0000FFFFu;
sum2 = sum24 & 0x0000FFFFu;
sum3 = sum13 >> 16u;
sum4 = sum24 >> 16u;
// Add row sums back into register counts
rCnt1 += sum1;
rCnt2 += sum2;
rCnt3 += sum3;
rCnt4 += sum4;
}
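/*-------------------------------------------------------------------
Name: TRISH_V2_ROWSUMS_ARE_SAFE
Desc: Illustrative sketch (not part of the original sample):
a compile-time style guard for the 16-bit overflow condition
discussed in the notes above. The *_V2 variant is only safe if
nThreads * 252 (the maximum count accumulated per thread between
flushes) fits in a 16-bit partial sum; otherwise use *_V1.
The macro name is invented for this sketch.
------------------------------------------------------------------*/
#define TRISH_V2_ROWSUMS_ARE_SAFE( nThreads ) ( ((nThreads) * 252u) <= 65535u )
// Examples: TRISH_V2_ROWSUMS_ARE_SAFE(256) -> true (64,512),
// TRISH_V2_ROWSUMS_ARE_SAFE(512) -> false (129,024)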
/*---------------------------------------------------------
Name: K1_TRISH_CountRows_GEN_B1
Desc:
Note: Assumes underlying data is stored as
four 8-bit values (U8,I8) per 32-bit
storage element
---------------------------------------------------------*/
template <
typename valT, // underlying value Type (U8, I8)
typename mapT, // underlying mapper object
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads per Warp )
U32 BlockSize, // Threads Per Block (needs to be a power of 2 & multiple of warpsize)
U32 GridSize, // Blocks Per Grid
U32 K_length // #elements to process per thread before looping
>
__global__
void K1_TRISH_CountRows_GEN_B1
(
U32 * outRowCounts, // OUT - 256-way row-sums array
const U32 * inVals, // IN - values to bin and count
U32 start, // IN - range [start,stop] to check and count
U32 stop, // ditto
valT minVal, // IN - minimum value
valT maxVal, // IN - maximum value
U32 numBins // IN - number of bins (in histogram)
)
{
//-------------------------------------------
// Constant values (computed at compile time)
//-------------------------------------------
// Bank Size (elements per bank)
const U32 BankSize = (1u << logBankSize); // 32 = 2^5 threads per bank
const U32 BankMask = BankSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
const U32 strideBank = BankSize + 1u; // 33 = 32 + 1
// Extra '+1' to help avoid shared-memory bank conflicts
// Warp Size (threads per warp)
const U32 WarpSize = (1u << logWarpSize); // 32 = 2^5 threads per warp
const U32 WarpMask = WarpSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
// Block Size (threads per block)
//const U32 BlockSize = 64u;
const U32 BlockMask = BlockSize - 1u;
// Chunk Size
//const U32 ChunkSize = BlockSize * K_length;
//const U32 IN_WarpSize = K_length * WarpSize;
// K_length
//const U32 K_length = 16u; // 16
const U32 K4_length = K_length * 4u; // 64 = 16 * 4
const U32 K4_stop = 256u - K4_length; // 192 = 256 - 64
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 2 = 64/32
// Bins per Histogram
const U32 nHistBins = 256u; // 256 = 2^8
// Lane Info (Compress 4 'bins' into each 32-bit value)
const U32 nLanes = 64u; // 64, # Lanes = 256 bins / 4 bins per lane
// 'Per Thread' counts array
const U32 nTCounts = nLanes * BlockSize;
const U32 banksTCounts = (nTCounts + BankMask) / BankSize;
const U32 padTCounts = (banksTCounts * BankSize) - nTCounts;
const U32 sizeTCounts = nTCounts + padTCounts;
// Output size
const U32 OutWarpSize = nHistBins / WarpsPerBlock;
const U32 OutLength = OutWarpSize / WarpSize;
const U32 OutStrideSize = OutLength * strideBank;
// Array Initialization
const U32 nPassesThrd = sizeTCounts / BlockSize;
const U32 leftOverThrd = sizeTCounts - (nPassesThrd * BlockSize);
const U32 nThreadsPerGrid = BlockSize * GridSize; // 3,072 = 64 * 48
const U32 rowSize = K_length * nThreadsPerGrid; // 193,536 = 63 * 64 * 48
//------------------------------------
// Local Typedefs
//------------------------------------
// TRISH types
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
typedef ExtractBytes<upscaleType> Extractor;
//------------------------------------
// Local Variables
//------------------------------------
// Local variables (shared memory)
__shared__ U32 s_thrdCounts[sizeTCounts]; // 'per thread' counts
// Local variables (registers)
U32 rowCnt1 = 0u;
U32 rowCnt2 = 0u;
U32 rowCnt3 = 0u;
U32 rowCnt4 = 0u;
//---------------------------
// Compute Indices & Pointers
//---------------------------
U32 tid = threadIdx.x; // Thread ID within Block
U32 * cntPtr;
U32 * basePtr;
{
// Get Warp Row & Column
//U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
//U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute starting 'input' offset (Warp Sequential Layout)
//inIdx = (warpRow * IN_WarpSize) // Move to each warps assigned portion of work
// + warpCol; // Move to warp column (in warp)
// Compute starting serial scan index
U32 baseIdx = (tid * BlockSize);
// Get pointers into shared memory array
// for different views of memory
cntPtr = &s_thrdCounts[threadIdx.x];
basePtr = &s_thrdCounts[baseIdx];
}
//-------------------------------------------
// Zero out arrays
//-------------------------------------------
{
//-
// Zero out 'Per Thread' counts
//-
U32 * ptrTC = (&s_thrdCounts[0]);
SetArray_BlockSeq
<
U32, BlockSize, nPassesThrd, leftOverThrd, sizeTCounts
>
(
ptrTC, 0u
);
}
//-----
// Compute thread, block, & grid indices & sizes
//-----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Block ID within Grid
U32 elemOffset = (bid * K_length * BlockSize) + tid; // Starting offset
U32 nElems32 = stop - start + 1u;
U32 nMaxRows = (nElems32 + (rowSize - 1u)) / rowSize;
U32 nSafeRows = nElems32 / rowSize;
U32 nSafeElems = nSafeRows * rowSize;
U32 nLeftOverElems = nElems32 - nSafeElems;
U32 startIdx = start + elemOffset;
U32 stopIdx = startIdx + (nSafeRows * rowSize);
U32 currIdx = startIdx;
U32 overflow = 0u;
// Initiate
// Initiate Mapping object
// (Transform from values to bin indices)
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
//-----
// Process all safe blocks
//-----
// 'input' pointer for reading from memory
const U32 * inPtr = &inVals[currIdx];
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
while (currIdx < stopIdx)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K4_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// NOTE: the 'K_length' variable below is a static
// hard-coded constant in the range [1..63].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
//-
// Process values [0..3] (bytes 0..15)
//-
// Read in first 'four' values (32-bit)
if (K_length >= 1u) { val1 = inPtr[0u*BlockSize]; }
if (K_length >= 2u) { val2 = inPtr[1u*BlockSize]; }
if (K_length >= 3u) { val3 = inPtr[2u*BlockSize]; }
if (K_length >= 4u) { val4 = inPtr[3u*BlockSize]; }
// Bin first 'four' values into count array
if (K_length >= 1u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 2u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 3u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 4u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [4..7] (bytes 16..31)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 5u) { val1 = inPtr[4u*BlockSize]; }
if (K_length >= 6u) { val2 = inPtr[5u*BlockSize]; }
if (K_length >= 7u) { val3 = inPtr[6u*BlockSize]; }
if (K_length >= 8u) { val4 = inPtr[7u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 5u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 6u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 7u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 8u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [8..11] (bytes 32..47)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 9u) { val1 = inPtr[ 8u*BlockSize]; }
if (K_length >= 10u) { val2 = inPtr[ 9u*BlockSize]; }
if (K_length >= 11u) { val3 = inPtr[10u*BlockSize]; }
if (K_length >= 12u) { val4 = inPtr[11u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 9u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 10u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 11u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 12u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [12..15] (bytes 48..63)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 13u) { val1 = inPtr[12u*BlockSize]; }
if (K_length >= 14u) { val2 = inPtr[13u*BlockSize]; }
if (K_length >= 15u) { val3 = inPtr[14u*BlockSize]; }
if (K_length >= 16u) { val4 = inPtr[15u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 13u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 14u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 15u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 16u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [16..19] (bytes 64..79)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 17u) { val1 = inPtr[16u*BlockSize]; }
if (K_length >= 18u) { val2 = inPtr[17u*BlockSize]; }
if (K_length >= 19u) { val3 = inPtr[18u*BlockSize]; }
if (K_length >= 20u) { val4 = inPtr[19u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 17u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 18u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 19u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 20u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [20..23] (bytes 80..95)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 21u) { val1 = inPtr[20u*BlockSize]; }
if (K_length >= 22u) { val2 = inPtr[21u*BlockSize]; }
if (K_length >= 23u) { val3 = inPtr[22u*BlockSize]; }
if (K_length >= 24u) { val4 = inPtr[23u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 21u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 22u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 23u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 24u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [24..27] (bytes 96..111)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 25u) { val1 = inPtr[24u*BlockSize]; }
if (K_length >= 26u) { val2 = inPtr[25u*BlockSize]; }
if (K_length >= 27u) { val3 = inPtr[26u*BlockSize]; }
if (K_length >= 28u) { val4 = inPtr[27u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 25u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 26u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 27u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 28u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [28..31] (bytes 112..127)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 29u) { val1 = inPtr[28u*BlockSize]; }
if (K_length >= 30u) { val2 = inPtr[29u*BlockSize]; }
if (K_length >= 31u) { val3 = inPtr[30u*BlockSize]; }
if (K_length >= 32u) { val4 = inPtr[31u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 29u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 30u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 31u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 32u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [32..35] (bytes 128..143)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 33u) { val1 = inPtr[32u*BlockSize]; }
if (K_length >= 34u) { val2 = inPtr[33u*BlockSize]; }
if (K_length >= 35u) { val3 = inPtr[34u*BlockSize]; }
if (K_length >= 36u) { val4 = inPtr[35u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 33u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 34u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 35u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 36u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [36..39] (bytes 144..159)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 37u) { val1 = inPtr[36u*BlockSize]; }
if (K_length >= 38u) { val2 = inPtr[37u*BlockSize]; }
if (K_length >= 39u) { val3 = inPtr[38u*BlockSize]; }
if (K_length >= 40u) { val4 = inPtr[39u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 37u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 38u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 39u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 40u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [40..43] (bytes 160-175)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 41u) { val1 = inPtr[40u*BlockSize]; }
if (K_length >= 42u) { val2 = inPtr[41u*BlockSize]; }
if (K_length >= 43u) { val3 = inPtr[42u*BlockSize]; }
if (K_length >= 44u) { val4 = inPtr[43u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 41u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 42u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 43u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 44u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [44..47] (bytes 176-191)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 45u) { val1 = inPtr[44u*BlockSize]; }
if (K_length >= 46u) { val2 = inPtr[45u*BlockSize]; }
if (K_length >= 47u) { val3 = inPtr[46u*BlockSize]; }
if (K_length >= 48u) { val4 = inPtr[47u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 45u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 46u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 47u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 48u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [48-51] (bytes 192-207)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 49u) { val1 = inPtr[48u*BlockSize]; }
if (K_length >= 50u) { val2 = inPtr[49u*BlockSize]; }
if (K_length >= 51u) { val3 = inPtr[50u*BlockSize]; }
if (K_length >= 52u) { val4 = inPtr[51u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 49u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 50u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 51u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 52u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [52-55] (bytes 208-223)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 53u) { val1 = inPtr[52u*BlockSize]; }
if (K_length >= 54u) { val2 = inPtr[53u*BlockSize]; }
if (K_length >= 55u) { val3 = inPtr[54u*BlockSize]; }
if (K_length >= 56u) { val4 = inPtr[55u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 53u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 54u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 55u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 56u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [56-59] (bytes 224-239)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 57u) { val1 = inPtr[56u*BlockSize]; }
if (K_length >= 58u) { val2 = inPtr[57u*BlockSize]; }
if (K_length >= 59u) { val3 = inPtr[58u*BlockSize]; }
if (K_length >= 60u) { val4 = inPtr[59u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 57u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 58u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 59u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length >= 60u)
{
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
//-
// Process values [60-62] (bytes 240-251)
//-
		// Note: We deliberately do not support K >= 64, to avoid
		//       overflow during 'binning':  our 'per thread' bin
		//       counts are 8-bit and can only absorb 255 increments
		//       before wrapping.  252 is the largest multiple of 4
		//       (4 bytes per 32-bit value) that stays below that
		//       limit, i.e. 63 values = 252 bytes / 4 bytes per value.
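		// Worked example: with K = 63 a full loop pass issues at most
		// 63 values * 4 bytes = 252 counter increments per thread, so the
		// 'overflow >= K4_stop' flush at the top of the loop always empties
		// the 8-bit per-thread counters before any of them can reach 256.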
// Read in next 'four' values (32-bit)
if (K_length >= 61u) { val1 = inPtr[60u*BlockSize]; }
if (K_length >= 62u) { val2 = inPtr[61u*BlockSize]; }
if (K_length >= 63u) { val3 = inPtr[62u*BlockSize]; }
// Note: Do not uncomment => *OVERFLOW* bug !!!
//if (K_length >= 64u) { val4 = inPtr[63u*BlockSize]; }
// Bin next 'four' values into count array
		if (K_length >= 61u)
{
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
		if (K_length >= 62u)
{
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
		if (K_length >= 63u)
{
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
// Note: Do not uncomment => *OVERFLOW* bug !!!
		//if (K_length >= 64u)
		//{
		//	Extractor::Extract4( b1, b2, b3, b4, val4 );
		//	mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
		//	                   b1, b2, b3, b4 );       // IN => values to transform
// BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//}
//-----
// Move to next row of work
//-----
currIdx += rowSize;
inPtr += rowSize;
// Increment 'overflow' count
overflow += K4_length; // K values * 4 bytes per value
}
__syncthreads();
//--------------------------------------
// LAST: Process last leftover chunk
// with more careful range checking
//--------------------------------------
if (nLeftOverElems)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K4_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
// NOTE #1: the 'K_length' variable below is a static
// hard-coded constant in the range [1..63].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
		// NOTE #2: We use a cooperative stride
		//          across all threads in all blocks of the grid
		//          ThreadsPerGrid = BlockSize * GridSize = 64 * 48 = 3072
		//          RowSize = WorkPerThread(K) * ThreadsPerGrid = 63 * 3072 = 193,536
//
// B0 B1 ... B47 (Blocks in Grid)
// ---- ---- --- ----
// k = 1 => |64| |64| ... |64| (3072 Thread & I/O requests for 1st work item per thread)
// k = 2 => |64| |64| ... |64| ditto (2nd work item per thread)
// ... ... ...
		// k = 63 => |64| |64| ... |64| ditto (63rd work item per thread)
		// NOTE #3: We use "Divide & Conquer" to avoid as much of the slower range checking as possible
		//          Try batches of 32, 16, 8, 4, 2, 1, then range check only the final leftover elements
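		// Worked example (assuming K >= 32 and the 3,072-thread grid above):
		//          nLeftOverElems = 100,000 => the batch of 32 consumes
		//          32 * 3,072 = 98,304 elements, every smaller batch is skipped
		//          (1,696 < 1 * 3,072), and the final 1,696 elements are handled
		//          one per thread under an explicit range check.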
//----
// Setup Pointers & Indices for cooperative stride
//----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Get block index
U32 nSkip = nSafeRows * rowSize; // Skip past already processed rows
U32 chunkIdx = (bid * BlockSize) + tid; // Get starting index within chunk
U32 baseIdx = start + nSkip + chunkIdx; // Get starting index for left over elements
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
//------
// Try Section of 32
//------
if (K_length >= 32u)
{
// Process 32 chunks safely without range checking
if (nLeftOverElems >= (32u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (32u * nThreadsPerGrid);
nLeftOverElems -= (32u * nThreadsPerGrid);
}
}
//------
// Try Section of 16
//------
if (K_length >= 16u)
{
// Process 16 chunks safely without range checking
if (nLeftOverElems >= (16u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (16u * nThreadsPerGrid);
nLeftOverElems -= (16u * nThreadsPerGrid);
}
}
//------
// Try Section of 8
//------
if (K_length >= 8u)
{
// Process 8 chunks safely without range checking
if (nLeftOverElems >= (8u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (8u * nThreadsPerGrid);
nLeftOverElems -= (8u * nThreadsPerGrid);
}
}
//------
// Try Section of 4
//------
if (K_length >= 4u)
{
// Process 4 chunks safely without range checking
if (nLeftOverElems >= (4u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val3 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (4u * nThreadsPerGrid);
nLeftOverElems -= (4u * nThreadsPerGrid);
}
}
//------
// Try Section of 2
//------
if (K_length >= 2u)
{
// Process 2 chunks safely without range checking
if (nLeftOverElems >= (2u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..2]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
Extractor::Extract4( b1, b2, b3, b4, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (2u * nThreadsPerGrid);
nLeftOverElems -= (2u * nThreadsPerGrid);
}
}
//------
// Try Section of 1
//------
if (K_length >= 1u)
{
// Process 1 chunk safely without range checking
if (nLeftOverElems >= (1u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (1u * nThreadsPerGrid);
nLeftOverElems -= (1u * nThreadsPerGrid);
}
}
//------
// Process Last few elements
// with careful RANGE CHECKING !!!
//------
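		// At this point every batch above has been exhausted, so
		// nLeftOverElems < nThreadsPerGrid and each thread owns at most one
		// remaining 32-bit element; the 'baseIdx <= stop' test masks off
		// threads whose slot falls past the end of the requested range.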
if (nLeftOverElems > 0u)
{
// Make sure we are 'in range' before reading & binning
U32 inRange1 = (baseIdx <= stop);
if (inRange1)
{
// Read in 32-bit element
val1 = inVals[baseIdx];
// Process element
Extractor::Extract4( b1, b2, b3, b4, val1 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
}
// Update Accumulation count
		overflow += K4_length;	// at most K values * 4 bytes per value binned above
}
// Cleanup Mapping object
// (Give mapper a chance to cleanup any resources)
mapper.Finish();
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow > 0u)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
//-------------------------------------------------
// Write out final row 'counts'
//-------------------------------------------------
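	// Layout note (e.g. a 64-thread block = 2 warps, 256 histogram bins):
	// each thread first parks its 4 row counts in shared memory, then each
	// warp copies its share of the 256 per-block counts to global memory,
	// one count per lane per step, strided by WarpSize in global memory and
	// by strideBank in shared memory (coalesced writes, fewer bank conflicts).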
{
// Compute starting 'row counts' offset
		U32 rIdx = threadIdx.x * 4u;		// 4 row counts per thread
U32 rRow = rIdx >> logBankSize;
U32 rCol = rIdx & BankMask;
U32 rowIdx = (rRow * strideBank) + (rCol + 1u);
// Extra '+1' to shift past initial pad element
U32 * rowPtr = &s_thrdCounts[rowIdx];
// Store row counts in row array
rowPtr[0] = rowCnt1;
rowPtr[1] = rowCnt2;
rowPtr[2] = rowCnt3;
rowPtr[3] = rowCnt4;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
// Get Warp Row & Column
U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Get local & global indices
U32 outGlobal = (blockIdx.x * nHistBins);
U32 outLocal = (warpRow * OutWarpSize);
U32 rowBase = (warpRow * OutStrideSize);
U32 outBase = outGlobal + outLocal;
U32 rowOff = warpCol + 1u;
U32 outIdx = outBase + warpCol;
rowIdx = rowBase + rowOff;
// Get local & global pointers
U32 * outPtr = &outRowCounts[outIdx];
rowPtr = &s_thrdCounts[rowIdx];
// Write our 'per row' counts in warp sequential order
if (OutLength >= 1u) { outPtr[(0u*WarpSize)] = rowPtr[(0u*strideBank)]; }
if (OutLength >= 2u) { outPtr[(1u*WarpSize)] = rowPtr[(1u*strideBank)]; }
if (OutLength >= 3u) { outPtr[(2u*WarpSize)] = rowPtr[(2u*strideBank)]; }
if (OutLength >= 4u) { outPtr[(3u*WarpSize)] = rowPtr[(3u*strideBank)]; }
if (OutLength >= 5u) { outPtr[(4u*WarpSize)] = rowPtr[(4u*strideBank)]; }
if (OutLength >= 6u) { outPtr[(5u*WarpSize)] = rowPtr[(5u*strideBank)]; }
if (OutLength >= 7u) { outPtr[(6u*WarpSize)] = rowPtr[(6u*strideBank)]; }
if (OutLength >= 8u) { outPtr[(7u*WarpSize)] = rowPtr[(7u*strideBank)]; }
}
}
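/*---------------------------------------------------------
   Illustrative sketch (an assumption, not part of the TRISH
   code path): one plausible unpacking of the two 16-bit
   values per 32-bit storage element that the _B2 kernel
   below assumes.  The helper name and the low/high word
   ordering are made up for illustration only -- the kernels
   do the real extraction via ExtractWords<>::Extract4/Extract2.
---------------------------------------------------------*/
static __host__ __device__ __forceinline__
void TRISH_Sketch_Unpack2x16( U32 packed, U32 & v1, U32 & v2 )
{
	v1 = packed & 0xFFFFu;          // assumed 1st value (low 16 bits)
	v2 = (packed >> 16u) & 0xFFFFu; // assumed 2nd value (high 16 bits)
}
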
/*---------------------------------------------------------
Name: K1_TRISH_CountRows_GEN_B2
Desc:
Note:
1. Assumes underlying data is stored as two 16-bit values
(U16,I16) per 32-bit storage element.
2. This further implies that K = [1,127] to safely
avoid overflowing an 8-bit counter.
---------------------------------------------------------*/
template <
	typename valT,		// underlying value Type (U16, I16)
typename mapT, // underlying mapper object
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads per Warp )
U32 BlockSize, // Threads Per Block (needs to be a power of 2 & multiple of warpsize)
U32 GridSize, // Blocks Per Grid
U32 K_length // #elements to process per thread before looping
>
__global__
void K1_TRISH_CountRows_GEN_B2
(
U32 * outRowCounts, // OUT - 256-way row-sums array
const U32 * inVals, // IN - values to bin and count
U32 start, // IN - range [start,stop] to check and count
U32 stop, // ditto
valT minVal, // IN - minimum value
valT maxVal, // IN - maximum value
U32 numBins // IN - number of bins (in histogram)
)
{
//-------------------------------------------
// Constant values (computed at compile time)
//-------------------------------------------
// Bank Size (elements per bank)
const U32 BankSize = (1u << logBankSize); // 32 = 2^5 threads per bank
const U32 BankMask = BankSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
const U32 strideBank = BankSize + 1u; // 33 = 32 + 1
// Extra '+1' to help try and avoid bank conflicts
// Warp Size (threads per warp)
const U32 WarpSize = (1u << logWarpSize); // 32 = 2^5 threads per warp
const U32 WarpMask = WarpSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
// Block Size (threads per block)
//const U32 BlockSize = 64u;
const U32 BlockMask = BlockSize - 1u;
// Chunk Size
//const U32 ChunkSize = BlockSize * K_length;
//const U32 IN_WarpSize = K_length * WarpSize;
// K_length
//const U32 K_length = 16u; // 16
	const U32 K2_length = K_length * 2u;	// 32 = 16 * 2 (two 16-bit values per 32-bit input word)
const U32 K2_stop = 256u - K2_length; // 224 = 256 - 32 (conservative test)
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 2 = 64/32
// Bins per Histogram
const U32 nHistBins = 256u; // 256 = 2^8
// Lane Info (Compress 4 'bins' into each 32-bit value)
const U32 nLanes = 64u; // 64, # Lanes = 256 bins / 4 bins per lane
// 'Per Thread' counts array
const U32 nTCounts = nLanes * BlockSize;
const U32 banksTCounts = (nTCounts + BankMask) / BankSize;
const U32 padTCounts = (banksTCounts * BankSize) - nTCounts;
const U32 sizeTCounts = nTCounts + padTCounts;
// Output size
const U32 OutWarpSize = nHistBins / WarpsPerBlock;
const U32 OutLength = OutWarpSize / WarpSize;
const U32 OutStrideSize = OutLength * strideBank;
// Array Initialization
const U32 nPassesThrd = sizeTCounts / BlockSize;
const U32 leftOverThrd = sizeTCounts - (nPassesThrd * BlockSize);
const U32 nThreadsPerGrid = BlockSize * GridSize; // 3,072 = 64 * 48
	const U32 rowSize = K_length * nThreadsPerGrid;	// 193,536 = 63 * 64 * 48
//------------------------------------
// Local Typedefs
//------------------------------------
// TRISH types
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
typedef typename ExtractWords<upscaleType> Extractor;
//------------------------------------
// Local Variables
//------------------------------------
// Local variables (shared memory)
__shared__ U32 s_thrdCounts[sizeTCounts]; // 'per thread' counts
// Local variables (registers)
U32 rowCnt1 = 0u;
U32 rowCnt2 = 0u;
U32 rowCnt3 = 0u;
U32 rowCnt4 = 0u;
//---------------------------
// Compute Indices & Pointers
//---------------------------
U32 tid = threadIdx.x; // Thread ID within Block
U32 * cntPtr;
U32 * basePtr;
{
// Get Warp Row & Column
//U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
//U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute starting 'input' offset (Warp Sequential Layout)
//inIdx = (warpRow * IN_WarpSize) // Move to each warps assigned portion of work
// + warpCol; // Move to warp column (in warp)
// Compute starting serial scan index
U32 baseIdx = (tid * BlockSize);
// Get pointers into shared memory array
// for different views of memory
cntPtr = &s_thrdCounts[threadIdx.x];
basePtr = &s_thrdCounts[baseIdx];
}
//-------------------------------------------
// Zero out arrays
//-------------------------------------------
{
//-
// Zero out 'Per Thread' counts
//-
U32 * ptrTC = (&s_thrdCounts[0]);
SetArray_BlockSeq
<
U32, BlockSize, nPassesThrd, leftOverThrd, sizeTCounts
>
(
ptrTC, 0u
);
}
//-----
// Compute thread, block, & grid indices & sizes
//-----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Block ID within Grid
U32 elemOffset = (bid * K_length * BlockSize) + tid; // Starting offset
U32 nElems32 = stop - start + 1u;
U32 nMaxRows = (nElems32 + (rowSize - 1u)) / rowSize;
U32 nSafeRows = nElems32 / rowSize;
U32 nSafeElems = nSafeRows * rowSize;
U32 nLeftOverElems = nElems32 - nSafeElems;
U32 startIdx = start + elemOffset;
U32 stopIdx = startIdx + (nSafeRows * rowSize);
U32 currIdx = startIdx;
U32 overflow = 0u;
// Initiate
// Initiate Mapping object
// (Transform from values to bin indices)
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
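	// (The mapT object is assumed to spread the caller's [minVal,maxVal]
	//  range across bin indices [0..numBins-1]; the exact transform is
	//  supplied by the template argument, not by this kernel.)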
//-----
// Process all safe blocks
//-----
// 'input' pointer for reading from memory
const U32 * inPtr = &inVals[currIdx];
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
while (currIdx < stopIdx)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K2_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// NOTE: the 'K_length' variable below is a static
		//       hard-coded constant in the range [1..127].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
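		//       Because each 32-bit word here carries two 16-bit values, a
		//       full pass adds at most 2*K increments to the 8-bit per-thread
		//       counters; the 'overflow >= K2_stop' (= 256 - 2*K) flush above
		//       keeps the running total under 256 (e.g. K = 127 => at most
		//       254 increments between flushes).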
//-
// Process values [0..3] (bytes 0..15)
//-
// Read in first 'four' values (32-bit)
if (K_length >= 1u) { val1 = inPtr[0u*BlockSize]; }
if (K_length >= 2u) { val2 = inPtr[1u*BlockSize]; }
if (K_length >= 3u) { val3 = inPtr[2u*BlockSize]; }
if (K_length >= 4u) { val4 = inPtr[3u*BlockSize]; }
// Bin first 'four' values into count array
if (K_length >= 4u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 3u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 2u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 1u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
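		// Tail-handling pattern for the section above (and each section that
		// follows): when K_length is not a multiple of 4 the compiler keeps
		// only the matching 'else' branch, and the last odd 32-bit word is
		// binned via Extract2 / Transform2 / BinCount2 -- two 16-bit values
		// instead of four.  E.g. K == 3 bins val1/val2 as a group of four
		// and val3 as a pair; K == 1 bins just the val1 pair.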
//-
// Process values [4..7] (bytes 16..31)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 5u) { val1 = inPtr[4u*BlockSize]; }
if (K_length >= 6u) { val2 = inPtr[5u*BlockSize]; }
if (K_length >= 7u) { val3 = inPtr[6u*BlockSize]; }
if (K_length >= 8u) { val4 = inPtr[7u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 8u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 7u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 6u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 5u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [8..11] (bytes 32..47)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 9u) { val1 = inPtr[ 8u*BlockSize]; }
if (K_length >= 10u) { val2 = inPtr[ 9u*BlockSize]; }
if (K_length >= 11u) { val3 = inPtr[10u*BlockSize]; }
if (K_length >= 12u) { val4 = inPtr[11u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 12u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 11u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 10u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 9u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [12..15] (bytes 48..63)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 13u) { val1 = inPtr[12u*BlockSize]; }
if (K_length >= 14u) { val2 = inPtr[13u*BlockSize]; }
if (K_length >= 15u) { val3 = inPtr[14u*BlockSize]; }
if (K_length >= 16u) { val4 = inPtr[15u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 16u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 15u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 14u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 13u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [16..19] (bytes 64..79)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 17u) { val1 = inPtr[16u*BlockSize]; }
if (K_length >= 18u) { val2 = inPtr[17u*BlockSize]; }
if (K_length >= 19u) { val3 = inPtr[18u*BlockSize]; }
if (K_length >= 20u) { val4 = inPtr[19u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 20u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 19u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 18u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 17u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [20..23] (bytes 80..95)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 21u) { val1 = inPtr[20u*BlockSize]; }
if (K_length >= 22u) { val2 = inPtr[21u*BlockSize]; }
if (K_length >= 23u) { val3 = inPtr[22u*BlockSize]; }
if (K_length >= 24u) { val4 = inPtr[23u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 24u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 23u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 22u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 21u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [24..27] (bytes 96..111)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 25u) { val1 = inPtr[24u*BlockSize]; }
if (K_length >= 26u) { val2 = inPtr[25u*BlockSize]; }
if (K_length >= 27u) { val3 = inPtr[26u*BlockSize]; }
if (K_length >= 28u) { val4 = inPtr[27u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 28u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 27u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 26u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 25u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [28..31] (bytes 112..127)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 29u) { val1 = inPtr[28u*BlockSize]; }
if (K_length >= 30u) { val2 = inPtr[29u*BlockSize]; }
if (K_length >= 31u) { val3 = inPtr[30u*BlockSize]; }
if (K_length >= 32u) { val4 = inPtr[31u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 32u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 31u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 30u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 29u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [32..35] (bytes 128..143)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 33u) { val1 = inPtr[32u*BlockSize]; }
if (K_length >= 34u) { val2 = inPtr[33u*BlockSize]; }
if (K_length >= 35u) { val3 = inPtr[34u*BlockSize]; }
if (K_length >= 36u) { val4 = inPtr[35u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 36u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 35u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 34u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 33u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [36..39] (bytes 144..159)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 37u) { val1 = inPtr[36u*BlockSize]; }
if (K_length >= 38u) { val2 = inPtr[37u*BlockSize]; }
if (K_length >= 39u) { val3 = inPtr[38u*BlockSize]; }
if (K_length >= 40u) { val4 = inPtr[39u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 40u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 39u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 38u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 37u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [40..43] (bytes 160-175)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 41u) { val1 = inPtr[40u*BlockSize]; }
if (K_length >= 42u) { val2 = inPtr[41u*BlockSize]; }
if (K_length >= 43u) { val3 = inPtr[42u*BlockSize]; }
if (K_length >= 44u) { val4 = inPtr[43u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 44u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 43u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 42u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 41u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [44..47] (bytes 176-191)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 45u) { val1 = inPtr[44u*BlockSize]; }
if (K_length >= 46u) { val2 = inPtr[45u*BlockSize]; }
if (K_length >= 47u) { val3 = inPtr[46u*BlockSize]; }
if (K_length >= 48u) { val4 = inPtr[47u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 48u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 47u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 46u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 45u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [48-51] (bytes 192-207)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 49u) { val1 = inPtr[48u*BlockSize]; }
if (K_length >= 50u) { val2 = inPtr[49u*BlockSize]; }
if (K_length >= 51u) { val3 = inPtr[50u*BlockSize]; }
if (K_length >= 52u) { val4 = inPtr[51u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 52u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 51u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 50u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 49u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [52-55] (bytes 208-223)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 53u) { val1 = inPtr[52u*BlockSize]; }
if (K_length >= 54u) { val2 = inPtr[53u*BlockSize]; }
if (K_length >= 55u) { val3 = inPtr[54u*BlockSize]; }
if (K_length >= 56u) { val4 = inPtr[55u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 56u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 55u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 54u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 53u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [56-59] (bytes 224-239)
//-
// Read in next 'four' values (32-bit)
if (K_length >= 57u) { val1 = inPtr[56u*BlockSize]; }
if (K_length >= 58u) { val2 = inPtr[57u*BlockSize]; }
if (K_length >= 59u) { val3 = inPtr[58u*BlockSize]; }
if (K_length >= 60u) { val4 = inPtr[59u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 60u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 59u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 58u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 57u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [60-63]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 61u) { val1 = inPtr[60u*BlockSize]; }
if (K_length >= 62u) { val2 = inPtr[61u*BlockSize]; }
if (K_length >= 63u) { val3 = inPtr[62u*BlockSize]; }
if (K_length >= 64u) { val4 = inPtr[63u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 64u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 63u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 62u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 61u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [64-67]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 65u) { val1 = inPtr[64u*BlockSize]; }
if (K_length >= 66u) { val2 = inPtr[65u*BlockSize]; }
if (K_length >= 67u) { val3 = inPtr[66u*BlockSize]; }
if (K_length >= 68u) { val4 = inPtr[67u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 68u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 67u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 66u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 65u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [68-71]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 69u) { val1 = inPtr[68u*BlockSize]; }
if (K_length >= 70u) { val2 = inPtr[69u*BlockSize]; }
if (K_length >= 71u) { val3 = inPtr[70u*BlockSize]; }
if (K_length >= 72u) { val4 = inPtr[71u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 72u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 71u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 70u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 69u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [72-75]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 73u) { val1 = inPtr[72u*BlockSize]; }
if (K_length >= 74u) { val2 = inPtr[73u*BlockSize]; }
if (K_length >= 75u) { val3 = inPtr[74u*BlockSize]; }
if (K_length >= 76u) { val4 = inPtr[75u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 76u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 75u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 74u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 73u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [76-79]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 77u) { val1 = inPtr[76u*BlockSize]; }
if (K_length >= 78u) { val2 = inPtr[77u*BlockSize]; }
if (K_length >= 79u) { val3 = inPtr[78u*BlockSize]; }
if (K_length >= 80u) { val4 = inPtr[79u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 80u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 79u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 78u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 77u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [80-83]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 81u) { val1 = inPtr[80u*BlockSize]; }
if (K_length >= 82u) { val2 = inPtr[81u*BlockSize]; }
if (K_length >= 83u) { val3 = inPtr[82u*BlockSize]; }
if (K_length >= 84u) { val4 = inPtr[83u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 84u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 83u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 82u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 81u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [84-87]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 85u) { val1 = inPtr[84u*BlockSize]; }
if (K_length >= 86u) { val2 = inPtr[85u*BlockSize]; }
if (K_length >= 87u) { val3 = inPtr[86u*BlockSize]; }
if (K_length >= 88u) { val4 = inPtr[87u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 88u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 87u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 86u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 85u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [88-91]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 89u) { val1 = inPtr[88u*BlockSize]; }
if (K_length >= 90u) { val2 = inPtr[89u*BlockSize]; }
if (K_length >= 91u) { val3 = inPtr[90u*BlockSize]; }
if (K_length >= 92u) { val4 = inPtr[91u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 92u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 91u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 90u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 89u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [92-95]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 93u) { val1 = inPtr[92u*BlockSize]; }
if (K_length >= 94u) { val2 = inPtr[93u*BlockSize]; }
if (K_length >= 95u) { val3 = inPtr[94u*BlockSize]; }
if (K_length >= 96u) { val4 = inPtr[95u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 96u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 95u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 94u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 93u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [96-99]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 97u) { val1 = inPtr[96u*BlockSize]; }
if (K_length >= 98u) { val2 = inPtr[97u*BlockSize]; }
if (K_length >= 99u) { val3 = inPtr[98u*BlockSize]; }
if (K_length >= 100u) { val4 = inPtr[99u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 100u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 99u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 98u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 97u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [100-103]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 101u) { val1 = inPtr[100u*BlockSize]; }
if (K_length >= 102u) { val2 = inPtr[101u*BlockSize]; }
if (K_length >= 103u) { val3 = inPtr[102u*BlockSize]; }
if (K_length >= 104u) { val4 = inPtr[103u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 104u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 103u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 102u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 101u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [104-107]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 105u) { val1 = inPtr[104u*BlockSize]; }
if (K_length >= 106u) { val2 = inPtr[105u*BlockSize]; }
if (K_length >= 107u) { val3 = inPtr[106u*BlockSize]; }
if (K_length >= 108u) { val4 = inPtr[107u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 108u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 107u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 106u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 105u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [108-111]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 109u) { val1 = inPtr[108u*BlockSize]; }
if (K_length >= 110u) { val2 = inPtr[109u*BlockSize]; }
if (K_length >= 111u) { val3 = inPtr[110u*BlockSize]; }
if (K_length >= 112u) { val4 = inPtr[111u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 112u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 111u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 110u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 109u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [112-115]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 113u) { val1 = inPtr[112u*BlockSize]; }
if (K_length >= 114u) { val2 = inPtr[113u*BlockSize]; }
if (K_length >= 115u) { val3 = inPtr[114u*BlockSize]; }
if (K_length >= 116u) { val4 = inPtr[115u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 116u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 115u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 114u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 113u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [116-119]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 117u) { val1 = inPtr[116u*BlockSize]; }
if (K_length >= 118u) { val2 = inPtr[117u*BlockSize]; }
if (K_length >= 119u) { val3 = inPtr[118u*BlockSize]; }
if (K_length >= 120u) { val4 = inPtr[119u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 120u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 119u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 118u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 117u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [120-123]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 121u) { val1 = inPtr[120u*BlockSize]; }
if (K_length >= 122u) { val2 = inPtr[121u*BlockSize]; }
if (K_length >= 123u) { val3 = inPtr[122u*BlockSize]; }
if (K_length >= 124u) { val4 = inPtr[123u*BlockSize]; }
// Bin next 'four' values into count array
if (K_length >= 124u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 123u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 122u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 121u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-
// Process values [124-127]
//-
// Read in next 'three' values (32-bit)
if (K_length >= 125u) { val1 = inPtr[124u*BlockSize]; }
if (K_length >= 126u) { val2 = inPtr[125u*BlockSize]; }
if (K_length >= 127u) { val3 = inPtr[126u*BlockSize]; }
// NOTE: Do not uncomment the line below => *OVERFLOW* BUG !!!
//if (K_length >= 128u) { val4 = inPtr[127u*BlockSize]; }
// Bin next 'three' values into count array
// NOTE: Do not uncomment the section below => *OVERFLOW* BUG !!!
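// Likely rationale (an assumption inferred from the 'overflow' bookkeeping, not
// stated explicitly here): the per-thread bin counters appear to be narrow lanes
// that saturate near 255. With K_length capped at 127, a thread issues at most
// 2 * 127 = 254 increments per row, which still fits; binning a 128th value would
// allow 256 increments into a single lane and wrap it, hence the section below
// stays disabled.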
/*
if (K_length >= 128u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3,v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
*/
{
if (K_length == 127u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3
Extractor::Extract2( b1, b2, val3 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 126u)
{
// Process v1,v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
if (K_length == 125u)
{
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
//-----
// Move to next row of work
//-----
currIdx += rowSize;
inPtr += rowSize;
// Increment 'overflow' count
overflow += K2_length; // K values * 2 words per value
}
__syncthreads();
//--------------------------------------
// LAST: Process last leftover chunk
// with more careful range checking
//--------------------------------------
if (nLeftOverElems)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K2_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
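// Bookkeeping note (a hedged sketch; K2_stop is configured elsewhere in this
// file): every full row processed above added K2_length = 2 * K_length to
// 'overflow', i.e. two bin increments per 32-bit value read. Flushing the
// per-thread counts into the row counts once overflow >= K2_stop keeps those
// narrow per-thread counters from wrapping before the leftover chunk is binned.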
// NOTE #1: the 'K_length' variable below is a static
//          hard-coded constant in the range [1..127].
//          K = 'Work per thread' per loop (stride)...
//          The compiler will throw away any unrolled code
//          beyond our specified 'K' value, with no negative
//          impact on performance.
// NOTE #2: We use a cooperative stride
//          across each thread in each block in the grid
//          ChunkSize = BlockSize * GridSize = 64 * 48 = 3072
//          RowSize   = WorkPerThread(K) * ChunkSize = 63 * 3072 = 193,536
//
//                       B0   B1  ...  B47  (Blocks in Grid)
//                      ---- ---- --- ----
//           k =  1 =>  |64| |64| ... |64|  (3072 thread & I/O requests for 1st work item per thread)
//           k =  2 =>  |64| |64| ... |64|  ditto (2nd work item per thread)
//               ...       ...         ...
//           k = 63 =>  |64| |64| ... |64|  ditto (63rd work item per thread)
// NOTE #3: We use "Divide & Conquer" to avoid as much of the slower range checking as possible.
//          Try batches of 64, 32, 16, 8, 4, 2, 1, and finally the leftover (on which we must range check).
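// Worked example (a hedged sketch using the numbers from NOTE #2, and assuming
// K_length >= 64 so every batch size is available): with nThreadsPerGrid = 3072
// and nLeftOverElems = 200,000, the batch-of-64 pass consumes 64 * 3072 = 196,608
// elements, leaving 3,392. The 32/16/8/4/2 batches are all too large to apply,
// the batch-of-1 pass consumes another 3,072, and the final 320 elements are
// handled one per thread below with an explicit range check against 'stop'.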
//----
// Setup Pointers & Indices for cooperative stride
//----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Get block index
U32 nSkip = nSafeRows * rowSize; // Skip past already processed rows
U32 chunkIdx = (bid * BlockSize) + tid; // Get starting index within chunk
U32 baseIdx = start + nSkip + chunkIdx; // Get starting index for left over elements
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
//------
// Try Section of 64
//------
if (K_length >= 64u)
{
// Process 64 chunks safely without range checking
if (nLeftOverElems >= (64u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [33..36]
//-----
val1 = inPtr[(32u*nThreadsPerGrid)];
val2 = inPtr[(33u*nThreadsPerGrid)];
val3 = inPtr[(34u*nThreadsPerGrid)];
val4 = inPtr[(35u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [37..40]
//-----
val1 = inPtr[(36u*nThreadsPerGrid)];
val2 = inPtr[(37u*nThreadsPerGrid)];
val3 = inPtr[(38u*nThreadsPerGrid)];
val4 = inPtr[(39u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [41..44]
//-----
val1 = inPtr[(40u*nThreadsPerGrid)];
val2 = inPtr[(41u*nThreadsPerGrid)];
val3 = inPtr[(42u*nThreadsPerGrid)];
val4 = inPtr[(43u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [45..48]
//-----
val1 = inPtr[(44u*nThreadsPerGrid)];
val2 = inPtr[(45u*nThreadsPerGrid)];
val3 = inPtr[(46u*nThreadsPerGrid)];
val4 = inPtr[(47u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [49..52]
//-----
val1 = inPtr[(48u*nThreadsPerGrid)];
val2 = inPtr[(49u*nThreadsPerGrid)];
val3 = inPtr[(50u*nThreadsPerGrid)];
val4 = inPtr[(51u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [53..56]
//-----
val1 = inPtr[(52u*nThreadsPerGrid)];
val2 = inPtr[(53u*nThreadsPerGrid)];
val3 = inPtr[(54u*nThreadsPerGrid)];
val4 = inPtr[(55u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [57..60]
//-----
val1 = inPtr[(56u*nThreadsPerGrid)];
val2 = inPtr[(57u*nThreadsPerGrid)];
val3 = inPtr[(58u*nThreadsPerGrid)];
val4 = inPtr[(59u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [61..64]
//-----
val1 = inPtr[(60u*nThreadsPerGrid)];
val2 = inPtr[(61u*nThreadsPerGrid)];
val3 = inPtr[(62u*nThreadsPerGrid)];
val4 = inPtr[(63u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (64u * nThreadsPerGrid);
nLeftOverElems -= (64u * nThreadsPerGrid);
}
}
//------
// Try Section of 32
//------
if (K_length >= 32u)
{
// Process 32 chunks safely without range checking
if (nLeftOverElems >= (32u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (32u * nThreadsPerGrid);
nLeftOverElems -= (32u * nThreadsPerGrid);
}
}
//------
// Try Section of 16
//------
if (K_length >= 16u)
{
// Process 16 chunks safely without range checking
if (nLeftOverElems >= (16u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (16u * nThreadsPerGrid);
nLeftOverElems -= (16u * nThreadsPerGrid);
}
}
//------
// Try Section of 8
//------
if (K_length >= 8u)
{
// Process 8 chunks safely without range checking
if (nLeftOverElems >= (8u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (8u * nThreadsPerGrid);
nLeftOverElems -= (8u * nThreadsPerGrid);
}
}
//------
// Try Section of 4
//------
if (K_length >= 4u)
{
// Process 4 chunks safely without range checking
if (nLeftOverElems >= (4u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Process v3, v4
Extractor::Extract4( b1, b2, b3, b4, val3, val4 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (4u * nThreadsPerGrid);
nLeftOverElems -= (4u * nThreadsPerGrid);
}
}
//------
// Try Section of 2
//------
if (K_length >= 2u)
{
// Process 2 chunks safely without range checking
if (nLeftOverElems >= (2u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..2]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
// Process v1, v2
Extractor::Extract4( b1, b2, b3, b4, val1, val2 );
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (2u * nThreadsPerGrid);
nLeftOverElems -= (2u * nThreadsPerGrid);
}
}
//------
// Try Section of 1
//------
if (K_length >= 1u)
{
// Process 1 chunk safely without range checking
if (nLeftOverElems >= (1u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
// Process v1
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
// Move to next section
baseIdx += (1u * nThreadsPerGrid);
nLeftOverElems -= (1u * nThreadsPerGrid);
}
}
//------
// Process Last few elements
// with careful RANGE CHECKING !!!
//------
if (nLeftOverElems > 0u)
{
// Make sure we are 'in range' before reading & binning
U32 inRange1 = (baseIdx <= stop);
if (inRange1)
{
// Read in 32-bit element
val1 = inVals[baseIdx];
// Process element
Extractor::Extract2( b1, b2, val1 );
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
}
// Update Accumulation count
overflow += K2_length; // overflow += K elements * 2 words per value
}
// Cleanup Mapping object
// (Give mapper a chance to cleanup any resources)
mapper.Finish();
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow > 0u)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
//-------------------------------------------------
// Write out final row 'counts'
//-------------------------------------------------
{
// Compute starting 'row counts' offset
U32 rIdx = threadIdx.x * 4u; // 4 groups per lane
U32 rRow = rIdx >> logBankSize;
U32 rCol = rIdx & BankMask;
U32 rowIdx = (rRow * strideBank) + (rCol + 1u);
// Extra '+1' to shift past initial pad element
U32 * rowPtr = &s_thrdCounts[rowIdx];
// Store row counts in row array
rowPtr[0] = rowCnt1;
rowPtr[1] = rowCnt2;
rowPtr[2] = rowCnt3;
rowPtr[3] = rowCnt4;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
// Get Warp Row & Column
U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Get local & global indices
U32 outGlobal = (blockIdx.x * nHistBins);
U32 outLocal = (warpRow * OutWarpSize);
U32 rowBase = (warpRow * OutStrideSize);
U32 outBase = outGlobal + outLocal;
U32 rowOff = warpCol + 1u;
U32 outIdx = outBase + warpCol;
rowIdx = rowBase + rowOff;
// Get local & global pointers
U32 * outPtr = &outRowCounts[outIdx];
rowPtr = &s_thrdCounts[rowIdx];
// Write our 'per row' counts in warp sequential order
if (OutLength >= 1u) { outPtr[(0u*WarpSize)] = rowPtr[(0u*strideBank)]; }
if (OutLength >= 2u) { outPtr[(1u*WarpSize)] = rowPtr[(1u*strideBank)]; }
if (OutLength >= 3u) { outPtr[(2u*WarpSize)] = rowPtr[(2u*strideBank)]; }
if (OutLength >= 4u) { outPtr[(3u*WarpSize)] = rowPtr[(3u*strideBank)]; }
if (OutLength >= 5u) { outPtr[(4u*WarpSize)] = rowPtr[(4u*strideBank)]; }
if (OutLength >= 6u) { outPtr[(5u*WarpSize)] = rowPtr[(5u*strideBank)]; }
if (OutLength >= 7u) { outPtr[(6u*WarpSize)] = rowPtr[(6u*strideBank)]; }
if (OutLength >= 8u) { outPtr[(7u*WarpSize)] = rowPtr[(7u*strideBank)]; }
}
}
/*---------------------------------------------------------
Name: K1_TRISH_CountRows_GEN_B4
Desc:
Note:
1. Assumes underlying data is stored as 32-bit values
(U32,I32,F32) per 32-bit storage element.
2. This further implies that K = [1,255] to safely
avoid overflowing an 8-bit counter.
3. However, K >= 104 impacts performance negatively
as the program appears to grow to be too large
to fit into the hardware code cache ...,
so we restrict K to the range K=[1..127]
---------------------------------------------------------*/
template <
typename valT, // underlying value Type (U8, I8)
typename mapT, // underlying mapper object
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads per Warp )
U32 BlockSize, // Threads Per Block (needs to be a power of 2 & multiple of warpsize)
U32 GridSize, // Blocks Per Grid
U32 K_length // #elements to process per thread before looping
>
__global__
void K1_TRISH_CountRows_GEN_B4
(
U32 * outRowCounts, // OUT - 256-way row-sums array
const valT * inVals, // IN - values to bin and count
U32 start, // IN - range [start,stop] to check and count
U32 stop, // ditto
valT minVal, // IN - minimum value
valT maxVal, // IN - maximum value
U32 numBins // IN - number of bins (in histogram)
)
{
//-------------------------------------------
// Constant values (computed at compile time)
//-------------------------------------------
// Bank Size (elements per bank)
const U32 BankSize = (1u << logBankSize); // 32 = 2^5 threads per bank
const U32 BankMask = BankSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
const U32 strideBank = BankSize + 1u; // 33 = 32 + 1
// Extra '+1' to help try and avoid bank conflicts
// Warp Size (threads per warp)
const U32 WarpSize = (1u << logWarpSize); // 32 = 2^5 threads per warp
const U32 WarpMask = WarpSize - 1u; // 31 = 32 - 1 = 0x1F = b11111
// Block Size (threads per block)
//const U32 BlockSize = 64u;
const U32 BlockMask = BlockSize - 1u;
// Chunk Size
//const U32 ChunkSize = BlockSize * K_length;
//const U32 IN_WarpSize = K_length * WarpSize;
// K_length
//const U32 K_length = 16u; // 16
const U32 K1_length = K_length; // 16 = 16 (1 storage value per input value)
const U32 K1_stop = 256u - K1_length; // 240 = 256 - 16 (conservative test)
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 2 = 64/32
// Bins per Histogram
const U32 nHistBins = 256u; // 256 = 2^8
// Lane Info (Compress 4 'bins' into each 32-bit value)
const U32 nLanes = 64u; // 64, # Lanes = 256 bins / 4 bins per lane
// 'Per Thread' counts array
const U32 nTCounts = nLanes * BlockSize;
const U32 banksTCounts = (nTCounts + BankMask) / BankSize;
const U32 padTCounts = (banksTCounts * BankSize) - nTCounts;
const U32 sizeTCounts = nTCounts + padTCounts;
// Output size
const U32 OutWarpSize = nHistBins / WarpsPerBlock;
const U32 OutLength = OutWarpSize / WarpSize;
const U32 OutStrideSize = OutLength * strideBank;
// Array Initialization
const U32 nPassesThrd = sizeTCounts / BlockSize;
const U32 leftOverThrd = sizeTCounts - (nPassesThrd * BlockSize);
const U32 nThreadsPerGrid = BlockSize * GridSize; // 3,072 = 64 * 48
const U32 rowSize = K_length * nThreadsPerGrid; // 193,586 = 63 * 64 * 48
//------------------------------------
// Local Typedefs
//------------------------------------
// TRISH types
typedef typename TRISH_traits<valT>::base_type baseType;
typedef typename TRISH_traits<valT>::bin_type binType;
typedef typename TRISH_traits<valT>::upscale_type upscaleType;
typedef typename TRISH_traits<valT>::convert_type convertType;
//------------------------------------
// Local Variables
//------------------------------------
// Local variables (shared memory)
__shared__ U32 s_thrdCounts[sizeTCounts]; // 'per thread' counts
// Local variables (registers)
U32 rowCnt1 = 0u;
U32 rowCnt2 = 0u;
U32 rowCnt3 = 0u;
U32 rowCnt4 = 0u;
//---------------------------
// Compute Indices & Pointers
//---------------------------
U32 tid = threadIdx.x; // Thread ID within Block
U32 * cntPtr;
U32 * basePtr;
{
// Get Warp Row & Column
//U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
//U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute starting 'input' offset (Warp Sequential Layout)
//inIdx = (warpRow * IN_WarpSize) // Move to each warps assigned portion of work
// + warpCol; // Move to warp column (in warp)
// Compute starting serial scan index
U32 baseIdx = (tid * BlockSize);
// Get pointers into shared memory array
// for different views of memory
cntPtr = &s_thrdCounts[threadIdx.x];
basePtr = &s_thrdCounts[baseIdx];
}
//-------------------------------------------
// Zero out arrays
//-------------------------------------------
{
//-
// Zero out 'Per Thread' counts
//-
U32 * ptrTC = (&s_thrdCounts[0]);
SetArray_BlockSeq
<
U32, BlockSize, nPassesThrd, leftOverThrd, sizeTCounts
>
(
ptrTC, 0u
);
}
//-----
// Compute thread, block, & grid indices & sizes
//-----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Block ID within Grid
U32 elemOffset = (bid * K_length * BlockSize) + tid; // Starting offset
U32 nElems32 = stop - start + 1u;
U32 nMaxRows = (nElems32 + (rowSize - 1u)) / rowSize;
U32 nSafeRows = nElems32 / rowSize;
U32 nSafeElems = nSafeRows * rowSize;
U32 nLeftOverElems = nElems32 - nSafeElems;
U32 startIdx = start + elemOffset;
U32 stopIdx = startIdx + (nSafeRows * rowSize);
U32 currIdx = startIdx;
U32 overflow = 0u;
// Initiate
// Initiate Mapping object
// (Transform from values to bin indices)
mapT mapper;
mapper.Initiate( minVal, maxVal, numBins );
//-----
// Process all safe blocks
//-----
// 'input' pointer for reading from memory
const valT * inPtr = &inVals[currIdx];
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
while (currIdx < stopIdx)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K1_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
valT val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
// NOTE: the 'K_length' variable below is a static
// hard-coded constant in the range [1..63].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
//-
// Process values [0..3]
//-
// Read in first 'four' values (32-bit)
if (K_length >= 1u) { val1 = inPtr[0u*BlockSize]; }
if (K_length >= 2u) { val2 = inPtr[1u*BlockSize]; }
if (K_length >= 3u) { val3 = inPtr[2u*BlockSize]; }
if (K_length >= 4u) { val4 = inPtr[3u*BlockSize]; }
// Convert to upscale type
if (K_length >= 1u) { b1 = upscaleType(val1); }
if (K_length >= 2u) { b2 = upscaleType(val2); }
if (K_length >= 3u) { b3 = upscaleType(val3); }
if (K_length >= 4u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 4u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 3u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 2u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 1u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [4..7]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 5u) { val1 = inPtr[4u*BlockSize]; }
if (K_length >= 6u) { val2 = inPtr[5u*BlockSize]; }
if (K_length >= 7u) { val3 = inPtr[6u*BlockSize]; }
if (K_length >= 8u) { val4 = inPtr[7u*BlockSize]; }
// Convert to upscale type
if (K_length >= 5u) { b1 = upscaleType(val1); }
if (K_length >= 6u) { b2 = upscaleType(val2); }
if (K_length >= 7u) { b3 = upscaleType(val3); }
if (K_length >= 8u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 8u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 7u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 6u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 5u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [8..11]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 9u) { val1 = inPtr[ 8u*BlockSize]; }
if (K_length >= 10u) { val2 = inPtr[ 9u*BlockSize]; }
if (K_length >= 11u) { val3 = inPtr[10u*BlockSize]; }
if (K_length >= 12u) { val4 = inPtr[11u*BlockSize]; }
// Convert to upscale type
if (K_length >= 9u) { b1 = upscaleType(val1); }
if (K_length >= 10u) { b2 = upscaleType(val2); }
if (K_length >= 11u) { b3 = upscaleType(val3); }
if (K_length >= 12u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 12u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 11u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 10u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 9u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [12..15]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 13u) { val1 = inPtr[12u*BlockSize]; }
if (K_length >= 14u) { val2 = inPtr[13u*BlockSize]; }
if (K_length >= 15u) { val3 = inPtr[14u*BlockSize]; }
if (K_length >= 16u) { val4 = inPtr[15u*BlockSize]; }
// Convert to upscale type
if (K_length >= 13u) { b1 = upscaleType(val1); }
if (K_length >= 14u) { b2 = upscaleType(val2); }
if (K_length >= 15u) { b3 = upscaleType(val3); }
if (K_length >= 16u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 16u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 15u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 14u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 13u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [16..19]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 17u) { val1 = inPtr[16u*BlockSize]; }
if (K_length >= 18u) { val2 = inPtr[17u*BlockSize]; }
if (K_length >= 19u) { val3 = inPtr[18u*BlockSize]; }
if (K_length >= 20u) { val4 = inPtr[19u*BlockSize]; }
// Convert to upscale type
if (K_length >= 17u) { b1 = upscaleType(val1); }
if (K_length >= 18u) { b2 = upscaleType(val2); }
if (K_length >= 19u) { b3 = upscaleType(val3); }
if (K_length >= 20u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 20u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 19u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 18u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 17u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [20..23]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 21u) { val1 = inPtr[20u*BlockSize]; }
if (K_length >= 22u) { val2 = inPtr[21u*BlockSize]; }
if (K_length >= 23u) { val3 = inPtr[22u*BlockSize]; }
if (K_length >= 24u) { val4 = inPtr[23u*BlockSize]; }
// Convert to upscale type
if (K_length >= 21u) { b1 = upscaleType(val1); }
if (K_length >= 22u) { b2 = upscaleType(val2); }
if (K_length >= 23u) { b3 = upscaleType(val3); }
if (K_length >= 24u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 24u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 23u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 22u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 21u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [24..27]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 25u) { val1 = inPtr[24u*BlockSize]; }
if (K_length >= 26u) { val2 = inPtr[25u*BlockSize]; }
if (K_length >= 27u) { val3 = inPtr[26u*BlockSize]; }
if (K_length >= 28u) { val4 = inPtr[27u*BlockSize]; }
// Convert to upscale type
if (K_length >= 25u) { b1 = upscaleType(val1); }
if (K_length >= 26u) { b2 = upscaleType(val2); }
if (K_length >= 27u) { b3 = upscaleType(val3); }
if (K_length >= 28u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 28u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 27u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 26u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 25u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [28..31]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 29u) { val1 = inPtr[28u*BlockSize]; }
if (K_length >= 30u) { val2 = inPtr[29u*BlockSize]; }
if (K_length >= 31u) { val3 = inPtr[30u*BlockSize]; }
if (K_length >= 32u) { val4 = inPtr[31u*BlockSize]; }
// Convert to upscale type
if (K_length >= 29u) { b1 = upscaleType(val1); }
if (K_length >= 30u) { b2 = upscaleType(val2); }
if (K_length >= 31u) { b3 = upscaleType(val3); }
if (K_length >= 32u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 32u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 31u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 30u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 29u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [32..35]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 33u) { val1 = inPtr[32u*BlockSize]; }
if (K_length >= 34u) { val2 = inPtr[33u*BlockSize]; }
if (K_length >= 35u) { val3 = inPtr[34u*BlockSize]; }
if (K_length >= 36u) { val4 = inPtr[35u*BlockSize]; }
// Convert to upscale type
if (K_length >= 33u) { b1 = upscaleType(val1); }
if (K_length >= 34u) { b2 = upscaleType(val2); }
if (K_length >= 35u) { b3 = upscaleType(val3); }
if (K_length >= 36u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 36u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 35u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 34u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 33u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [36..39]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 37u) { val1 = inPtr[36u*BlockSize]; }
if (K_length >= 38u) { val2 = inPtr[37u*BlockSize]; }
if (K_length >= 39u) { val3 = inPtr[38u*BlockSize]; }
if (K_length >= 40u) { val4 = inPtr[39u*BlockSize]; }
// Convert to upscale type
if (K_length >= 37u) { b1 = upscaleType(val1); }
if (K_length >= 38u) { b2 = upscaleType(val2); }
if (K_length >= 39u) { b3 = upscaleType(val3); }
if (K_length >= 40u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 40u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 39u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 38u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 37u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [40..43]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 41u) { val1 = inPtr[40u*BlockSize]; }
if (K_length >= 42u) { val2 = inPtr[41u*BlockSize]; }
if (K_length >= 43u) { val3 = inPtr[42u*BlockSize]; }
if (K_length >= 44u) { val4 = inPtr[43u*BlockSize]; }
// Convert to upscale type
if (K_length >= 41u) { b1 = upscaleType(val1); }
if (K_length >= 42u) { b2 = upscaleType(val2); }
if (K_length >= 43u) { b3 = upscaleType(val3); }
if (K_length >= 44u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 44u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 43u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 42u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 41u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [44..47]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 45u) { val1 = inPtr[44u*BlockSize]; }
if (K_length >= 46u) { val2 = inPtr[45u*BlockSize]; }
if (K_length >= 47u) { val3 = inPtr[46u*BlockSize]; }
if (K_length >= 48u) { val4 = inPtr[47u*BlockSize]; }
// Convert to upscale type
if (K_length >= 45u) { b1 = upscaleType(val1); }
if (K_length >= 46u) { b2 = upscaleType(val2); }
if (K_length >= 47u) { b3 = upscaleType(val3); }
if (K_length >= 48u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 48u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 47u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 46u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 45u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [48..51]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 49u) { val1 = inPtr[48u*BlockSize]; }
if (K_length >= 50u) { val2 = inPtr[49u*BlockSize]; }
if (K_length >= 51u) { val3 = inPtr[50u*BlockSize]; }
if (K_length >= 52u) { val4 = inPtr[51u*BlockSize]; }
// Convert to upscale type
if (K_length >= 49u) { b1 = upscaleType(val1); }
if (K_length >= 50u) { b2 = upscaleType(val2); }
if (K_length >= 51u) { b3 = upscaleType(val3); }
if (K_length >= 52u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 52u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 51u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 50u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 49u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [52..55]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 53u) { val1 = inPtr[52u*BlockSize]; }
if (K_length >= 54u) { val2 = inPtr[53u*BlockSize]; }
if (K_length >= 55u) { val3 = inPtr[54u*BlockSize]; }
if (K_length >= 56u) { val4 = inPtr[55u*BlockSize]; }
// Convert to upscale type
if (K_length >= 53u) { b1 = upscaleType(val1); }
if (K_length >= 54u) { b2 = upscaleType(val2); }
if (K_length >= 55u) { b3 = upscaleType(val3); }
if (K_length >= 56u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 56u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 55u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 54u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 53u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [56..59]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 57u) { val1 = inPtr[56u*BlockSize]; }
if (K_length >= 58u) { val2 = inPtr[57u*BlockSize]; }
if (K_length >= 59u) { val3 = inPtr[58u*BlockSize]; }
if (K_length >= 60u) { val4 = inPtr[59u*BlockSize]; }
// Convert to upscale type
if (K_length >= 57u) { b1 = upscaleType(val1); }
if (K_length >= 58u) { b2 = upscaleType(val2); }
if (K_length >= 59u) { b3 = upscaleType(val3); }
if (K_length >= 60u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 60u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 59u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 58u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 57u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [60..63]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 61u) { val1 = inPtr[60u*BlockSize]; }
if (K_length >= 62u) { val2 = inPtr[61u*BlockSize]; }
if (K_length >= 63u) { val3 = inPtr[62u*BlockSize]; }
if (K_length >= 64u) { val4 = inPtr[63u*BlockSize]; }
// Convert to upscale type
if (K_length >= 61u) { b1 = upscaleType(val1); }
if (K_length >= 62u) { b2 = upscaleType(val2); }
if (K_length >= 63u) { b3 = upscaleType(val3); }
if (K_length >= 64u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 64u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 63u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 62u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 61u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [64..67]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 65u) { val1 = inPtr[64u*BlockSize]; }
if (K_length >= 66u) { val2 = inPtr[65u*BlockSize]; }
if (K_length >= 67u) { val3 = inPtr[66u*BlockSize]; }
if (K_length >= 68u) { val4 = inPtr[67u*BlockSize]; }
// Convert to upscale type
if (K_length >= 65u) { b1 = upscaleType(val1); }
if (K_length >= 66u) { b2 = upscaleType(val2); }
if (K_length >= 67u) { b3 = upscaleType(val3); }
if (K_length >= 68u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 68u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 67u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 66u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 65u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [68..71]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 69u) { val1 = inPtr[68u*BlockSize]; }
if (K_length >= 70u) { val2 = inPtr[69u*BlockSize]; }
if (K_length >= 71u) { val3 = inPtr[70u*BlockSize]; }
if (K_length >= 72u) { val4 = inPtr[71u*BlockSize]; }
// Convert to upscale type
if (K_length >= 69u) { b1 = upscaleType(val1); }
if (K_length >= 70u) { b2 = upscaleType(val2); }
if (K_length >= 71u) { b3 = upscaleType(val3); }
if (K_length >= 72u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 72u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 71u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 70u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 69u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [72..75]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 73u) { val1 = inPtr[72u*BlockSize]; }
if (K_length >= 74u) { val2 = inPtr[73u*BlockSize]; }
if (K_length >= 75u) { val3 = inPtr[74u*BlockSize]; }
if (K_length >= 76u) { val4 = inPtr[75u*BlockSize]; }
// Convert to upscale type
if (K_length >= 73u) { b1 = upscaleType(val1); }
if (K_length >= 74u) { b2 = upscaleType(val2); }
if (K_length >= 75u) { b3 = upscaleType(val3); }
if (K_length >= 76u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 76u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 75u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 74u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 73u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [76..79]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 77u) { val1 = inPtr[76u*BlockSize]; }
if (K_length >= 78u) { val2 = inPtr[77u*BlockSize]; }
if (K_length >= 79u) { val3 = inPtr[78u*BlockSize]; }
if (K_length >= 80u) { val4 = inPtr[79u*BlockSize]; }
// Convert to upscale type
if (K_length >= 77u) { b1 = upscaleType(val1); }
if (K_length >= 78u) { b2 = upscaleType(val2); }
if (K_length >= 79u) { b3 = upscaleType(val3); }
if (K_length >= 80u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 80u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 79u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 78u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 77u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [80..83]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 81u) { val1 = inPtr[80u*BlockSize]; }
if (K_length >= 82u) { val2 = inPtr[81u*BlockSize]; }
if (K_length >= 83u) { val3 = inPtr[82u*BlockSize]; }
if (K_length >= 84u) { val4 = inPtr[83u*BlockSize]; }
// Convert to upscale type
if (K_length >= 81u) { b1 = upscaleType(val1); }
if (K_length >= 82u) { b2 = upscaleType(val2); }
if (K_length >= 83u) { b3 = upscaleType(val3); }
if (K_length >= 84u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 84u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 83u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 82u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 81u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [84..87]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 85u) { val1 = inPtr[84u*BlockSize]; }
if (K_length >= 86u) { val2 = inPtr[85u*BlockSize]; }
if (K_length >= 87u) { val3 = inPtr[86u*BlockSize]; }
if (K_length >= 88u) { val4 = inPtr[87u*BlockSize]; }
// Convert to upscale type
if (K_length >= 85u) { b1 = upscaleType(val1); }
if (K_length >= 86u) { b2 = upscaleType(val2); }
if (K_length >= 87u) { b3 = upscaleType(val3); }
if (K_length >= 88u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 88u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 87u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 86u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 85u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [88..91]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 89u) { val1 = inPtr[88u*BlockSize]; }
if (K_length >= 90u) { val2 = inPtr[89u*BlockSize]; }
if (K_length >= 91u) { val3 = inPtr[90u*BlockSize]; }
if (K_length >= 92u) { val4 = inPtr[91u*BlockSize]; }
// Convert to upscale type
if (K_length >= 89u) { b1 = upscaleType(val1); }
if (K_length >= 90u) { b2 = upscaleType(val2); }
if (K_length >= 91u) { b3 = upscaleType(val3); }
if (K_length >= 92u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 92u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 91u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 90u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 89u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [92..95]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 93u) { val1 = inPtr[92u*BlockSize]; }
if (K_length >= 94u) { val2 = inPtr[93u*BlockSize]; }
if (K_length >= 95u) { val3 = inPtr[94u*BlockSize]; }
if (K_length >= 96u) { val4 = inPtr[95u*BlockSize]; }
// Convert to upscale type
if (K_length >= 93u) { b1 = upscaleType(val1); }
if (K_length >= 94u) { b2 = upscaleType(val2); }
if (K_length >= 95u) { b3 = upscaleType(val3); }
if (K_length >= 96u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 96u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 95u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 94u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 93u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [96..99]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 97u) { val1 = inPtr[96u*BlockSize]; }
if (K_length >= 98u) { val2 = inPtr[97u*BlockSize]; }
if (K_length >= 99u) { val3 = inPtr[98u*BlockSize]; }
if (K_length >= 100u) { val4 = inPtr[99u*BlockSize]; }
// Convert to upscale type
if (K_length >= 97u) { b1 = upscaleType(val1); }
if (K_length >= 98u) { b2 = upscaleType(val2); }
if (K_length >= 99u) { b3 = upscaleType(val3); }
if (K_length >= 100u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 100u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 99u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 98u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 97u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [100..103]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 101u) { val1 = inPtr[100u*BlockSize]; }
if (K_length >= 102u) { val2 = inPtr[101u*BlockSize]; }
if (K_length >= 103u) { val3 = inPtr[102u*BlockSize]; }
if (K_length >= 104u) { val4 = inPtr[103u*BlockSize]; }
// Convert to upscale type
if (K_length >= 101u) { b1 = upscaleType(val1); }
if (K_length >= 102u) { b2 = upscaleType(val2); }
if (K_length >= 103u) { b3 = upscaleType(val3); }
if (K_length >= 104u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 104u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 103u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 102u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 101u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [104..107]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 105u) { val1 = inPtr[104u*BlockSize]; }
if (K_length >= 106u) { val2 = inPtr[105u*BlockSize]; }
if (K_length >= 107u) { val3 = inPtr[106u*BlockSize]; }
if (K_length >= 108u) { val4 = inPtr[107u*BlockSize]; }
// Convert to upscale type
if (K_length >= 105u) { b1 = upscaleType(val1); }
if (K_length >= 106u) { b2 = upscaleType(val2); }
if (K_length >= 107u) { b3 = upscaleType(val3); }
if (K_length >= 108u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 108u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 107u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 106u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 105u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [108..111]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 109u) { val1 = inPtr[108u*BlockSize]; }
if (K_length >= 110u) { val2 = inPtr[109u*BlockSize]; }
if (K_length >= 111u) { val3 = inPtr[110u*BlockSize]; }
if (K_length >= 112u) { val4 = inPtr[111u*BlockSize]; }
// Convert to upscale type
if (K_length >= 109u) { b1 = upscaleType(val1); }
if (K_length >= 110u) { b2 = upscaleType(val2); }
if (K_length >= 111u) { b3 = upscaleType(val3); }
if (K_length >= 112u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 112u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 111u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 110u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 109u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [112..115]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 113u) { val1 = inPtr[112u*BlockSize]; }
if (K_length >= 114u) { val2 = inPtr[113u*BlockSize]; }
if (K_length >= 115u) { val3 = inPtr[114u*BlockSize]; }
if (K_length >= 116u) { val4 = inPtr[115u*BlockSize]; }
// Convert to upscale type
if (K_length >= 113u) { b1 = upscaleType(val1); }
if (K_length >= 114u) { b2 = upscaleType(val2); }
if (K_length >= 115u) { b3 = upscaleType(val3); }
if (K_length >= 116u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 116u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 115u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 114u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 113u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [116..119]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 117u) { val1 = inPtr[116u*BlockSize]; }
if (K_length >= 118u) { val2 = inPtr[117u*BlockSize]; }
if (K_length >= 119u) { val3 = inPtr[118u*BlockSize]; }
if (K_length >= 120u) { val4 = inPtr[119u*BlockSize]; }
// Convert to upscale type
if (K_length >= 117u) { b1 = upscaleType(val1); }
if (K_length >= 118u) { b2 = upscaleType(val2); }
if (K_length >= 119u) { b3 = upscaleType(val3); }
if (K_length >= 120u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 120u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 119u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 118u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 117u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [120..123]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 121u) { val1 = inPtr[120u*BlockSize]; }
if (K_length >= 122u) { val2 = inPtr[121u*BlockSize]; }
if (K_length >= 123u) { val3 = inPtr[122u*BlockSize]; }
if (K_length >= 124u) { val4 = inPtr[123u*BlockSize]; }
// Convert to upscale type
if (K_length >= 121u) { b1 = upscaleType(val1); }
if (K_length >= 122u) { b2 = upscaleType(val2); }
if (K_length >= 123u) { b3 = upscaleType(val3); }
if (K_length >= 124u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 124u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 123u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 122u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 121u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//-
// Process values [124..127]
//-
// Read in next 'four' values (32-bit)
if (K_length >= 125u) { val1 = inPtr[124u*BlockSize]; }
if (K_length >= 126u) { val2 = inPtr[125u*BlockSize]; }
if (K_length >= 127u) { val3 = inPtr[126u*BlockSize]; }
if (K_length >= 128u) { val4 = inPtr[127u*BlockSize]; }
// Convert to upscale type
if (K_length >= 125u) { b1 = upscaleType(val1); }
if (K_length >= 126u) { b2 = upscaleType(val2); }
if (K_length >= 127u) { b3 = upscaleType(val3); }
if (K_length >= 128u) { b4 = upscaleType(val4); }
// Bin first 'four' values into count array
if (K_length >= 128u)
{
// Process v1,v2,v3,v4
mapper.Transform4( bin1, bin2, bin3, bin4, // OUT => bins
b1, b2, b3, b4 ); // IN => values to transform
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
}
else
{
if (K_length == 127u)
{
// Process v1,v2,v3
mapper.Transform3( bin1, bin2, bin3, // OUT => bins
b1, b2, b3 ); // IN => values to transform
BinCount3<BlockSize>( cntPtr, bin1, bin2, bin3 );
}
if (K_length == 126u)
{
// Process v1,v2
mapper.Transform2( bin1, bin2, // OUT => bins
b1, b2 ); // IN => values to transform
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
}
if (K_length == 125u)
{
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
//
// Note: We could repeat the above pattern all the way up to
// K = [252..255] making sure to deliberately skip
// K = 255 (the 256th value) to avoid overflow
// However, somewhere around K = 104, we appear to overflow
// the hardware code cache anyway which negatively impacts
// performance, so we don't need to go all the way...
//
//-----
// Move to next row of work
//-----
currIdx += rowSize;
inPtr += rowSize;
// Increment 'overflow' count
overflow += K1_length; // K values
}
__syncthreads();
//--------------------------------------
// LAST: Process last leftover chunk
// with more careful range checking
//--------------------------------------
if (nLeftOverElems)
{
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow >= K1_stop)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
// NOTE #1: the 'K_length' variable below is a static
// hard-coded constant in the range [1..255].
// K = 'Work per thread' per loop (stride)...
// The compiler will take care of throwing away
// any unused code greater than our specified 'K'
// value, with no negative impact on performance.
// NOTE #2: We use a cooperative stride
// across each thread in each block in grid
// ChunkSize = BlockSize * GridSize = 64 * 48 = 3072
// RowSize = WorkPerThead(K) * ChunkSize = 63 * 3072 = 193,536
//
// B0 B1 ... B47 (Blocks in Grid)
// ---- ---- --- ----
// k = 1 => |64| |64| ... |64| (3072 Thread & I/O requests for 1st work item per thread)
// k = 2 => |64| |64| ... |64| ditto (2nd work item per thread)
// ... ... ...
// k = 63 => |64| |64| ... |64| ditto (63 work item per thread)
// NOTE #3: We use a "Divide & Conquer" approach
// to avoid as much slower range checking as possible
// We try batches of 128, 64, 32, 16, 8, 4, 2, 1,
// and then finally a leftover chunk (on which we must carefully range check)
//----
// Setup Pointers & Indices for cooperative stride
//----
U32 bid = (blockIdx.y * gridDim.x) + blockIdx.x; // Get block index
U32 nSkip = nSafeRows * rowSize; // Skip past already processed rows
U32 chunkIdx = (bid * BlockSize) + tid; // Get starting index within chunk
U32 baseIdx = start + nSkip + chunkIdx; // Get starting index for left over elements
U32 val1, val2, val3, val4;
upscaleType b1, b2, b3, b4;
binType bin1, bin2, bin3, bin4;
//------
// Try Section of 128
//------
//
// Note: We didn't bother to insert this code due to the "code cache" performance problem
// for K >= 104.
//
// If desired, repeat the pattern for the section of 64 below
// while doubling the # of elements processed.
//------
// Try Section of 64
//------
if (K_length >= 64u)
{
// Process 64 chunks safely without range checking
if (nLeftOverElems >= (64u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [33..36]
//-----
val1 = inPtr[(32u*nThreadsPerGrid)];
val2 = inPtr[(33u*nThreadsPerGrid)];
val3 = inPtr[(34u*nThreadsPerGrid)];
val4 = inPtr[(35u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [36..39]
//-----
val1 = inPtr[(36u*nThreadsPerGrid)];
val2 = inPtr[(37u*nThreadsPerGrid)];
val3 = inPtr[(38u*nThreadsPerGrid)];
val4 = inPtr[(39u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [40..43]
//-----
val1 = inPtr[(40u*nThreadsPerGrid)];
val2 = inPtr[(41u*nThreadsPerGrid)];
val3 = inPtr[(42u*nThreadsPerGrid)];
val4 = inPtr[(43u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [44..47]
//-----
val1 = inPtr[(44u*nThreadsPerGrid)];
val2 = inPtr[(45u*nThreadsPerGrid)];
val3 = inPtr[(46u*nThreadsPerGrid)];
val4 = inPtr[(47u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [48..51]
//-----
val1 = inPtr[(48u*nThreadsPerGrid)];
val2 = inPtr[(49u*nThreadsPerGrid)];
val3 = inPtr[(50u*nThreadsPerGrid)];
val4 = inPtr[(51u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [52..55]
//-----
val1 = inPtr[(52u*nThreadsPerGrid)];
val2 = inPtr[(53u*nThreadsPerGrid)];
val3 = inPtr[(54u*nThreadsPerGrid)];
val4 = inPtr[(55u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [56..59]
//-----
val1 = inPtr[(56u*nThreadsPerGrid)];
val2 = inPtr[(57u*nThreadsPerGrid)];
val3 = inPtr[(58u*nThreadsPerGrid)];
val4 = inPtr[(59u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [60..63]
//-----
val1 = inPtr[(60u*nThreadsPerGrid)];
val2 = inPtr[(61u*nThreadsPerGrid)];
val3 = inPtr[(62u*nThreadsPerGrid)];
val4 = inPtr[(63u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (64u * nThreadsPerGrid);
nLeftOverElems -= (64u * nThreadsPerGrid);
}
}
//------
// Try Section of 32
//------
if (K_length >= 32u)
{
// Process 32 chunks safely without range checking
if (nLeftOverElems >= (32u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [17..20]
//-----
val1 = inPtr[(16u*nThreadsPerGrid)];
val2 = inPtr[(17u*nThreadsPerGrid)];
val3 = inPtr[(18u*nThreadsPerGrid)];
val4 = inPtr[(19u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [21..24]
//-----
val1 = inPtr[(20u*nThreadsPerGrid)];
val2 = inPtr[(21u*nThreadsPerGrid)];
val3 = inPtr[(22u*nThreadsPerGrid)];
val4 = inPtr[(23u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [25..28]
//-----
val1 = inPtr[(24u*nThreadsPerGrid)];
val2 = inPtr[(25u*nThreadsPerGrid)];
val3 = inPtr[(26u*nThreadsPerGrid)];
val4 = inPtr[(27u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [29..32]
//-----
val1 = inPtr[(28u*nThreadsPerGrid)];
val2 = inPtr[(29u*nThreadsPerGrid)];
val3 = inPtr[(30u*nThreadsPerGrid)];
val4 = inPtr[(31u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (32u * nThreadsPerGrid);
nLeftOverElems -= (32u * nThreadsPerGrid);
}
}
//------
// Try Section of 16
//------
if (K_length >= 16u)
{
// Process 16 chunks safely without range checking
if (nLeftOverElems >= (16u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [9..12]
//-----
val1 = inPtr[( 8u*nThreadsPerGrid)];
val2 = inPtr[( 9u*nThreadsPerGrid)];
val3 = inPtr[(10u*nThreadsPerGrid)];
val4 = inPtr[(11u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [13..16]
//-----
val1 = inPtr[(12u*nThreadsPerGrid)];
val2 = inPtr[(13u*nThreadsPerGrid)];
val3 = inPtr[(14u*nThreadsPerGrid)];
val4 = inPtr[(15u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (16u * nThreadsPerGrid);
nLeftOverElems -= (16u * nThreadsPerGrid);
}
}
//------
// Try Section of 8
//------
if (K_length >= 8u)
{
// Process 8 chunks safely without range checking
if (nLeftOverElems >= (8u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
//-----
// Read & Bin [5..8]
//-----
val1 = inPtr[(4u*nThreadsPerGrid)];
val2 = inPtr[(5u*nThreadsPerGrid)];
val3 = inPtr[(6u*nThreadsPerGrid)];
val4 = inPtr[(7u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (8u * nThreadsPerGrid);
nLeftOverElems -= (8u * nThreadsPerGrid);
}
}
//------
// Try Section of 4
//------
if (K_length >= 4u)
{
// Process 4 chunks safely without range checking
if (nLeftOverElems >= (4u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..4]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
val3 = inPtr[(2u*nThreadsPerGrid)];
val4 = inPtr[(3u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
b3 = upscaleType(val3);
b4 = upscaleType(val4);
// Process v1, v2, v3, v4
mapper.Transform4( bin1, bin2, bin3, bin4,
b1, b2, b3, b4 );
BinCount4<BlockSize>( cntPtr, bin1, bin2, bin3, bin4 );
// Move to next section
baseIdx += (4u * nThreadsPerGrid);
nLeftOverElems -= (4u * nThreadsPerGrid);
}
}
//------
// Try Section of 2
//------
if (K_length >= 2u)
{
// Process 2 chunks safely without range checking
if (nLeftOverElems >= (2u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1..2]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
val2 = inPtr[(1u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
b2 = upscaleType(val2);
// Process v1, v2
mapper.Transform2( bin1, bin2, b1, b2 );
BinCount2<BlockSize>( cntPtr, bin1, bin2 );
// Move to next section
baseIdx += (2u * nThreadsPerGrid);
nLeftOverElems -= (2u * nThreadsPerGrid);
}
}
//------
// Try Section of 1
//------
if (K_length >= 1u)
{
// Process 1 chunk safely without range checking
if (nLeftOverElems >= (1u * nThreadsPerGrid))
{
// Get pointer
inPtr = &inVals[baseIdx];
//-----
// Read & Bin [1]
//-----
val1 = inPtr[(0u*nThreadsPerGrid)];
// Convert to upscale type
b1 = upscaleType(val1);
// Process v1
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
// Move to next section
baseIdx += (1u * nThreadsPerGrid);
nLeftOverElems -= (1u * nThreadsPerGrid);
}
}
//------
// Process Last few elements
// with careful RANGE CHECKING !!!
//------
if (nLeftOverElems > 0u)
{
// Make sure we are 'in range' before reading & binning
U32 inRange1 = (baseIdx <= stop);
if (inRange1)
{
// Read in 32-bit element
val1 = inVals[baseIdx];
// Process single element
b1 = upscaleType(val1);
mapper.Transform1( bin1, b1 );
BinCount1<BlockSize>( cntPtr, bin1 );
}
}
// Update Accumulation count
overflow += K1_length; // overflow += K elements
}
// Cleanup Mapping object
// (Give mapper a chance to cleanup any resources)
mapper.Finish();
//-----
// Accumulate 'thread' counts into 'row' counts
// Note: Also zeros out 'per thread' count array
//-----
if (overflow > 0u)
{
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
//AddThreadToRowCounts_V1< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
AddThreadToRowCounts_V2< BlockSize, BlockMask >( rowCnt1, rowCnt2, rowCnt3, rowCnt4, basePtr, tid );
overflow = 0u;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
}
//-------------------------------------------------
// Write out final row 'counts'
//-------------------------------------------------
{
// Compute starting 'row counts' offset
U32 rIdx = threadIdx.x * 4u; // 4 row counts per thread
U32 rRow = rIdx >> logBankSize;
U32 rCol = rIdx & BankMask;
U32 rowIdx = (rRow * strideBank) + (rCol + 1u);
// Extra '+1' to shift past initial pad element
U32 * rowPtr = &s_thrdCounts[rowIdx];
// Store row counts in row array
rowPtr[0] = rowCnt1;
rowPtr[1] = rowCnt2;
rowPtr[2] = rowCnt3;
rowPtr[3] = rowCnt4;
// Sync Threads in Block
if (WarpsPerBlock >= 2u) { __syncthreads(); }
// Get Warp Row & Column
U32 warpRow = threadIdx.x >> logWarpSize; // tid / 32
U32 warpCol = threadIdx.x & WarpMask; // tid % 32
// Get local & global indices
U32 outGlobal = (blockIdx.x * nHistBins);
U32 outLocal = (warpRow * OutWarpSize);
U32 rowBase = (warpRow * OutStrideSize);
U32 outBase = outGlobal + outLocal;
U32 rowOff = warpCol + 1u;
U32 outIdx = outBase + warpCol;
rowIdx = rowBase + rowOff;
// Get local & global pointers
U32 * outPtr = &outRowCounts[outIdx];
rowPtr = &s_thrdCounts[rowIdx];
// Write our 'per row' counts in warp sequential order
if (OutLength >= 1u) { outPtr[(0u*WarpSize)] = rowPtr[(0u*strideBank)]; }
if (OutLength >= 2u) { outPtr[(1u*WarpSize)] = rowPtr[(1u*strideBank)]; }
if (OutLength >= 3u) { outPtr[(2u*WarpSize)] = rowPtr[(2u*strideBank)]; }
if (OutLength >= 4u) { outPtr[(3u*WarpSize)] = rowPtr[(3u*strideBank)]; }
if (OutLength >= 5u) { outPtr[(4u*WarpSize)] = rowPtr[(4u*strideBank)]; }
if (OutLength >= 6u) { outPtr[(5u*WarpSize)] = rowPtr[(5u*strideBank)]; }
if (OutLength >= 7u) { outPtr[(6u*WarpSize)] = rowPtr[(6u*strideBank)]; }
if (OutLength >= 8u) { outPtr[(7u*WarpSize)] = rowPtr[(7u*strideBank)]; }
}
}
//-----------------------------------------------
// Name: K2_TRISH_RowCounts_To_RowStarts
// Desc: Sum 256-way 'per row' counts into
// total 256-way counts using prefix-sum
//------------------------------------------------
template <
U32 logBankSize, // log<2>( Channels per Bank )
U32 logWarpSize, // log<2>( Threads Per Warp )
U32 BlockSize // Threads Per Block
>
__global__
void K2_TRISH_RowCounts_To_RowStarts
(
U32 * outTotalCounts, // OUT - total counts
U32 * outTotalStarts, // OUT - total starts
U32 * outRowStarts, // OUT - row starts
const U32 * inRowCounts, // IN - 'per row' counts to accumulate
U32 nRows // IN - number of rows to accumulate
)
{
//------------------------------------
// Constant values
//------------------------------------
// Memory Channels Per Bank
const U32 BankSize = 1u << logBankSize; // 32 (or 16)
const U32 BankMask = BankSize - 1u; // 31 (or 15)
// Threads Per Warp
const U32 WarpSize = 1u << logWarpSize; // 32
const U32 WarpMask = WarpSize - 1u; // 31
// Warps Per Block
const U32 WarpsPerBlock = BlockSize / WarpSize; // 8 = 256 / 32
// Size of 'Row Counts' and 'Row Starts' array
//const U32 nElemsCounts = 256;
//const U32 banksCounts = (nElemsCounts + BankMask) / BankSize;
//const U32 padCounts = ((banksCounts * BankSize) - nElemsCounts);
//const U32 sizeCounts = nElemsCounts + padCounts;
// Stride for padded bank of elements
const U32 strideBank = 1u + BankSize;
// Serial Scan Array
const U32 nSS1 = 256u + 2u;
const U32 nRowsSS1 = (nSS1 + BankMask) / BankSize;
const U32 nElemsSS1 = nRowsSS1 * strideBank;
const U32 banksSS1 = (nElemsSS1 + BankMask) / BankSize;
const U32 padSS1 = ((banksSS1 * BankSize) - nElemsSS1);
const U32 sizeSS1 = nElemsSS1 + padSS1;
// WarpScan array
const U32 strideWS2 = WarpSize
+ (WarpSize >> 1u)
+ 1u; // 49 = (32 + 16 + 1)
const U32 nWarpsWS2 = 1u;
const U32 nElemsWS2 = nWarpsWS2 * strideWS2;
const U32 banksWS2 = (nElemsWS2 + BankMask) / BankSize;
const U32 padWS2 = ((banksWS2 * BankSize) - nElemsWS2);
const U32 sizeWS2 = nElemsWS2 + padWS2;
//const U32 nSafePassesCnts = sizeCounts / BlockSize;
//const U32 leftOverCnts = sizeCounts - (nSafePassesCnts * BlockSize);
const U32 nSafePassesSS1 = sizeSS1 / BlockSize;
const U32 leftOverSS1 = sizeSS1 - (nSafePassesSS1 * BlockSize);
const U32 nSafePassesWS2 = sizeWS2 / BlockSize;
const U32 leftOverWS2 = sizeWS2 - (nSafePassesWS2 * BlockSize);
//------------------------------------
// Local variables
//------------------------------------
// shared memory
//__shared__ U32 s_rowStarts[sizeCounts]; // 'Row Starts' one chunk at a time
__shared__ U32 s_ss1[sizeSS1]; // Used for serial scan
__shared__ U32 s_ws2[sizeWS2]; // Used for parallel warp scan
// Registers
U32 tSum; // Per thread accumulator
//------------------------------------
// Compute Indices & Pointers
//------------------------------------
U32 warpRow, warpCol;
U32 storeIdx, prevIdx, ss1Idx, ws2Idx;
{
// Compute Bank Offsets
//U32 bankRow = threadIdx.x >> logBankSize; // tid / 32
U32 bankCol = threadIdx.x & BankMask; // tid % 32
// Compute warp offsets
warpRow = threadIdx.x >> logWarpSize; // tid / 32
warpCol = threadIdx.x & WarpMask; // tid % 32
// Compute Store index (for storing final counts before prefix sum)
U32 sIdx = threadIdx.x;
U32 storeRow = sIdx >> logBankSize; // tid / 32
U32 storeCol = sIdx & BankMask; // tid % 32
storeIdx = (storeRow * strideBank)
+ storeCol
+ 2u; // Pad for 'reach back'
//--
// Previous Column (Serial Scan 1)
// 1.) Reach back one column
// 2.) But, we need to skip over extra padding before the first
// thread in every bank, so reach back two columns
// However, the very first thread in the very first bank needs
// to be able to reach back safely 2 columns without going 'out of range'.
//
// We work around this by pre-padding the 's_ss1' array with
// an extra 2 elements and shifting indices over by two as needed to skip over padding.
//--
U32 prevCol = ((bankCol == 0u) ? 2u : 1u);
prevIdx = storeIdx - prevCol;
// Compute Serial Scan index
U32 ssIdx = threadIdx.x * 8u;
U32 ss1Row = ssIdx >> logBankSize; // (tid*8) / 32
U32 ss1Col = ssIdx & BankMask; // (tid*8) % 32
ss1Idx = (ss1Row * strideBank)
+ ss1Col
+ 2u; // pad for 'reach back'
// Compute Warp Scan Index
ws2Idx = (warpRow * strideWS2)
+ (WarpSize >> 1u)
+ warpCol;
}
//------------------------------------
// Zero out 'arrays'
//------------------------------------
U32 * setPtr = NULL;
//-
// Zero out 'row starts' array
//-
//setPtr = (&s_rowStarts[0]);
//SetArray_BlockSeq
// <
// U32, BlockSize, nSafePassesCnts,
// leftOverCnts, sizeCounts
// >
// (
// setPtr, 0u
// );
//-
// Zero out 'Serial Scan' array
//-
setPtr = (&s_ss1[0]);
SetArray_BlockSeq
<
U32, BlockSize, nSafePassesSS1,
leftOverSS1, sizeSS1
>
(
setPtr, 0u
);
//-
// Zero out 'Warp Scan' array
//-
setPtr = (&s_ws2[0]);
SetArray_BlockSeq
<
U32, BlockSize, nSafePassesWS2,
leftOverWS2, sizeWS2
>
(
setPtr, 0u
);
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
//-------------------------------------------------
// Phase 1:
// Serial Reduction of all rows of 'per row' counts
// down to single set of 'total' counts
//-------------------------------------------------
{
const U32 * inPtr = &inRowCounts[threadIdx.x];
// Initialize 'Thread Sum' to identity value
tSum = 0;
// Loop over row counts
#pragma unroll
for (U32 currPass = 0u; currPass < nRows; currPass++)
{
// Grab count from global array
U32 currCnt = inPtr[0];
// Accumulate 'per row' counts into a 'total' count
tSum = tSum + currCnt;
// Move to next set of 'row counts' to process
inPtr += BlockSize;
}
// Store the 'total counts'
outTotalCounts[threadIdx.x] = tSum;
// Also store 'total counts' into 'Serial Scan' array
s_ss1[storeIdx] = tSum;
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
}
//--------------------------------------
// Phase 2:
// convert 'total counts' into 'total starts'
// using prefix sum
//--------------------------------------
if (warpRow == 0)
{
volatile U32 * wsPtr = (U32 *)&(s_ws2[0]);
U32 * SS1_ptr = &s_ss1[ss1Idx];
// For higher performance, we use registers instead of shared memory
// Tradeoff - lots of register pressure (8 registers per thread)
U32 ss01, ss02, ss03, ss04;
U32 ss05, ss06, ss07, ss08;
//-----
// Serial Scan (on short sequence of 8 values)
//-----
// Grab short sequence of 8 values from ss1 array
ss01 = SS1_ptr[0];
ss02 = SS1_ptr[1];
ss03 = SS1_ptr[2];
ss04 = SS1_ptr[3];
ss05 = SS1_ptr[4];
ss06 = SS1_ptr[5];
ss07 = SS1_ptr[6];
ss08 = SS1_ptr[7];
// Serial scan short sequence (in registers)
//ss01 = <identity> + ss01;
ss02 = ss01 + ss02;
ss03 = ss02 + ss03;
ss04 = ss03 + ss04;
ss05 = ss04 + ss05;
ss06 = ss05 + ss06;
ss07 = ss06 + ss07;
ss08 = ss07 + ss08;
//-
// Store final serial scan result into warp scan array
//-
U32 wi = ws2Idx;
tSum = ss08;
wsPtr[wi] = tSum;
//-----
// Warp Scan (on 32 threads in parallel)
//-----
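// Note: This is a Hillis-Steele style inclusive scan. The 'ws2Idx' layout above
// reserves a zero-filled half-warp (16 slots) of padding in front of each warp's
// row, so the 'wi - 1' through 'wi - 16' reads below never go out of range --
// out-of-range lanes simply pick up zeros instead of needing a branch.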
wsPtr[wi] = tSum = wsPtr[wi - 1u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 2u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 4u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 8u] + tSum;
wsPtr[wi] = tSum = wsPtr[wi - 16u] + tSum;
//-----
// Serial Update (on short sequence of 8 values)
//-----
//-
// Grab update (prefix) value from Warp Array
//-
// Note: Need to reach back 'one column' to get exclusive result
U32 prevWI = wi - 1u;
tSum = wsPtr[prevWI];
//-
// Update each element short sequence with prefix (in registers)
//-
ss01 = tSum + ss01;
ss02 = tSum + ss02;
ss03 = tSum + ss03;
ss04 = tSum + ss04;
ss05 = tSum + ss05;
ss06 = tSum + ss06;
ss07 = tSum + ss07;
ss08 = tSum + ss08;
// Store 'prefix sum' results back in 'serial scan' array
SS1_ptr[0] = ss01;
SS1_ptr[1] = ss02;
SS1_ptr[2] = ss03;
SS1_ptr[3] = ss04;
SS1_ptr[4] = ss05;
SS1_ptr[5] = ss06;
SS1_ptr[6] = ss07;
SS1_ptr[7] = ss08;
} // end warpRow == 0
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
//-----
// Grab starting 'row start' (total sum) for this thread
// Note #1: Need to 'reach back' one column for exclusive results
// Note #2: This will result in an unavoidable '2-way' bank conflict
//-----
U32 rowSum = s_ss1[prevIdx];
// Store total starts (from previous column)
outTotalStarts[threadIdx.x] = rowSum;
// Sync all threads in block
if (WarpsPerBlock > 2u) { __syncthreads(); }
//-------------------------------------------------
// Phase 3:
// Accumulate and write out 'per row' starts
//-------------------------------------------------
{
const U32 * inPtr = &inRowCounts[threadIdx.x];
U32 * outPtr = &outRowStarts[threadIdx.x];
// 'rowSum' already holds this thread's exclusive row start from Phase 2
// Loop over row counts
#pragma unroll
for (U32 currPass = 0u; currPass < nRows; currPass++)
{
// Read 'in' current count from global array
U32 currCnt = inPtr[0];
// Write 'out' current row sum to global array
outPtr[0] = rowSum;
// Accumulate 'per row' count into running 'row sum' start
rowSum = rowSum + currCnt;
//-
// Move to next row
//-
inPtr += BlockSize;
outPtr += BlockSize;
}
// Sync all threads in block
//if (WarpsPerBlock > 2u) { __syncthreads(); }
}
}
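//-----------------------------------------------
// Name: CPU_RowCounts_To_RowStarts_Reference
// Desc: Hedged, host-side reference sketch of what the K2 kernel above computes,
//       useful for spot-checking results. Assumes nHistBins = 256 and the
//       bin-major 'row * 256 + bin' layout used by d_rowCounts below.
//       Illustrative only -- not called anywhere in this file.
//-----------------------------------------------
static inline void CPU_RowCounts_To_RowStarts_Reference
(
	U32       * outTotalCounts, // OUT - [256]        total counts
	U32       * outTotalStarts, // OUT - [256]        exclusive prefix sums of total counts
	U32       * outRowStarts,   // OUT - [nRows*256]  running start per row & bin
	const U32 * inRowCounts,    // IN  - [nRows*256]  'per row' counts
	U32         nRows           // IN  - number of rows
)
{
	U32 runningStart = 0u;
	for (U32 bin = 0u; bin < 256u; bin++)
	{
		// Total count for this bin across all rows
		U32 total = 0u;
		for (U32 row = 0u; row < nRows; row++)
		{
			total += inRowCounts[(row * 256u) + bin];
		}
		outTotalCounts[bin] = total;
		// Exclusive scan over bins gives this bin's global start
		outTotalStarts[bin] = runningStart;
		// Per-row starts: each row begins where the previous rows left off
		U32 rowStart = runningStart;
		for (U32 row = 0u; row < nRows; row++)
		{
			outRowStarts[(row * 256u) + bin] = rowStart;
			rowStart += inRowCounts[(row * 256u) + bin];
		}
		runningStart += total;
	}
}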
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU TRISH histogram
////////////////////////////////////////////////////////////////////////////////
/*-----------------
Local Defines
-----------------*/
// Number of SM's per GPU
#if (GPU_GTX_560M == GPU_PLATFORM)
#define NUM_GPU_SMs (4u)
#elif (GPU_TELSA_M2050 == GPU_PLATFORM)
#define NUM_GPU_SMs (14u)
#elif (GPU_GTX_480 == GPU_PLATFORM)
#define NUM_GPU_SMs (15u)
#elif (GPU_GTX_580 == GPU_PLATFORM)
#define NUM_GPU_SMs (16u)
#elif (GPU_GTX_680 == GPU_PLATFORM)
#define NUM_GPU_SMs (8u)
#else
// Unknown GPU - assume 16 SM's for now...
#define NUM_GPU_SMs (16u)
#endif
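//-----------------------------------------------
// Note: As an alternative to the compile-time table above, the SM count can be
//       queried at runtime. Minimal sketch (assumes the current CUDA device is
//       the one the kernels run on); provided for reference, not used below.
//-----------------------------------------------
static U32 getNumSMs( void )
{
	int devID = 0;
	cudaDeviceProp props;
	cudaGetDevice( &devID );
	cudaGetDeviceProperties( &props, devID );
	return (U32)props.multiProcessorCount;
}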
// Intermediate CUDA buffers
static U32 * d_rowCounts = NULL;
static U32 * d_rowStarts = NULL;
static U32 * d_totalStarts = NULL;
//-----------------------------------------------
// Name: initTrish256
// Desc: Initialize intermediate GPU Buffers
//-----------------------------------------------
extern "C"
void initTrish256( void )
{
// Local Constants
const U32 nHistBins256 = 256u;
const U32 nGPU_SMs = NUM_GPU_SMs;
const U32 nGPU_ConcurrentBlocks = 3u;
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks;
const U32 K1_nRows = K1_GridSize;
const U32 sizeRowCounts = K1_nRows * nHistBins256 * sizeof(U32);
const U32 sizeTotal = nHistBins256 * sizeof(U32);
// Create intermediate GPU buffers
cutilSafeCall( cudaMalloc( (void **)&d_rowCounts, sizeRowCounts ) );
cutilSafeCall( cudaMalloc( (void **)&d_rowStarts, sizeRowCounts ) );
cutilSafeCall( cudaMalloc( (void **)&d_totalStarts, sizeTotal ) );
}
//-----------------------------------------------
// Name: closeTrish256
// Desc: cleanup intermediate GPU buffers
//-----------------------------------------------
extern "C"
void closeTrish256( void )
{
// Destroy Intermediate GPU buffers
cutilSafeCall( cudaFree( d_totalStarts ) );
cutilSafeCall( cudaFree( d_rowStarts ) );
cutilSafeCall( cudaFree( d_rowCounts ) );
}
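//-----------------------------------------------
// Usage sketch (illustrative only):
//   The wrapper functions below assume initTrish256() has already created the
//   intermediate buffers, e.g.
//
//      initTrish256();                                // once, at startup
//      genTrishByteU8( d_Hist, d_Data, byteCount,     // per histogram
//                      0u, 255u, 256u );
//      closeTrish256();                               // once, at shutdown
//
//   'd_Hist' and 'd_Data' are device buffers owned by the caller.
//-----------------------------------------------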
//---------------------------------------------------------
// Name: genTrishByteU8
// Desc: CPU Wrapper function around
// generalized TRISH histogram for byte data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishByteU8
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 byteCount, // IN - length of input data array
U32 minVal, // IN - minVal
U32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
const U32 K1_Length = 31u; // 31 = Work Per thread (loop unrolling)
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // Number of rows (blocks) cooperatively striding across the input data set
//-----
// Get number of elements
//-----
assert( byteCount > 0u );
assert( byteCount % sizeof(U32) == 0u );
U32 nElems = byteCount >> 2u; // byteCount/4
U32 in_stop = nElems - 1u;
const U32 * d_inVals = (const U32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
U32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values <= min
1u // Range check for values >= max
> MapperU8;
K1_TRISH_CountRows_GEN_B1
<
// Template Parameters
U32, // underlying value type
MapperU8, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>
<<<
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
>>>
(
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B1() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>
<<<
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
>>>
(
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B1< U32, MapperU8 >
(
nElems, (U32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
//---------------------------------------------------------
// Name: genTrishWordU16
// Desc: CPU Wrapper function around
// generalized TRISH histogram for word data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishWordU16
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 wordCount, // IN - length of input data array
U32 minVal, // IN - minVal
U32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
// Efficiency Formula = (Floor(127/k)*k)/127
// Ideal k-values = 1, 127 (IE efficiency = 1)
// Best k-values otherwise = 2,3,6,7,9,14,18,21,42,63
// Also try 25 & 31 (Local Maxima)
// Worst k-values = 50 (0.504) and 43 (0.677) and 32 (0.756)
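// Worked example (for reference): K = 63 gives Floor(127/63)*63 = 126 useful
// elements out of 127, i.e. efficiency = 126/127 ~= 0.992, matching the table below.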
//const U32 K1_Length = 1u; // 1, Efficiency = 1.0, Throughput = 17.75 GB/s (480), *POOR ILP*
//const U32 K1_Length = 2u; // 2, Efficiency = 0.992, Throughput = 27.20 GB/s (480), *POOR ILP*
//const U32 K1_Length = 3u; // 3, Efficiency = 0.992, Throughput = 32.71 GB/s (480), *POOR ILP*
//const U32 K1_Length = 6u; // 6, Efficiency = 0.992, Throughput = 40.02 GB/s (480)
//const U32 K1_Length = 7u; // 7, Efficiency = 0.992, Throughput = 41.50 GB/s (480)
//const U32 K1_Length = 9u; // 9, Efficiency = 0.992, Throughput = 40.21 GB/s (480)
//const U32 K1_Length = 14u; // 14, Efficiency = 0.992, Throughput = 43.56 GB/s (480)
//const U32 K1_Length = 18u; // 18, Efficiency = 0.992, Throughput = 44.08 GB/s (480)
//const U32 K1_Length = 21u; // 21, Efficiency = 0.992, Throughput = 43.74 GB/s (480)
//const U32 K1_Length = 25u; // 25, Efficiency = 0.984, Throughput = 44.21 GB/s (480)
//const U32 K1_Length = 31u; // 31, Efficiency = 0.976, Throughput = 44.29 GB/s (480)
//const U32 K1_Length = 42u; // 42, Efficiency = 0.992, Throughput = 44.43 GB/s (480)
const U32 K1_Length = 63u; // 63, Efficiency = 0.992, Throughput = 45.66 GB/s (480), *BEST* result
//const U32 K1_Length = 64u; // 64, Efficiency = 0.504, Throughput = 41.10 GB/s (480), *WORST* Efficiency
//const U32 K1_Length = 105u; // 105, Efficiency = 0.827, Throughput = 44.86 GB/s (480), Good result, Program probably still fits in code cache...
//const U32 K1_Length = 106u; // 106, Efficiency = 0.835, Throughput = 42.60 GB/s (480), Starts declining, ??? Program too large to fit in code cache ???
//const U32 K1_Length = 127u; // 127, Efficiency = 1.0, Throughput = 26.16 GB/s (480), *POOR* performance, ??? Program too large to fit in code cache ???
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // Number of rows (blocks) cooperatively striding across the input data set
//-----
// Get number of elements
//-----
assert( wordCount > 0u );
assert( wordCount % 4 == 0u );
U32 nElems = wordCount >> 1u; // wordCount/2
U32 in_stop = nElems - 1u;
const U32 * d_inVals = (const U32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
U32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values < min
1u // Range check for values > max
> MapperU16;
K1_TRISH_CountRows_GEN_B2
<
// Template Parameters
U32, // underlying value type
MapperU16, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>
<<<
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
>>>
(
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B2() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>
<<<
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
>>>
(
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B2< U32, MapperU16 >
(
nElems, (U32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
//---------------------------------------------------------
// Name: genTrishDWordU32
// Desc: CPU Wrapper function around
// generalized TRISH histogram for DWORD data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishDWordU32
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 dwordCount, // IN - length of input data array
U32 minVal, // IN - minVal
U32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
// Efficiency Formula = (Floor(255/k)*k)/255
// Ideal k-values = 1, 3, 5, 15, 17, 51, 85, 255
// Best k-values otherwise = 2, 4, 6, 7, 9, 10, 11, 12, 14, 18, 21, 23, 25, 28, 36, 42, 50, 63, 84, 125, 126, 127, 253, 254
// Worst k-values = 128 (0.502) & 86 (0.675) & 64 (0.753)
// K >= 105 => code won't fit in cache
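// Worked example (for reference): K = 15 gives Floor(255/15)*15 = 255 useful
// elements out of 255, i.e. efficiency = 1.0, which is why 15 is listed as ideal.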
//const U32 K1_Length = 1u; // 1, Efficiency = 1.0, Throughput = 19.66 GB/s (480), *POOR ILP*
//const U32 K1_Length = 2u; // 2, Efficiency = 0.996, Throughput = 34.16 GB/s (480), *POOR ILP*
//const U32 K1_Length = 3u; // 3, Efficiency = 1.0, Throughput = 44.90 GB/s (480), *POOR ILP*
//const U32 K1_Length = 4u; // 4, Efficiency = 0.988, Throughput = 52.03 GB/s (480), *POOR ILP*
//const U32 K1_Length = 5u; // 5, Efficiency = 1.0, Throughput = 56.56 GB/s (480),
//const U32 K1_Length = 6u; // 6, Efficiency = 0.988, Throughput = 60.32 GB/s (480)
//const U32 K1_Length = 7u; // 7, Efficiency = 0.988, Throughput = 53.07 GB/s (480)
//const U32 K1_Length = 9u; // 9, Efficiency = 0.988, Throughput = 59.97 GB/s (480)
//const U32 K1_Length = 10u; // 10, Efficiency = 0.980, Throughput = 61.61 GB/s (480)
//const U32 K1_Length = 11u; // 11, Efficiency = 0.992, Throughput = 62.57 GB/s (480)
//const U32 K1_Length = 12u; // 12, Efficiency = 0.988, Throughput = 62.00 GB/s (480)
//const U32 K1_Length = 14u; // 14, Efficiency = 0.988, Throughput = 64.24 GB/s (480)
const U32 K1_Length = 15u; // 15, Efficiency = 1.0, Throughput = 65.05 GB/s (480) *BEST*
//const U32 K1_Length = 16u; // 16, Efficiency = 0.941, Throughput = 63.14 GB/s (480)
//const U32 K1_Length = 17u; // 17, Efficiency = 1.0, Throughput = 63.06 GB/s (480)
//const U32 K1_Length = 18u; // 18, Efficiency = 0.988, Throughput = 58.58 GB/s (480)
//const U32 K1_Length = 21u; // 21, Efficiency = 0.988, Throughput = 59.07 GB/s (480)
//const U32 K1_Length = 23u; // 23, Efficiency = 0.992, Throughput = 59.99 GB/s (480)
//const U32 K1_Length = 25u; // 25, Efficiency = 0.980, Throughput = 61.24 GB/s (480)
//const U32 K1_Length = 28u; // 28, Efficiency = 0.988, Throughput = 62.17 GB/s (480)
//const U32 K1_Length = 36u; // 36, Efficiency = 0.988, Throughput = 58.93 GB/s (480)
//const U32 K1_Length = 42u; // 42, Efficiency = 0.988, Throughput = 60.09 GB/s (480)
//const U32 K1_Length = 50u; // 50, Efficiency = 0.980, Throughput = 62.01 GB/s (480)
//const U32 K1_Length = 51u; // 51, Efficiency = 1.0, Throughput = 62.46 GB/s (480)
//const U32 K1_Length = 63u; // 63, Efficiency = 0.988, Throughput = 62.88 GB/s (480),
//const U32 K1_Length = 84u; // 84, Efficiency = 0.988, Throughput = 64.62 GB/s (480),
//const U32 K1_Length = 85u; // 85, Efficiency = 1.0, Throughput = 64.17 GB/s (480),
//const U32 K1_Length = 86u; // 86, Efficiency = 0.675, Throughput = 60.61 GB/s (480), *POOR EFFICIENCY*
//const U32 K1_Length = 125u; // 125, Efficiency = 0.980, Throughput = 65.41 GB/s (480), *BEST*
//const U32 K1_Length = 126u; // 126, Efficiency = 0.988, Throughput = 65.55 GB/s (480), *BEST of the BEST*
//const U32 K1_Length = 127u; // 127, Efficiency = 0.996, Throughput = 65.13 GB/s (480), *BEST*
//const U32 K1_Length = 128u; // 128, Efficiency = 0.502, Throughput = 59.59 GB/s (480), *WORST EFFICIENCY*
// K=[105..255], code probably won't fit in cache
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // Number of rows (blocks) cooperatively striding across the input data set
//-----
// Get number of elements
//-----
assert( dwordCount > 0u );
assert( dwordCount % 4 == 0u );
U32 nElems = dwordCount;
U32 in_stop = nElems - 1u;
const U32 * d_inVals = (const U32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
U32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values < min
1u // Range check for values > max
> MapperU32;
K1_TRISH_CountRows_GEN_B4
<
// Template Parameters
U32, // underlying value type
MapperU32, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>
<<<
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
>>>
(
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B2() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>
<<<
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
>>>
(
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B4< U32, MapperU32 >
(
nElems, (U32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
//---------------------------------------------------------
// Name: genTrishFloatF32
// Desc: CPU Wrapper function around
// generalized TRISH histogram for FLOAT data
// invoked by "genTRISH" demo
//---------------------------------------------------------
extern "C"
void genTrishFloatF32
(
// Function Parameters
U32 * d_Histogram, // OUT - Final 256-way histogram counts
void * d_Data, // IN - input data to bin & count into histogram
U32 floatCount, // IN - length of input data array
F32 minVal, // IN - minVal
F32 maxVal, // IN - maxVal
U32 numBins // IN - number of bins
)
{
//-----
// Local Constants
//-----
// Note: The best # of blocks for the TRISH algorithm appears to be
// The # of SM's on the card * the number of concurrent blocks.
// This is the minimum needed to use all hardware resources effectively.
//
// For Example: On the following Fermi cards, the grid sizes for best performance would be ...
// GTX 560M = 12 = 4 * 3
// TESLA M2050 = 42 = 14 * 3
// GTX 480 = 45 = 15 * 3
// GTX 580 = 48 = 16 * 3
const U32 nGPU_SMs = NUM_GPU_SMs; // See #defines above
const U32 nGPU_ConcurrentBlocks = 3u; // for Fermi architectures, we can achieve 3 concurrent blocks per SM (64 * 3 = 192 => 192/1536 => 12.5% occupancy)
const U32 logBankSize = 5u; // 5 = log<2>( Memory Banks )
const U32 logWarpSize = 5u; // 5 = log<2>( Threads per Warp )
const U32 K1_BlockSize = 64u; // 64 = Threads per Block (Histogram Kernel)
const U32 K1_GridSize = nGPU_SMs * nGPU_ConcurrentBlocks; // GridSize (Histogram Kernel)
const U32 K2_BlockSize = 256u; // 256 = Threads per Block (RowSum Kernel)
const U32 K2_GridSize = 1u; // 1 = GridSize (RowSum Kernel)
// Efficiency Formula = (Floor(255/k)*k)/255
// Ideal k-values = 1, 3, 5, 15, 17, 51, 85, 255
// Best k-values otherwise = 2, 4, 6, 7, 9, 10, 11, 12, 14, 18, 21, 23, 25, 28, 36, 42, 50, 63, 84, 125, 126, 127, 253, 254
// Worst k-values = 128 (0.502) & 86 (0.675) & 64 (0.753)
// K >= 105 => code won't fit in cache
//const U32 K1_Length = 1u; // 1, Efficiency = 1.0, Throughput = 19.66 GB/s (480), *POOR ILP*
//const U32 K1_Length = 2u; // 2, Efficiency = 0.996, Throughput = 34.16 GB/s (480), *POOR ILP*
//const U32 K1_Length = 3u; // 3, Efficiency = 1.0, Throughput = 44.90 GB/s (480), *POOR ILP*
//const U32 K1_Length = 4u; // 4, Efficiency = 0.988, Throughput = 52.03 GB/s (480), *POOR ILP*
//const U32 K1_Length = 5u; // 5, Efficiency = 1.0, Throughput = 56.56 GB/s (480),
//const U32 K1_Length = 6u; // 6, Efficiency = 0.988, Throughput = 60.32 GB/s (480)
//const U32 K1_Length = 7u; // 7, Efficiency = 0.988, Throughput = 53.07 GB/s (480)
//const U32 K1_Length = 9u; // 9, Efficiency = 0.988, Throughput = 59.97 GB/s (480)
//const U32 K1_Length = 10u; // 10, Efficiency = 0.980, Throughput = 61.61 GB/s (480)
//const U32 K1_Length = 11u; // 11, Efficiency = 0.992, Throughput = 62.57 GB/s (480)
//const U32 K1_Length = 12u; // 12, Efficiency = 0.988, Throughput = 62.00 GB/s (480)
//const U32 K1_Length = 14u; // 14, Efficiency = 0.988, Throughput = 64.24 GB/s (480)
const U32 K1_Length = 15u; // 15, Efficiency = 1.0, Throughput = 65.05 GB/s (480) *BEST*
//const U32 K1_Length = 16u; // 16, Efficiency = 0.941, Throughput = 63.14 GB/s (480)
//const U32 K1_Length = 17u; // 17, Efficiency = 1.0, Throughput = 63.06 GB/s (480)
//const U32 K1_Length = 18u; // 18, Efficiency = 0.988, Throughput = 58.58 GB/s (480)
//const U32 K1_Length = 21u; // 21, Efficiency = 0.988, Throughput = 59.07 GB/s (480)
//const U32 K1_Length = 23u; // 23, Efficiency = 0.992, Throughput = 59.99 GB/s (480)
//const U32 K1_Length = 25u; // 25, Efficiency = 0.980, Throughput = 61.24 GB/s (480)
//const U32 K1_Length = 28u; // 28, Efficiency = 0.988, Throughput = 62.17 GB/s (480)
//const U32 K1_Length = 36u; // 36, Efficiency = 0.988, Throughput = 58.93 GB/s (480)
//const U32 K1_Length = 42u; // 42, Efficiency = 0.988, Throughput = 60.09 GB/s (480)
//const U32 K1_Length = 50u; // 50, Efficiency = 0.980, Throughput = 62.01 GB/s (480)
//const U32 K1_Length = 51u; // 51, Efficiency = 1.0, Throughput = 62.46 GB/s (480)
//const U32 K1_Length = 63u; // 63, Efficiency = 0.988, Throughput = 62.88 GB/s (480),
//const U32 K1_Length = 84u; // 84, Efficiency = 0.988, Throughput = 64.62 GB/s (480),
//const U32 K1_Length = 85u; // 85, Efficiency = 1.0, Throughput = 64.17 GB/s (480),
//const U32 K1_Length = 86u; // 86, Efficiency = 0.675, Throughput = 60.61 GB/s (480), *POOR EFFICIENCY*
//const U32 K1_Length = 125u; // 125, Efficiency = 0.980, Throughput = 65.41 GB/s (480), *BEST*
//const U32 K1_Length = 126u; // 126, Efficiency = 0.988, Throughput = 65.55 GB/s (480), *BEST of the BEST*
//const U32 K1_Length = 127u; // 127, Efficiency = 0.996, Throughput = 65.13 GB/s (480), *BEST*
//const U32 K1_Length = 128u; // 128, Efficiency = 0.502, Throughput = 59.59 GB/s (480), *WORST EFFICIENCY*
// K=[105..255], code probably won't fit in cache
const U32 in_start = 0u; // 0 = starting range
const U32 K1_nRows = K1_GridSize; // Number of rows (blocks) cooperatively striding across the input data set
//-----
// Get number of elements
//-----
assert( floatCount > 0u );
assert( floatCount % 4 == 0u );
U32 nElems = floatCount;
U32 in_stop = nElems - 1u;
const F32 * d_inVals = (const F32 *)d_Data;
/*--------------------------------------
Step 0. Create Intermediate buffers
--------------------------------------*/
// Code moved to initTrish256() above
/*------------------------------------------------------
Step 1. Bin & count elements into 'per row' 256-way histograms
------------------------------------------------------*/
typedef MapToBin
<
F32, // Value Type
F32, // Conversion Type
U32, // Bin Type
1u, // Formula = #1: B=(A-Mu)*Alpha; where Mu = Min - 0.5; and Alpha = n/(max-min+1);
1u, // Range check for values < min
1u // Range check for values > max
> MapperF32;
K1_TRISH_CountRows_GEN_B4
<
// Template Parameters
F32, // underlying value type
MapperF32, // underlying mapper type
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Threads per Warp )
K1_BlockSize, // Threads per Block
K1_GridSize, // Blocks per Grid
K1_Length // Work Per Thread (Loop unrolling)
>
<<<
// CUDA CTA Parameters
K1_GridSize, // Blocks per Grid
K1_BlockSize // Threads per Block
>>>
(
// Function parameters
d_rowCounts, // IN - 'per row' histograms
d_inVals, // IN - 'input' data to count & bin
in_start, // IN - input range [start, stop]
in_stop, // ditto
minVal, // IN - [min,max] value for histogram binning
maxVal, // ditto
numBins // number of bins in histogram
);
// Check if kernel execution generated an error
cutilCheckMsg( "K1_TRISH_CountRows_GEN_B4() Kernel execution failed!" );
/*------------------------------------------------------
Step 2. Sum 'per row' histograms into 'final' 256-bin histogram
------------------------------------------------------*/
K2_TRISH_RowCounts_To_RowStarts
<
// Template Parameters
logBankSize, // log<2>( Memory Banks )
logWarpSize, // log<2>( Warp Size )
K2_BlockSize // Threads per Block
>
<<<
// CUDA CTA Parameters
K2_GridSize, // Blocks per Grid
K2_BlockSize // Threads per Block
>>>
(
// Function parameters
d_Histogram, // OUT - Histogram Counts
d_totalStarts, // OUT - Histogram Starts
d_rowStarts, // OUT - 'Per Row' Histogram Starts
d_rowCounts, // IN - 'Per Row' Histogram Counts
K1_nRows // IN - number of rows
);
// Check if kernel execution generated an error
cutilCheckMsg( "K2_TRISH_RowCounts_To_RowStarts() Kernel execution failed!" );
#if 1 == TRISH_VERIFY_HISTOGRAM
//-----
// Step 3. Verify Histogram results are correct
//-----
TRISH_VerifyHistogram_B4< F32, MapperF32 >
(
nElems, (F32 *)d_inVals,
numBins, (U32 *)d_Histogram,
minVal, maxVal
);
#endif
/*--------------------------------------
Step 4. Cleanup intermediate buffers
--------------------------------------*/
// Code moved to closeTrish256() above
}
|
06fbd7bdd588b2220ac8d38f4ef6119c5aef8cd2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* main.cu
*
* Created on: Nov 30, 2015
* Author: john
*/
#include "Bitmap.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
using namespace std;
const int BINS = 256;
const int BINS4ALL = BINS*16; // 16 interleaved copies (half a warp) per channel: 3 channels * 256 bins * 16 copies * 4 bytes = 48KB of shared memory.
void CPU_histogram (unsigned char *in_red, unsigned char *in_blue, unsigned char *in_green, int N, int *h_red, int *h_blue, int *h_green, int bins)
{
int i;
// initialize histogram counts
for (i = 0; i < bins; i++) {
h_red[i] = 0;
h_blue[i] = 0;
h_green[i] = 0;
}
// accumulate counts
for (i = 0; i < N; i++) {
h_red[in_red[i]]++;
h_blue[in_blue[i]]++;
h_green[in_green[i]]++;
}
}
__device__
void write_shared(int *in, int* bank, int i) {
int temp = in[i];
int v = temp & 0xFF;
int v2 = (temp >> 8) & 0xFF;
int v3 = (temp >> 16) & 0xFF;
int v4 = (temp >> 24) & 0xFF;
atomicAdd (bank + (v << 4), 1);
atomicAdd (bank + (v2 << 4), 1);
atomicAdd (bank + (v3 << 4), 1);
atomicAdd (bank + (v4 << 4), 1);
}
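// Layout note (for reference): each 8-bit value v is counted at shared index
// (v << 4) + bankID, i.e. bin-major with 16 interleaved copies per channel.
// Threads whose locID differs in the low 4 bits therefore hit different
// shared-memory banks, keeping atomicAdd contention and bank conflicts low.
// Example: v = 10, bankID = 3 -> index 163; the final reduction in the kernel
// maps it back to its bin via (163 >> 4) = 10.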
__global__
void GPU_histogramRGB_atomic (int *in_red, int *in_blue, int *in_green, int N, int *h_red, int *h_blue, int *h_green) {
int gloID = blockIdx.x*blockDim.x + threadIdx.x;
int locID = threadIdx.x;
int GRIDSIZE = gridDim.x*blockDim.x;
__shared__ int localH_red[BINS4ALL];
__shared__ int localH_blue[BINS4ALL];
__shared__ int localH_green[BINS4ALL];
int bankID = locID & 0x0F;
int i;
// initialize the local shared-memory bins
for (i = locID; i < BINS4ALL; i += blockDim.x) {
localH_red[i] = 0;
localH_blue[i] = 0;
localH_green[i] = 0;
}
__syncthreads();
int *mySharedBank_red = localH_red + bankID;
int *mySharedBank_blue = localH_blue + bankID;
int *mySharedBank_green = localH_green + bankID;
for (i = gloID; i < N; i += GRIDSIZE) {
write_shared(in_red, mySharedBank_red, i);
write_shared(in_blue, mySharedBank_blue, i);
write_shared(in_green, mySharedBank_green, i);
}
__syncthreads ();
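    // Fold the 16 interleaved sub-histograms into the global 256-bin
    // histograms: shared index i contributes to global bin (i >> 4).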
for (i = locID; i < BINS4ALL; i += blockDim.x) {
atomicAdd (h_red + (i >> 4), localH_red[i]);
atomicAdd (h_blue + (i >> 4), localH_blue[i]);
atomicAdd (h_green + (i >> 4), localH_green[i]);
}
}
int main (int argc, char **argv) {
Bitmap* bmp = new Bitmap(argv[1]);
int *d_in_red, *d_in_blue, *d_in_green;
int *h_in_red, *h_in_blue, *h_in_green;
int *cpu_hist_red, *cpu_hist_blue, *cpu_hist_green;
int *d_hist_red, *d_hist_blue, *d_hist_green;
int *hist_red, *hist_blue, *hist_green;
int bins, N;
h_in_red = (int *) bmp->pixels_red;
h_in_blue = (int *) bmp->pixels_blue;
h_in_green = (int *) bmp->pixels_green;
N = ceil((bmp->x_dim * bmp->y_dim) / 4.0);
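    // Assumption worth noting: x_dim*y_dim should be a multiple of 4; otherwise
    // the last packed int read by the GPU kernel includes bytes beyond the pixel
    // buffers that the CPU reference histogram does not count.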
bins = 256;
hist_red = (int *) malloc (bins * sizeof (int));
hist_blue = (int *) malloc (bins * sizeof (int));
hist_green = (int *) malloc (bins * sizeof (int));
cpu_hist_red = (int *) malloc (bins * sizeof (int));
cpu_hist_blue = (int *) malloc (bins * sizeof (int));
cpu_hist_green = (int *) malloc (bins * sizeof (int));
CPU_histogram(bmp->pixels_red, bmp->pixels_blue, bmp->pixels_green, bmp->x_dim*bmp->y_dim, cpu_hist_red, cpu_hist_blue, cpu_hist_green, bins);
// allocate and copy
hipMalloc ((void **) &d_in_red, sizeof (int) * N);
hipMalloc ((void **) &d_hist_red, sizeof (int) * bins);
hipMemcpy (d_in_red, h_in_red, sizeof (int) * N, hipMemcpyHostToDevice);
hipMemset (d_hist_red, 0, bins * sizeof (int));
hipMalloc ((void **) &d_in_blue, sizeof (int) * N);
hipMalloc ((void **) &d_hist_blue, sizeof (int) * bins);
hipMemcpy (d_in_blue, h_in_blue, sizeof (int) * N, hipMemcpyHostToDevice);
hipMemset (d_hist_blue, 0, bins * sizeof (int));
hipMalloc ((void **) &d_in_green, sizeof (int) * N);
hipMalloc ((void **) &d_hist_green, sizeof (int) * bins);
hipMemcpy (d_in_green, h_in_green, sizeof (int) * N, hipMemcpyHostToDevice);
hipMemset (d_hist_green, 0, bins * sizeof (int));
// create a stream and the timing events
hipStream_t str;
hipEvent_t startT, endT;
float duration;
hipStreamCreate (&str);
hipEventCreate (&startT);
hipEventCreate (&endT);
hipEventRecord (startT, str);
hipLaunchKernelGGL(( GPU_histogramRGB_atomic) , dim3(32), dim3(1024), 0, str , d_in_red, d_in_blue, d_in_green, N, d_hist_red, d_hist_blue, d_hist_green);
hipEventRecord (endT, str);
hipEventSynchronize (endT);
hipMemcpy (hist_red, d_hist_red, sizeof (int) * bins, hipMemcpyDeviceToHost);
hipMemcpy (hist_blue, d_hist_blue, sizeof (int) * bins, hipMemcpyDeviceToHost);
hipMemcpy (hist_green, d_hist_green, sizeof (int) * bins, hipMemcpyDeviceToHost);
hipEventElapsedTime (&duration, startT, endT);
for (int i = 0; i < BINS; i++)
printf ("%i %i %i %i\n", i, hist_red[i], hist_blue[i], hist_green[i]);
for (int i = 0; i < BINS; i++)
if (cpu_hist_red[i] != hist_red[i] || cpu_hist_blue[i] != hist_blue[i] || cpu_hist_green[i] != hist_green[i])
printf ("Calculation mismatch (static) at : %i\n", i);
printf ("Kernel executed for %f ms\n", duration);
hipStreamDestroy (str);
hipEventDestroy (startT);
hipEventDestroy (endT);
hipFree ((void *) d_in_red);
hipFree ((void *) d_hist_red);
free (hist_red);
hipFree ((void *) d_in_blue);
hipFree ((void *) d_hist_blue);
free (hist_blue);
hipFree ((void *) d_in_green);
hipFree ((void *) d_hist_green);
free (hist_green);
hipDeviceReset ();
return 0;
}
| 06fbd7bdd588b2220ac8d38f4ef6119c5aef8cd2.cu | /*
* main.cu
*
* Created on: Nov 30, 2015
* Author: john
*/
#include "Bitmap.h"
#include <iostream>
#include <cuda.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
using namespace std;
const int BINS = 256;
const int BINS4ALL = BINS*16; // Using half a warp (16 banks) since three sets of bins must fit in 48 KB of shared memory.
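// Budget check: 3 histograms * BINS4ALL (4096) ints * 4 bytes = 48 KB, i.e.
// the full per-block shared-memory allowance on many devices, which is why
// 16 sub-histograms (half a warp) are used rather than 32.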
void CPU_histogram (unsigned char *in_red, unsigned char *in_blue, unsigned char *in_green, int N, int *h_red, int *h_blue, int *h_green, int bins)
{
int i;
// initialize histogram counts
for (i = 0; i < bins; i++) {
h_red[i] = 0;
h_blue[i] = 0;
h_green[i] = 0;
}
// accumulate counts
for (i = 0; i < N; i++) {
h_red[in_red[i]]++;
h_blue[in_blue[i]]++;
h_green[in_green[i]]++;
}
}
__device__
void write_shared(int *in, int* bank, int i) {
int temp = in[i];
int v = temp & 0xFF;
int v2 = (temp >> 8) & 0xFF;
int v3 = (temp >> 16) & 0xFF;
int v4 = (temp >> 24) & 0xFF;
atomicAdd (bank + (v << 4), 1);
atomicAdd (bank + (v2 << 4), 1);
atomicAdd (bank + (v3 << 4), 1);
atomicAdd (bank + (v4 << 4), 1);
}
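// write_shared() unpacks four 8-bit values from one packed int and bumps the
// caller's sub-histogram; the (value << 4) stride matches the 16-way
// interleaved bin layout set up in the kernel below, so bin b of
// sub-histogram k lives at shared index b*16 + k.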
__global__
void GPU_histogramRGB_atomic (int *in_red, int *in_blue, int *in_green, int N, int *h_red, int *h_blue, int *h_green) {
int gloID = blockIdx.x*blockDim.x + threadIdx.x;
int locID = threadIdx.x;
int GRIDSIZE = gridDim.x*blockDim.x;
__shared__ int localH_red[BINS4ALL];
__shared__ int localH_blue[BINS4ALL];
__shared__ int localH_green[BINS4ALL];
int bankID = locID & 0x0F;
int i;
// initialize the local shared-memory bins
for (i = locID; i < BINS4ALL; i += blockDim.x) {
localH_red[i] = 0;
localH_blue[i] = 0;
localH_green[i] = 0;
}
__syncthreads();
int *mySharedBank_red = localH_red + bankID;
int *mySharedBank_blue = localH_blue + bankID;
int *mySharedBank_green = localH_green + bankID;
for (i = gloID; i < N; i += GRIDSIZE) {
write_shared(in_red, mySharedBank_red, i);
write_shared(in_blue, mySharedBank_blue, i);
write_shared(in_green, mySharedBank_green, i);
}
__syncthreads ();
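    // Fold the 16 interleaved sub-histograms into the global 256-bin
    // histograms: shared index i contributes to global bin (i >> 4).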
for (i = locID; i < BINS4ALL; i += blockDim.x) {
atomicAdd (h_red + (i >> 4), localH_red[i]);
atomicAdd (h_blue + (i >> 4), localH_blue[i]);
atomicAdd (h_green + (i >> 4), localH_green[i]);
}
}
int main (int argc, char **argv) {
Bitmap* bmp = new Bitmap(argv[1]);
int *d_in_red, *d_in_blue, *d_in_green;
int *h_in_red, *h_in_blue, *h_in_green;
int *cpu_hist_red, *cpu_hist_blue, *cpu_hist_green;
int *d_hist_red, *d_hist_blue, *d_hist_green;
int *hist_red, *hist_blue, *hist_green;
int bins, N;
h_in_red = (int *) bmp->pixels_red;
h_in_blue = (int *) bmp->pixels_blue;
h_in_green = (int *) bmp->pixels_green;
N = ceil((bmp->x_dim * bmp->y_dim) / 4.0);
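    // Assumption worth noting: x_dim*y_dim should be a multiple of 4; otherwise
    // the last packed int read by the GPU kernel includes bytes beyond the pixel
    // buffers that the CPU reference histogram does not count.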
bins = 256;
hist_red = (int *) malloc (bins * sizeof (int));
hist_blue = (int *) malloc (bins * sizeof (int));
hist_green = (int *) malloc (bins * sizeof (int));
cpu_hist_red = (int *) malloc (bins * sizeof (int));
cpu_hist_blue = (int *) malloc (bins * sizeof (int));
cpu_hist_green = (int *) malloc (bins * sizeof (int));
CPU_histogram(bmp->pixels_red, bmp->pixels_blue, bmp->pixels_green, bmp->x_dim*bmp->y_dim, cpu_hist_red, cpu_hist_blue, cpu_hist_green, bins);
// allocate and copy
cudaMalloc ((void **) &d_in_red, sizeof (int) * N);
cudaMalloc ((void **) &d_hist_red, sizeof (int) * bins);
cudaMemcpy (d_in_red, h_in_red, sizeof (int) * N, cudaMemcpyHostToDevice);
cudaMemset (d_hist_red, 0, bins * sizeof (int));
cudaMalloc ((void **) &d_in_blue, sizeof (int) * N);
cudaMalloc ((void **) &d_hist_blue, sizeof (int) * bins);
cudaMemcpy (d_in_blue, h_in_blue, sizeof (int) * N, cudaMemcpyHostToDevice);
cudaMemset (d_hist_blue, 0, bins * sizeof (int));
cudaMalloc ((void **) &d_in_green, sizeof (int) * N);
cudaMalloc ((void **) &d_hist_green, sizeof (int) * bins);
cudaMemcpy (d_in_green, h_in_green, sizeof (int) * N, cudaMemcpyHostToDevice);
cudaMemset (d_hist_green, 0, bins * sizeof (int));
// create a stream and the timing events
cudaStream_t str;
cudaEvent_t startT, endT;
float duration;
cudaStreamCreate (&str);
cudaEventCreate (&startT);
cudaEventCreate (&endT);
cudaEventRecord (startT, str);
GPU_histogramRGB_atomic <<<32, 1024, 0, str >>> (d_in_red, d_in_blue, d_in_green, N, d_hist_red, d_hist_blue, d_hist_green);
cudaEventRecord (endT, str);
cudaEventSynchronize (endT);
cudaMemcpy (hist_red, d_hist_red, sizeof (int) * bins, cudaMemcpyDeviceToHost);
cudaMemcpy (hist_blue, d_hist_blue, sizeof (int) * bins, cudaMemcpyDeviceToHost);
cudaMemcpy (hist_green, d_hist_green, sizeof (int) * bins, cudaMemcpyDeviceToHost);
cudaEventElapsedTime (&duration, startT, endT);
for (int i = 0; i < BINS; i++)
printf ("%i %i %i %i\n", i, hist_red[i], hist_blue[i], hist_green[i]);
for (int i = 0; i < BINS; i++)
if (cpu_hist_red[i] != hist_red[i] || cpu_hist_blue[i] != hist_blue[i] || cpu_hist_green[i] != hist_green[i])
printf ("Calculation mismatch (static) at : %i\n", i);
printf ("Kernel executed for %f ms\n", duration);
cudaStreamDestroy (str);
cudaEventDestroy (startT);
cudaEventDestroy (endT);
cudaFree ((void *) d_in_red);
cudaFree ((void *) d_hist_red);
free (hist_red);
cudaFree ((void *) d_in_blue);
cudaFree ((void *) d_hist_blue);
free (hist_blue);
cudaFree ((void *) d_in_green);
cudaFree ((void *) d_hist_green);
free (hist_green);
cudaDeviceReset ();
return 0;
}
|
5a6ce4b4b5810f44d854a882ef04ab15747434ec.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/strings/detail/copy_range.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <hip/hip_runtime.h>
#include <memory>
namespace {
template <typename T>
void in_place_copy_range(cudf::column_view const& source,
cudf::mutable_column_view& target,
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
hipStream_t stream = 0)
{
auto p_source_device_view = cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
cudf::detail::copy_range(
cudf::detail::make_null_replacement_iterator<T>(*p_source_device_view, T()) + source_begin,
cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin,
target,
target_begin,
target_begin + (source_end - source_begin),
stream);
} else {
cudf::detail::copy_range(p_source_device_view->begin<T>() + source_begin,
thrust::make_constant_iterator(true), // dummy
target,
target_begin,
target_begin + (source_end - source_begin),
stream);
}
}
struct in_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::mutable_column_view& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
hipStream_t stream = 0)
{
in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream);
}
template <typename T>
std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
hipStream_t stream = 0)
{
CUDF_FAIL("in-place copy does not work for variable width types.");
}
};
struct out_of_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::column_view const& target;
template <typename T>
std::unique_ptr<cudf::column> operator()(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
hipStream_t stream = 0)
{
auto p_ret = std::make_unique<cudf::column>(target, stream, mr);
if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) {
p_ret->set_null_mask(
cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0);
}
if (source_end != source_begin) { // otherwise no-op
auto ret_view = p_ret->mutable_view();
in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream);
}
return p_ret;
}
};
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::string_view>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto target_end = target_begin + (source_end - source_begin);
auto p_source_device_view = cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
return cudf::strings::detail::copy_range(
cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view,
cudf::string_view()) +
source_begin,
cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin,
cudf::strings_column_view(target),
target_begin,
target_end,
mr,
stream);
} else {
return cudf::strings::detail::copy_range(
p_source_device_view->begin<cudf::string_view>() + source_begin,
thrust::make_constant_iterator(true),
cudf::strings_column_view(target),
target_begin,
target_end,
mr,
stream);
}
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<numeric::decimal64>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("decimal64 type not supported");
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<numeric::decimal32>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("decimal32 type not supported");
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
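  // Overview of the dictionary path: (1) add the source keys to the target's
  // key set and re-map the source onto the combined keys, (2) reuse the
  // fixed-width in-place copy on the indices child column only, then
  // (3) reassemble a dictionary column from the matched keys, the new indices
  // and the target's null mask.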
// check the keys in the source and target
cudf::dictionary_column_view const dict_source(source);
cudf::dictionary_column_view const dict_target(target);
CUDF_EXPECTS(dict_source.keys().type() == dict_target.keys().type(),
"dictionary keys must be the same type");
// combine keys so both dictionaries have the same set
auto target_matched =
cudf::dictionary::detail::add_keys(dict_target, dict_source.keys(), mr, stream);
auto const target_view = cudf::dictionary_column_view(target_matched->view());
auto source_matched = cudf::dictionary::detail::set_keys(
dict_source, target_view.keys(), rmm::mr::get_current_device_resource(), stream);
auto const source_view = cudf::dictionary_column_view(source_matched->view());
// build the new indices by calling in_place_copy_range on just the indices
auto const source_indices = source_view.get_indices_annotated();
auto target_contents = target_matched->release();
auto target_indices(std::move(target_contents.children.front()));
cudf::mutable_column_view new_indices(
target_indices->type(),
dict_target.size(),
target_indices->mutable_view().head(),
static_cast<cudf::bitmask_type*>(target_contents.null_mask->data()),
dict_target.null_count());
cudf::type_dispatcher(new_indices.type(),
in_place_copy_range_dispatch{source_indices, new_indices},
source_begin,
source_end,
target_begin,
stream);
auto null_count = new_indices.null_count();
auto indices_column =
std::make_unique<cudf::column>(new_indices.type(),
new_indices.size(),
std::move(*(target_indices->release().data.release())),
rmm::device_buffer{0, stream, mr},
0);
// take the keys from the matched column allocated using mr
auto keys_column(std::move(target_contents.children.back()));
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(target_contents.null_mask.release())),
null_count);
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::list_view>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("list_view type not supported");
}
} // namespace
namespace cudf {
namespace detail {
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
hipStream_t stream)
{
CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true,
"In-place copy_range does not support variable-sized types.");
CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) &&
(source_begin <= source_end) && (target_begin >= 0) &&
(target_begin <= target.size() - (source_end - source_begin)),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false),
"target should be nullable if source has null values.");
if (source_end != source_begin) { // otherwise no-op
cudf::type_dispatcher(target.type(),
in_place_copy_range_dispatch{source, target},
source_begin,
source_end,
target_begin,
stream);
}
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) &&
(source_begin <= source_end) && (target_begin >= 0) &&
(target_begin <= target.size() - (source_end - source_begin)),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
return cudf::type_dispatcher(target.type(),
out_of_place_copy_range_dispatch{source, target},
source_begin,
source_end,
target_begin,
mr,
stream);
}
} // namespace detail
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin)
{
CUDF_FUNC_RANGE();
return detail::copy_range_in_place(source, target, source_begin, source_end, target_begin, 0);
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::copy_range(source, target, source_begin, source_end, target_begin, mr, 0);
}
} // namespace cudf
| 5a6ce4b4b5810f44d854a882ef04ab15747434ec.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/strings/detail/copy_range.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <cuda_runtime.h>
#include <memory>
namespace {
template <typename T>
void in_place_copy_range(cudf::column_view const& source,
cudf::mutable_column_view& target,
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
cudaStream_t stream = 0)
{
auto p_source_device_view = cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
cudf::detail::copy_range(
cudf::detail::make_null_replacement_iterator<T>(*p_source_device_view, T()) + source_begin,
cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin,
target,
target_begin,
target_begin + (source_end - source_begin),
stream);
} else {
cudf::detail::copy_range(p_source_device_view->begin<T>() + source_begin,
thrust::make_constant_iterator(true), // dummy
target,
target_begin,
target_begin + (source_end - source_begin),
stream);
}
}
struct in_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::mutable_column_view& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
cudaStream_t stream = 0)
{
in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream);
}
template <typename T>
std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
cudaStream_t stream = 0)
{
CUDF_FAIL("in-place copy does not work for variable width types.");
}
};
struct out_of_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::column_view const& target;
template <typename T>
std::unique_ptr<cudf::column> operator()(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
cudaStream_t stream = 0)
{
auto p_ret = std::make_unique<cudf::column>(target, stream, mr);
if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) {
p_ret->set_null_mask(
cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0);
}
if (source_end != source_begin) { // otherwise no-op
auto ret_view = p_ret->mutable_view();
in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream);
}
return p_ret;
}
};
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::string_view>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto target_end = target_begin + (source_end - source_begin);
auto p_source_device_view = cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
return cudf::strings::detail::copy_range(
cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view,
cudf::string_view()) +
source_begin,
cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin,
cudf::strings_column_view(target),
target_begin,
target_end,
mr,
stream);
} else {
return cudf::strings::detail::copy_range(
p_source_device_view->begin<cudf::string_view>() + source_begin,
thrust::make_constant_iterator(true),
cudf::strings_column_view(target),
target_begin,
target_end,
mr,
stream);
}
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<numeric::decimal64>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("decimal64 type not supported");
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<numeric::decimal32>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("decimal32 type not supported");
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
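  // Overview of the dictionary path: (1) add the source keys to the target's
  // key set and re-map the source onto the combined keys, (2) reuse the
  // fixed-width in-place copy on the indices child column only, then
  // (3) reassemble a dictionary column from the matched keys, the new indices
  // and the target's null mask.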
// check the keys in the source and target
cudf::dictionary_column_view const dict_source(source);
cudf::dictionary_column_view const dict_target(target);
CUDF_EXPECTS(dict_source.keys().type() == dict_target.keys().type(),
"dictionary keys must be the same type");
// combine keys so both dictionaries have the same set
auto target_matched =
cudf::dictionary::detail::add_keys(dict_target, dict_source.keys(), mr, stream);
auto const target_view = cudf::dictionary_column_view(target_matched->view());
auto source_matched = cudf::dictionary::detail::set_keys(
dict_source, target_view.keys(), rmm::mr::get_current_device_resource(), stream);
auto const source_view = cudf::dictionary_column_view(source_matched->view());
// build the new indices by calling in_place_copy_range on just the indices
auto const source_indices = source_view.get_indices_annotated();
auto target_contents = target_matched->release();
auto target_indices(std::move(target_contents.children.front()));
cudf::mutable_column_view new_indices(
target_indices->type(),
dict_target.size(),
target_indices->mutable_view().head(),
static_cast<cudf::bitmask_type*>(target_contents.null_mask->data()),
dict_target.null_count());
cudf::type_dispatcher(new_indices.type(),
in_place_copy_range_dispatch{source_indices, new_indices},
source_begin,
source_end,
target_begin,
stream);
auto null_count = new_indices.null_count();
auto indices_column =
std::make_unique<cudf::column>(new_indices.type(),
new_indices.size(),
std::move(*(target_indices->release().data.release())),
rmm::device_buffer{0, stream, mr},
0);
// take the keys from the matched column allocated using mr
auto keys_column(std::move(target_contents.children.back()));
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(target_contents.null_mask.release())),
null_count);
}
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::list_view>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("list_view type not supported");
}
} // namespace
namespace cudf {
namespace detail {
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
cudaStream_t stream)
{
CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true,
"In-place copy_range does not support variable-sized types.");
CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) &&
(source_begin <= source_end) && (target_begin >= 0) &&
(target_begin <= target.size() - (source_end - source_begin)),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false),
"target should be nullable if source has null values.");
if (source_end != source_begin) { // otherwise no-op
cudf::type_dispatcher(target.type(),
in_place_copy_range_dispatch{source, target},
source_begin,
source_end,
target_begin,
stream);
}
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) &&
(source_begin <= source_end) && (target_begin >= 0) &&
(target_begin <= target.size() - (source_end - source_begin)),
"Range is out of bounds.");
CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
return cudf::type_dispatcher(target.type(),
out_of_place_copy_range_dispatch{source, target},
source_begin,
source_end,
target_begin,
mr,
stream);
}
} // namespace detail
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin)
{
CUDF_FUNC_RANGE();
return detail::copy_range_in_place(source, target, source_begin, source_end, target_begin, 0);
}
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::copy_range(source, target, source_begin, source_end, target_begin, mr, 0);
}
} // namespace cudf
|
273f4e2d69a15ff0f0aae0b339dcc440d2f39ac4.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
******************************* BLUEBOTTLE-1.0 ********************************
*******************************************************************************
*
* Copyright 2012 - 2014 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_particle.h"
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
extern "C"
void cuda_part_malloc(void)
{
// allocate device memory on host
_parts = (part_struct**) malloc(nsubdom * sizeof(part_struct*));
cpumem += nsubdom * sizeof(part_struct*);
_pnm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phase = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_phase_shell = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_u = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_v = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_w = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
// allocate device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
checkCudaErrors(hipMalloc((void**) &(_parts[dev]),
sizeof(part_struct) * nparts));
gpumem += sizeof(part_struct) * nparts;
checkCudaErrors(hipMalloc((void**) &(_pnm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_pnm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_chinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_chinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_pnm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_pnm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_chinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_chinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_pnm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_pnm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_chinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_chinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(hipMalloc((void**) &(_phase[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
checkCudaErrors(hipMalloc((void**) &(_phase_shell[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
checkCudaErrors(hipMalloc((void**) &(_flag_u[dev]),
sizeof(int) * dom[dev].Gfx.s3b));
gpumem += sizeof(int) * dom[dev].Gfx.s3b;
checkCudaErrors(hipMalloc((void**) &(_flag_v[dev]),
sizeof(int) * dom[dev].Gfy.s3b));
gpumem += sizeof(int) * dom[dev].Gfy.s3b;
checkCudaErrors(hipMalloc((void**) &(_flag_w[dev]),
sizeof(int) * dom[dev].Gfz.s3b));
gpumem += sizeof(int) * dom[dev].Gfz.s3b;
}
}
extern "C"
void cuda_part_push(void)
{
// copy host data to device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
checkCudaErrors(hipMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_pnm_re[dev], pnm_re, sizeof(real) * coeff_stride
* nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_pnm_im[dev], pnm_im, sizeof(real) * coeff_stride
* nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phinm_re[dev], phinm_re, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phinm_im[dev], phinm_im, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_chinm_re[dev], chinm_re, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_chinm_im[dev], chinm_im, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_pnm_re0[dev], pnm_re0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_pnm_im0[dev], pnm_im0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phinm_re0[dev], phinm_re0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phinm_im0[dev], phinm_im0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_chinm_re0[dev], chinm_re0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_chinm_im0[dev], chinm_im0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_pnm_re00[dev], pnm_re00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_pnm_im00[dev], pnm_im00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phinm_re00[dev], phinm_re00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phinm_im00[dev], phinm_im00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_chinm_re00[dev], chinm_re00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_chinm_im00[dev], chinm_im00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phase[0], phase, sizeof(int) * dom[0].Gcc.s3b,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phase_shell[0], phase_shell,
sizeof(int) * dom[0].Gcc.s3b, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_flag_u[0], flag_u, sizeof(int) * dom[0].Gfx.s3b,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_flag_v[0], flag_v, sizeof(int) * dom[0].Gfy.s3b,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_flag_w[0], flag_w, sizeof(int) * dom[0].Gfz.s3b,
hipMemcpyHostToDevice));
}
}
extern "C"
void cuda_part_pull(void)
{
// all devices have the same particle data for now, so just copy one of them
checkCudaErrors(hipMemcpy(parts, _parts[0], sizeof(part_struct) * nparts,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pnm_re, _pnm_re[0], sizeof(real) * coeff_stride
* nparts,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pnm_im, _pnm_im[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phinm_re, _phinm_re[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phinm_im, _phinm_im[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(chinm_re, _chinm_re[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(chinm_im, _chinm_im[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pnm_re0, _pnm_re0[0], sizeof(real) * coeff_stride
* nparts,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pnm_im0, _pnm_im0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phinm_re0, _phinm_re0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phinm_im0, _phinm_im0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(chinm_re0, _chinm_re0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(chinm_im0, _chinm_im0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pnm_re00, _pnm_re00[0], sizeof(real) * coeff_stride
* nparts,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(pnm_im00, _pnm_im00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phinm_re00, _phinm_re00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phinm_im00, _phinm_im00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(chinm_re00, _chinm_re00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(chinm_im00, _chinm_im00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
// TODO REMOVE
// copy for device cage setup testing
checkCudaErrors(hipMemcpy(phase, _phase[0], sizeof(int) * dom[0].Gcc.s3b,
hipMemcpyDeviceToHost));
/*
checkCudaErrors(hipMemcpy(phase_shell, _phase_shell[0],
sizeof(int) * dom[0].Gcc.s3b, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(flag_u, _flag_u[0], sizeof(int) * dom[0].Gfx.s3b,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(flag_v, _flag_v[0], sizeof(int) * dom[0].Gfy.s3b,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(flag_w, _flag_w[0], sizeof(int) * dom[0].Gfz.s3b,
hipMemcpyDeviceToHost));
*/
}
extern "C"
void cuda_part_free(void)
{
// free device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
checkCudaErrors(hipFree(_parts[dev]));
checkCudaErrors(hipFree(_pnm_re[dev]));
checkCudaErrors(hipFree(_pnm_im[dev]));
checkCudaErrors(hipFree(_phinm_re[dev]));
checkCudaErrors(hipFree(_phinm_im[dev]));
checkCudaErrors(hipFree(_chinm_re[dev]));
checkCudaErrors(hipFree(_chinm_im[dev]));
checkCudaErrors(hipFree(_pnm_re0[dev]));
checkCudaErrors(hipFree(_pnm_im0[dev]));
checkCudaErrors(hipFree(_phinm_re0[dev]));
checkCudaErrors(hipFree(_phinm_im0[dev]));
checkCudaErrors(hipFree(_chinm_re0[dev]));
checkCudaErrors(hipFree(_chinm_im0[dev]));
checkCudaErrors(hipFree(_pnm_re00[dev]));
checkCudaErrors(hipFree(_pnm_im00[dev]));
checkCudaErrors(hipFree(_phinm_re00[dev]));
checkCudaErrors(hipFree(_phinm_im00[dev]));
checkCudaErrors(hipFree(_chinm_re00[dev]));
checkCudaErrors(hipFree(_chinm_im00[dev]));
checkCudaErrors(hipFree(_phase[dev]));
checkCudaErrors(hipFree(_phase_shell[dev]));
checkCudaErrors(hipFree(_flag_u[dev]));
checkCudaErrors(hipFree(_flag_v[dev]));
checkCudaErrors(hipFree(_flag_w[dev]));
}
free(_parts);
free(_pnm_re);
free(_pnm_im);
free(_phinm_re);
free(_phinm_im);
free(_chinm_re);
free(_chinm_im);
free(_pnm_re0);
free(_pnm_im0);
free(_phinm_re0);
free(_phinm_im0);
free(_chinm_re0);
free(_chinm_im0);
free(_pnm_re00);
free(_pnm_im00);
free(_phinm_re00);
free(_phinm_im00);
free(_chinm_re00);
free(_chinm_im00);
free(_phase);
free(_phase_shell);
free(_flag_u);
free(_flag_v);
free(_flag_w);
}
extern "C"
void cuda_build_cages(void)
{
cuda_part_pull();
// parallelize over domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
int i; // iterator
real Y, Z; // virtual particle center location
int threads_x = 0;
int threads_y = 0;
int threads_z = 0;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
int threads_c = 0; // number of threads for cage build
// reset phase
if(dom[dev].Gcc.jnb < MAX_THREADS_DIM)
threads_y = dom[dev].Gcc.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gcc.knb < MAX_THREADS_DIM)
threads_z = dom[dev].Gcc.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
dim3 dimBlocks(threads_y, threads_z);
dim3 numBlocks(blocks_y, blocks_z);
hipLaunchKernelGGL(( reset_phase), dim3(numBlocks), dim3(dimBlocks), 0, 0, _phase[dev], _dom[dev]);
hipLaunchKernelGGL(( reset_phase_shell), dim3(numBlocks), dim3(dimBlocks), 0, 0, _phase_shell[dev], _dom[dev]);
// reset flag_u
if(dom[dev].Gfx.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfx.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gfx.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfx.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_u(threads_y, threads_z);
dim3 numBlocks_u(blocks_y, blocks_z);
hipLaunchKernelGGL(( reset_flag_u), dim3(numBlocks_u), dim3(dimBlocks_u), 0, 0, _flag_u[dev], _dom[dev], bc);
// reset flag_v
if(dom[dev].Gfy.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfy.knb;
else
threads_z = MAX_THREADS_DIM;
if(dom[dev].Gfy.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfy.inb;
else
threads_x = MAX_THREADS_DIM;
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_v(threads_z, threads_x);
dim3 numBlocks_v(blocks_z, blocks_x);
hipLaunchKernelGGL(( reset_flag_v), dim3(numBlocks_v), dim3(dimBlocks_v), 0, 0, _flag_v[dev], _dom[dev], bc);
// reset flag_w
if(dom[dev].Gfz.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfz.inb;
else
threads_x = MAX_THREADS_DIM;
if(dom[dev].Gfz.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfz.jnb;
else
threads_y = MAX_THREADS_DIM;
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_w(threads_x, threads_y);
dim3 numBlocks_w(blocks_x, blocks_y);
hipLaunchKernelGGL(( reset_flag_w), dim3(numBlocks_w), dim3(dimBlocks_w), 0, 0, _flag_w[dev], _dom[dev], bc);
// build cages and update phase
// TODO: do the first half of this on the card
threads_c = MAX_THREADS_DIM;
for(i = 0; i < nparts; i++) {
// set up cage extents
// cage extents are 2*ceil(r/dx), 2*ceil(r/dy), 2*ceil(r/dz) cells; the extra
// padding cells ("+ 2") that would guarantee full containment in the bounding
// box are currently commented out
parts[i].cage.in = (int)(2.0 * ceil(parts[i].r / dom[dev].dx));// + 2;
parts[i].cage.jn = (int)(2.0 * ceil(parts[i].r / dom[dev].dy));// + 2;
parts[i].cage.kn = (int)(2.0 * ceil(parts[i].r / dom[dev].dz));// + 2;
// remove a cell from cage for odd number of cells in domain
if(dom[dev].xn % 2) {
parts[i].cage.in = parts[i].cage.in - 1;
}
if(dom[dev].yn % 2) {
parts[i].cage.jn = parts[i].cage.jn - 1;
}
if(dom[dev].zn % 2) {
parts[i].cage.kn = parts[i].cage.kn - 1;
}
// find indices of cell that contains the particle center
parts[i].cage.cx = (int)((parts[i].x - dom->xs + 0.5 * dom->dx) / dom->dx);
parts[i].cage.cy = (int)((parts[i].y - dom->ys + 0.5 * dom->dy) / dom->dy);
parts[i].cage.cz = (int)((parts[i].z - dom->zs + 0.5 * dom->dz) / dom->dz);
// compute start and end cells of cage that contains particle
parts[i].cage.is = (int)(round((parts[i].x-dom->xs)/dom->dx)
- 0.5 * parts[i].cage.in + DOM_BUF);
parts[i].cage.ie = parts[i].cage.is + parts[i].cage.in;
if(parts[i].cage.is <= dom->Gcc.is) {
parts[i].cage.is = parts[i].cage.is + dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else if(parts[i].cage.ie > dom->Gcc.ie) {
parts[i].cage.ie = parts[i].cage.ie - dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else {
parts[i].cage.ibs = parts[i].cage.ie;
parts[i].cage.ibe = parts[i].cage.ie;
}
parts[i].cage.js = (int)(round((parts[i].y-dom->ys)/dom->dy)
- 0.5 * parts[i].cage.jn + DOM_BUF);
parts[i].cage.je = parts[i].cage.js + parts[i].cage.jn;
if(parts[i].cage.js <= dom->Gcc.js) {
parts[i].cage.js = parts[i].cage.js + dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else if(parts[i].cage.je > dom->Gcc.je) {
parts[i].cage.je = parts[i].cage.je - dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else {
parts[i].cage.jbs = parts[i].cage.je;
parts[i].cage.jbe = parts[i].cage.je;
}
parts[i].cage.ks = (int)(round((parts[i].z-dom->zs)/dom->dz)
- 0.5 * parts[i].cage.kn + DOM_BUF);
parts[i].cage.ke = parts[i].cage.ks + parts[i].cage.kn;
if(parts[i].cage.ks <= dom->Gcc.ks) {
parts[i].cage.ks = parts[i].cage.ks + dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else if(parts[i].cage.ke > dom->Gcc.ke) {
parts[i].cage.ke = parts[i].cage.ke - dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else {
parts[i].cage.kbs = parts[i].cage.ke;
parts[i].cage.kbe = parts[i].cage.ke;
}
}
// push particle information to device
checkCudaErrors(hipMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
hipMemcpyHostToDevice));
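    // Each cage is built in four quadrants (BS/BN/TS/TN). When a cage
    // straddles a periodic boundary, the start/end indices computed above
    // split it there and the virtual particle center (Y, Z) is shifted by a
    // domain length so every quadrant sees a contiguous block of cells.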
for(i = 0; i < nparts; i++) {
// BS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_c);
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
if(parts[i].y < (dom[dev].ys + parts[i].r)) Y = parts[i].y + dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z < (dom[dev].zs + parts[i].r)) Z = parts[i].z + dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( build_cage), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// BN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.je - (parts[i].cage.jbe))
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_c);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
if(parts[i].y > (dom[dev].ye - parts[i].r)) Y = parts[i].y - dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z < (dom[dev].zs + parts[i].r)) Z = parts[i].z + dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( build_cage), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
// TS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.ke - (parts[i].cage.kbe))
/ (real) threads_c);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
if(parts[i].y < (dom[dev].ys + parts[i].r)) Y = parts[i].y + dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z > (dom[dev].ze - parts[i].r)) Z = parts[i].z - dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( build_cage), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
// TN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.je - (parts[i].cage.jbe))
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.ke - (parts[i].cage.kbe))
/ (real) threads_c);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
if(parts[i].y > (dom[dev].ye - parts[i].r)) Y = parts[i].y - dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z > (dom[dev].ze - parts[i].r)) Z = parts[i].z - dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( build_cage), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
hipLaunchKernelGGL(( cage_phases_periodic_W), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
hipLaunchKernelGGL(( cage_phases_periodic_E), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
hipLaunchKernelGGL(( cage_phases_periodic_S), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
hipLaunchKernelGGL(( cage_phases_periodic_N), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
hipLaunchKernelGGL(( cage_phases_periodic_B), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
hipLaunchKernelGGL(( cage_phases_periodic_T), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
// do flagging
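    // Each velocity-component flag array (u, v, w) is rebuilt over four
    // quadrants split along the two directions transverse to that component,
    // mirroring the quadrant decomposition used for the cage above.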
threads_x = MAX_THREADS_DIM;
threads_y = MAX_THREADS_DIM;
threads_z = MAX_THREADS_DIM;
// u
// BS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
dim3 dimBlocks_cu(threads_y, threads_z);
dim3 numBlocks_cu(blocks_y, blocks_z);
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_1), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// BN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_1), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
// TS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_1), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
// TN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_1), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
// v
// BW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
dim3 dimBlocks_cv(threads_z, threads_x);
dim3 numBlocks_cv(blocks_z, blocks_x);
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_1), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.ks, parts[i].cage.kbs);
// BE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_1), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.ks, parts[i].cage.kbs);
// TW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_1), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.kbe, parts[i].cage.ke);
// TE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_1), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.kbe, parts[i].cage.ke);
// w
// SW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
dim3 dimBlocks_cw(threads_x, threads_y);
dim3 numBlocks_cw(blocks_x, blocks_y);
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_1), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs);
// SE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_1), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs);
// NW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_1), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je);
// NE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_1), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je);
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
hipLaunchKernelGGL(( cage_phases_periodic_W), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
hipLaunchKernelGGL(( cage_phases_periodic_E), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
hipLaunchKernelGGL(( cage_phases_periodic_S), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
hipLaunchKernelGGL(( cage_phases_periodic_N), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
hipLaunchKernelGGL(( cage_phases_periodic_B), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
hipLaunchKernelGGL(( cage_phases_periodic_T), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_phase_shell[dev], _dom[dev]);
}
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_u_periodic_W), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_u_periodic_E), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_u_periodic_S), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_u_periodic_N), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_u_periodic_B), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_u_periodic_T), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.vW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_v_periodic_W), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_v_periodic_E), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_v_periodic_S), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_v_periodic_N), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_v_periodic_B), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_v_periodic_T), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.wW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_w_periodic_W), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_w_periodic_E), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_w_periodic_S), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_w_periodic_N), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_w_periodic_B), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_w_periodic_T), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
      // create a copy of the flags for this step so that intermediate flags
      // don't corrupt the algorithm
int *_flag_u_tmp;
int *_flag_v_tmp;
int *_flag_w_tmp;
checkCudaErrors(hipMalloc((void**) &(_flag_u_tmp),
sizeof(int) * dom[dev].Gfx.s3b));
gpumem += sizeof(int) * dom[dev].Gfx.s3b;
checkCudaErrors(hipMalloc((void**) &(_flag_v_tmp),
sizeof(int) * dom[dev].Gfy.s3b));
gpumem += sizeof(int) * dom[dev].Gfy.s3b;
checkCudaErrors(hipMalloc((void**) &(_flag_w_tmp),
sizeof(int) * dom[dev].Gfz.s3b));
gpumem += sizeof(int) * dom[dev].Gfz.s3b;
checkCudaErrors(hipMemcpy(_flag_u_tmp, _flag_u[dev],
sizeof(int) * dom[dev].Gfx.s3b, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_flag_v_tmp, _flag_v[dev],
sizeof(int) * dom[dev].Gfy.s3b, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_flag_w_tmp, _flag_w[dev],
sizeof(int) * dom[dev].Gfz.s3b, hipMemcpyDeviceToDevice));
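      // second flagging pass: each cage_flag_*_2 kernel receives its own
      // component's temporary copy alongside the other two components'
      // first-pass flags, so flags updated during this pass do not feed back
      // into the other components; the temporaries are merged back into
      // _flag_u/_flag_v/_flag_w once all quadrants have been processed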
// u
// BS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_2), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// BN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_2), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
// TS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_2), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
// TN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u_2), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
// v
// BW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_2), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.ks, parts[i].cage.kbs);
// BE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_2), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.ks, parts[i].cage.kbs);
// TW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_2), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.kbe, parts[i].cage.ke);
// TE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v_2), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.kbe, parts[i].cage.ke);
// w
// SW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_2), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs);
// SE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_2), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs);
// NW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_2), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je);
// NE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w_2), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je);
// now copy the results back
checkCudaErrors(hipMemcpy(_flag_u[dev], _flag_u_tmp,
sizeof(int) * dom[dev].Gfx.s3b, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_flag_v[dev], _flag_v_tmp,
sizeof(int) * dom[dev].Gfy.s3b, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_flag_w[dev], _flag_w_tmp,
sizeof(int) * dom[dev].Gfz.s3b, hipMemcpyDeviceToDevice));
// clean up copies
checkCudaErrors(hipFree(_flag_u_tmp));
checkCudaErrors(hipFree(_flag_v_tmp));
checkCudaErrors(hipFree(_flag_w_tmp));
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_u_periodic_W), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_u_periodic_E), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_u_periodic_S), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_u_periodic_N), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_u_periodic_B), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_u_periodic_T), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.vW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_v_periodic_W), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_v_periodic_E), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_v_periodic_S), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_v_periodic_N), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_v_periodic_B), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_v_periodic_T), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.wW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_w_periodic_W), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_w_periodic_E), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_w_periodic_S), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_w_periodic_N), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_w_periodic_B), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_w_periodic_T), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
}
}
}
extern "C"
void cuda_part_BC(void)
{
  // parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
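    // one 2D launch per velocity component, gridded over the plane normal to
    // that component (y-z for u, z-x for v, x-y for w); the part_BC_* kernels
    // apply the particle surface boundary conditions using the coefficient
    // arrays pnm/phinm/chinm, which appear to hold this solver's
    // Lamb's-solution expansion coefficients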
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
hipLaunchKernelGGL(( part_BC_u), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _u[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
hipLaunchKernelGGL(( part_BC_v), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _v[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
hipLaunchKernelGGL(( part_BC_w), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _w[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
extern "C"
void cuda_part_BC_star(void)
{
  // parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
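    // same launch pattern as cuda_part_BC, but the boundary conditions are
    // applied to the intermediate (star) velocity fields _u_star/_v_star/_w_star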
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
hipLaunchKernelGGL(( part_BC_u), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _u_star[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
hipLaunchKernelGGL(( part_BC_v), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _v_star[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
hipLaunchKernelGGL(( part_BC_w), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _w_star[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
// already CPU parallelized in cuda_PP_bicgstab, which calls it
extern "C"
void cuda_part_BC_p(int dev)
{
int threads_c = MAX_THREADS_DIM;
int blocks_y = 0;
int blocks_z = 0;
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_c);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_c);
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
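  // single 2D launch over the interior cell-centered grid (no ghost cells);
  // part_BC_p adjusts the pressure-Poisson right-hand side _rhs_p, presumably
  // at cells identified via phase/phase_shell, using both the previous
  // (..._00) and the current coefficient sets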
hipLaunchKernelGGL(( part_BC_p), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _rhs_p[dev], _phase[dev],
_phase_shell[dev], _parts[dev], _dom[dev],
mu, nu, gradP, rho_f, coeff_stride,
_pnm_re00[dev], _pnm_im00[dev],
_phinm_re00[dev], _phinm_im00[dev], _chinm_re00[dev], _chinm_im00[dev],
_pnm_re[dev], _pnm_im[dev],
_phinm_re[dev], _phinm_im[dev], _chinm_re[dev], _chinm_im[dev]);
}
extern "C"
void cuda_store_coeffs(void)
{
// parallelize over CPU threads
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(hipSetDevice(dev + dev_start));
// coeff00 & coeff ==> coeff0 (Adams-Bashforth)
dim3 dimBlocks(coeff_stride);
dim3 numBlocks(nparts);
// as implemented, this actually makes convergence slower
/*if(dt0 > 0.) {
predict_coeffs<<<numBlocks, dimBlocks>>>(dt0, dt,
_pnm_re00[dev], _pnm_im00[dev], _phinm_re00[dev], _phinm_im00[dev],
_chinm_re00[dev], _chinm_im00[dev],
_pnm_re0[dev], _pnm_im0[dev], _phinm_re0[dev], _phinm_im0[dev],
_chinm_re0[dev], _chinm_im0[dev],
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev], coeff_stride);
}
*/
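    // with prediction disabled, simply save the current coefficients as the
    // "old" (..._00) set for later use via device-to-device copies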
checkCudaErrors(hipMemcpy(_pnm_re00[dev], _pnm_re[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_pnm_im00[dev], _pnm_im[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_phinm_re00[dev], _phinm_re[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_phinm_im00[dev], _phinm_im[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_chinm_re00[dev], _chinm_re[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(_chinm_im00[dev], _chinm_im[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
}
}
| 273f4e2d69a15ff0f0aae0b339dcc440d2f39ac4.cu | /*******************************************************************************
******************************* BLUEBOTTLE-1.0 ********************************
*******************************************************************************
*
* Copyright 2012 - 2014 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_particle.h"
#include <cuda.h>
#include <helper_cuda.h>
extern "C"
void cuda_part_malloc(void)
{
// allocate device memory on host
_parts = (part_struct**) malloc(nsubdom * sizeof(part_struct*));
cpumem += nsubdom * sizeof(part_struct*);
_pnm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phase = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_phase_shell = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_u = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_v = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_w = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
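  // each of these host-side arrays holds one device pointer per subdomain
  // (GPU); entries are indexed by the OpenMP thread number below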
// allocate device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
checkCudaErrors(cudaMalloc((void**) &(_parts[dev]),
sizeof(part_struct) * nparts));
gpumem += sizeof(part_struct) * nparts;
checkCudaErrors(cudaMalloc((void**) &(_pnm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_pnm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_chinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_chinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_pnm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_pnm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_chinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_chinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_pnm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_pnm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_chinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_chinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
checkCudaErrors(cudaMalloc((void**) &(_phase[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
checkCudaErrors(cudaMalloc((void**) &(_phase_shell[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
checkCudaErrors(cudaMalloc((void**) &(_flag_u[dev]),
sizeof(int) * dom[dev].Gfx.s3b));
gpumem += sizeof(int) * dom[dev].Gfx.s3b;
checkCudaErrors(cudaMalloc((void**) &(_flag_v[dev]),
sizeof(int) * dom[dev].Gfy.s3b));
gpumem += sizeof(int) * dom[dev].Gfy.s3b;
checkCudaErrors(cudaMalloc((void**) &(_flag_w[dev]),
sizeof(int) * dom[dev].Gfz.s3b));
gpumem += sizeof(int) * dom[dev].Gfz.s3b;
}
}
extern "C"
void cuda_part_push(void)
{
// copy host data to device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
checkCudaErrors(cudaMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_pnm_re[dev], pnm_re, sizeof(real) * coeff_stride
* nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_pnm_im[dev], pnm_im, sizeof(real) * coeff_stride
* nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phinm_re[dev], phinm_re, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phinm_im[dev], phinm_im, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_chinm_re[dev], chinm_re, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_chinm_im[dev], chinm_im, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_pnm_re0[dev], pnm_re0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_pnm_im0[dev], pnm_im0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phinm_re0[dev], phinm_re0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phinm_im0[dev], phinm_im0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_chinm_re0[dev], chinm_re0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_chinm_im0[dev], chinm_im0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_pnm_re00[dev], pnm_re00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_pnm_im00[dev], pnm_im00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phinm_re00[dev], phinm_re00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phinm_im00[dev], phinm_im00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_chinm_re00[dev], chinm_re00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_chinm_im00[dev], chinm_im00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phase[0], phase, sizeof(int) * dom[0].Gcc.s3b,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phase_shell[0], phase_shell,
sizeof(int) * dom[0].Gcc.s3b, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_flag_u[0], flag_u, sizeof(int) * dom[0].Gfx.s3b,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_flag_v[0], flag_v, sizeof(int) * dom[0].Gfy.s3b,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_flag_w[0], flag_w, sizeof(int) * dom[0].Gfz.s3b,
cudaMemcpyHostToDevice));
}
}
extern "C"
void cuda_part_pull(void)
{
// all devices have the same particle data for now, so just copy one of them
checkCudaErrors(cudaMemcpy(parts, _parts[0], sizeof(part_struct) * nparts,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pnm_re, _pnm_re[0], sizeof(real) * coeff_stride
* nparts,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pnm_im, _pnm_im[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phinm_re, _phinm_re[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phinm_im, _phinm_im[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(chinm_re, _chinm_re[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(chinm_im, _chinm_im[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pnm_re0, _pnm_re0[0], sizeof(real) * coeff_stride
* nparts,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pnm_im0, _pnm_im0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phinm_re0, _phinm_re0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phinm_im0, _phinm_im0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(chinm_re0, _chinm_re0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(chinm_im0, _chinm_im0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pnm_re00, _pnm_re00[0], sizeof(real) * coeff_stride
* nparts,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(pnm_im00, _pnm_im00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phinm_re00, _phinm_re00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phinm_im00, _phinm_im00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(chinm_re00, _chinm_re00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(chinm_im00, _chinm_im00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
// TODO REMOVE
// copy for device cage setup testing
checkCudaErrors(cudaMemcpy(phase, _phase[0], sizeof(int) * dom[0].Gcc.s3b,
cudaMemcpyDeviceToHost));
/*
checkCudaErrors(cudaMemcpy(phase_shell, _phase_shell[0],
sizeof(int) * dom[0].Gcc.s3b, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(flag_u, _flag_u[0], sizeof(int) * dom[0].Gfx.s3b,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(flag_v, _flag_v[0], sizeof(int) * dom[0].Gfy.s3b,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(flag_w, _flag_w[0], sizeof(int) * dom[0].Gfz.s3b,
cudaMemcpyDeviceToHost));
*/
}
extern "C"
void cuda_part_free(void)
{
// free device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
checkCudaErrors(cudaFree(_parts[dev]));
checkCudaErrors(cudaFree(_pnm_re[dev]));
checkCudaErrors(cudaFree(_pnm_im[dev]));
checkCudaErrors(cudaFree(_phinm_re[dev]));
checkCudaErrors(cudaFree(_phinm_im[dev]));
checkCudaErrors(cudaFree(_chinm_re[dev]));
checkCudaErrors(cudaFree(_chinm_im[dev]));
checkCudaErrors(cudaFree(_pnm_re0[dev]));
checkCudaErrors(cudaFree(_pnm_im0[dev]));
checkCudaErrors(cudaFree(_phinm_re0[dev]));
checkCudaErrors(cudaFree(_phinm_im0[dev]));
checkCudaErrors(cudaFree(_chinm_re0[dev]));
checkCudaErrors(cudaFree(_chinm_im0[dev]));
checkCudaErrors(cudaFree(_pnm_re00[dev]));
checkCudaErrors(cudaFree(_pnm_im00[dev]));
checkCudaErrors(cudaFree(_phinm_re00[dev]));
checkCudaErrors(cudaFree(_phinm_im00[dev]));
checkCudaErrors(cudaFree(_chinm_re00[dev]));
checkCudaErrors(cudaFree(_chinm_im00[dev]));
checkCudaErrors(cudaFree(_phase[dev]));
checkCudaErrors(cudaFree(_phase_shell[dev]));
checkCudaErrors(cudaFree(_flag_u[dev]));
checkCudaErrors(cudaFree(_flag_v[dev]));
checkCudaErrors(cudaFree(_flag_w[dev]));
}
free(_parts);
free(_pnm_re);
free(_pnm_im);
free(_phinm_re);
free(_phinm_im);
free(_chinm_re);
free(_chinm_im);
free(_pnm_re0);
free(_pnm_im0);
free(_phinm_re0);
free(_phinm_im0);
free(_chinm_re0);
free(_chinm_im0);
free(_pnm_re00);
free(_pnm_im00);
free(_phinm_re00);
free(_phinm_im00);
free(_chinm_re00);
free(_chinm_im00);
free(_phase);
free(_phase_shell);
free(_flag_u);
free(_flag_v);
free(_flag_w);
}
extern "C"
void cuda_build_cages(void)
{
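  // overall flow: pull particle data to the host, then per device:
  // (1) reset phase, phase_shell and the velocity flags,
  // (2) compute each particle's cage extents on the host side (periodic
  //     wrap-around is handled by the ibs/ibe, jbs/jbe, kbs/kbe split indices),
  // (3) copy the updated particle structs back to the device, and
  // (4) build the cages and flag the velocity faces quadrant by quadrant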
cuda_part_pull();
// parallelize over domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
int i; // iterator
real Y, Z; // virtual particle center location
int threads_x = 0;
int threads_y = 0;
int threads_z = 0;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
int threads_c = 0; // number of threads for cage build
// reset phase
if(dom[dev].Gcc.jnb < MAX_THREADS_DIM)
threads_y = dom[dev].Gcc.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gcc.knb < MAX_THREADS_DIM)
threads_z = dom[dev].Gcc.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
dim3 dimBlocks(threads_y, threads_z);
dim3 numBlocks(blocks_y, blocks_z);
reset_phase<<<numBlocks, dimBlocks>>>(_phase[dev], _dom[dev]);
reset_phase_shell<<<numBlocks, dimBlocks>>>(_phase_shell[dev], _dom[dev]);
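    // note: the flag-reset blocks below test the interior sizes (jn, kn, in)
    // when capping the thread counts but then assign the ghost-padded sizes
    // (jnb, knb, inb); the phase reset above uses the padded sizes for both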
// reset flag_u
if(dom[dev].Gfx.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfx.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gfx.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfx.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_u(threads_y, threads_z);
dim3 numBlocks_u(blocks_y, blocks_z);
reset_flag_u<<<numBlocks_u, dimBlocks_u>>>(_flag_u[dev], _dom[dev], bc);
// reset flag_v
if(dom[dev].Gfy.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfy.knb;
else
threads_z = MAX_THREADS_DIM;
if(dom[dev].Gfy.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfy.inb;
else
threads_x = MAX_THREADS_DIM;
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_v(threads_z, threads_x);
dim3 numBlocks_v(blocks_z, blocks_x);
reset_flag_v<<<numBlocks_v, dimBlocks_v>>>(_flag_v[dev], _dom[dev], bc);
// reset flag_w
if(dom[dev].Gfz.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfz.inb;
else
threads_x = MAX_THREADS_DIM;
if(dom[dev].Gfz.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfz.jnb;
else
threads_y = MAX_THREADS_DIM;
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_w(threads_x, threads_y);
dim3 numBlocks_w(blocks_x, blocks_y);
reset_flag_w<<<numBlocks_w, dimBlocks_w>>>(_flag_w[dev], _dom[dev], bc);
// build cages and update phase
// TODO: do the first half of this on the card
threads_c = MAX_THREADS_DIM;
for(i = 0; i < nparts; i++) {
// set up cage extents
      // size the cage to the particle diameter in cells (the extra padding
      // cells that would guarantee full containment are currently commented out)
parts[i].cage.in = (int)(2.0 * ceil(parts[i].r / dom[dev].dx));// + 2;
parts[i].cage.jn = (int)(2.0 * ceil(parts[i].r / dom[dev].dy));// + 2;
parts[i].cage.kn = (int)(2.0 * ceil(parts[i].r / dom[dev].dz));// + 2;
// remove a cell from cage for odd number of cells in domain
if(dom[dev].xn % 2) {
parts[i].cage.in = parts[i].cage.in - 1;
}
if(dom[dev].yn % 2) {
parts[i].cage.jn = parts[i].cage.jn - 1;
}
if(dom[dev].zn % 2) {
parts[i].cage.kn = parts[i].cage.kn - 1;
}
// find indices of cell that contains the particle center
parts[i].cage.cx = (int)((parts[i].x - dom->xs + 0.5 * dom->dx) / dom->dx);
parts[i].cage.cy = (int)((parts[i].y - dom->ys + 0.5 * dom->dy) / dom->dy);
parts[i].cage.cz = (int)((parts[i].z - dom->zs + 0.5 * dom->dz) / dom->dz);
// compute start and end cells of cage that contains particle
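      // convention for the periodic split: if the cage stays inside the
      // domain, ibs = ibe = ie and the second sub-range ibe..ie is empty; if
      // it crosses a boundary, is/ie wrap around and ibs/ibe mark the domain
      // edges, so the cage is covered by the two pieces is..ibs and ibe..ie
      // (and likewise for the j and k directions)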
parts[i].cage.is = (int)(round((parts[i].x-dom->xs)/dom->dx)
- 0.5 * parts[i].cage.in + DOM_BUF);
parts[i].cage.ie = parts[i].cage.is + parts[i].cage.in;
if(parts[i].cage.is <= dom->Gcc.is) {
parts[i].cage.is = parts[i].cage.is + dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else if(parts[i].cage.ie > dom->Gcc.ie) {
parts[i].cage.ie = parts[i].cage.ie - dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else {
parts[i].cage.ibs = parts[i].cage.ie;
parts[i].cage.ibe = parts[i].cage.ie;
}
parts[i].cage.js = (int)(round((parts[i].y-dom->ys)/dom->dy)
- 0.5 * parts[i].cage.jn + DOM_BUF);
parts[i].cage.je = parts[i].cage.js + parts[i].cage.jn;
if(parts[i].cage.js <= dom->Gcc.js) {
parts[i].cage.js = parts[i].cage.js + dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else if(parts[i].cage.je > dom->Gcc.je) {
parts[i].cage.je = parts[i].cage.je - dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else {
parts[i].cage.jbs = parts[i].cage.je;
parts[i].cage.jbe = parts[i].cage.je;
}
parts[i].cage.ks = (int)(round((parts[i].z-dom->zs)/dom->dz)
- 0.5 * parts[i].cage.kn + DOM_BUF);
parts[i].cage.ke = parts[i].cage.ks + parts[i].cage.kn;
if(parts[i].cage.ks <= dom->Gcc.ks) {
parts[i].cage.ks = parts[i].cage.ks + dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else if(parts[i].cage.ke > dom->Gcc.ke) {
parts[i].cage.ke = parts[i].cage.ke - dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else {
parts[i].cage.kbs = parts[i].cage.ke;
parts[i].cage.kbe = parts[i].cage.ke;
}
}
// push particle information to device
checkCudaErrors(cudaMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
cudaMemcpyHostToDevice));
for(i = 0; i < nparts; i++) {
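      // each cage is built in four (y,z) quadrants so that a cage wrapping a
      // periodic boundary is handled piecewise; Y and Z are "virtual" particle
      // center coordinates, shifted by one domain length whenever the center
      // lies within one particle radius of the corresponding domain face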
// BS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_c);
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
if(parts[i].y < (dom[dev].ys + parts[i].r)) Y = parts[i].y + dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z < (dom[dev].zs + parts[i].r)) Z = parts[i].z + dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
build_cage<<<numBlocks_c, dimBlocks_c>>>(i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// BN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.je - (parts[i].cage.jbe))
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_c);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
if(parts[i].y > (dom[dev].ye - parts[i].r)) Y = parts[i].y - dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z < (dom[dev].zs + parts[i].r)) Z = parts[i].z + dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
build_cage<<<numBlocks_c, dimBlocks_c>>>(i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
// TS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.ke - (parts[i].cage.kbe))
/ (real) threads_c);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
if(parts[i].y < (dom[dev].ys + parts[i].r)) Y = parts[i].y + dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z > (dom[dev].ze - parts[i].r)) Z = parts[i].z - dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
build_cage<<<numBlocks_c, dimBlocks_c>>>(i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
// TN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_c);
blocks_y = (int)ceil((real) (parts[i].cage.je - (parts[i].cage.jbe))
/ (real) threads_c);
blocks_z = (int)ceil((real) (parts[i].cage.ke - (parts[i].cage.kbe))
/ (real) threads_c);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
if(parts[i].y > (dom[dev].ye - parts[i].r)) Y = parts[i].y - dom[dev].yl;
else Y = parts[i].y;
if(parts[i].z > (dom[dev].ze - parts[i].r)) Z = parts[i].z - dom[dev].zl;
else Z = parts[i].z;
if(blocks_y > 0 && blocks_z > 0)
build_cage<<<numBlocks_c, dimBlocks_c>>>(i, _parts[dev],
_phase[dev], _phase_shell[dev], _dom[dev],
Y, Z,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
cage_phases_periodic_W<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
cage_phases_periodic_E<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
cage_phases_periodic_S<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
cage_phases_periodic_N<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
cage_phases_periodic_B<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
cage_phases_periodic_T<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
// do flagging
threads_x = MAX_THREADS_DIM;
threads_y = MAX_THREADS_DIM;
threads_z = MAX_THREADS_DIM;
// u
// BS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
dim3 dimBlocks_cu(threads_y, threads_z);
dim3 numBlocks_cu(blocks_y, blocks_z);
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_1<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// BN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_1<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
// TS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_1<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
// TN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_1<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
// v
// BW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
dim3 dimBlocks_cv(threads_z, threads_x);
dim3 numBlocks_cv(blocks_z, blocks_x);
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_1<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.ks, parts[i].cage.kbs);
// BE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_1<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.ks, parts[i].cage.kbs);
// TW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_1<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.kbe, parts[i].cage.ke);
// TE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_1<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.kbe, parts[i].cage.ke);
// w
// SW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
dim3 dimBlocks_cw(threads_x, threads_y);
dim3 numBlocks_cw(blocks_x, blocks_y);
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_1<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs);
// SE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_1<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs);
// NW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_1<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je);
// NE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_1<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je);
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
cage_phases_periodic_W<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
cage_phases_periodic_E<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
cage_phases_periodic_S<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
cage_phases_periodic_N<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
cage_phases_periodic_B<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
cage_phases_periodic_T<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_phase_shell[dev], _dom[dev]);
}
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
cage_flag_u_periodic_W<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
cage_flag_u_periodic_E<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
cage_flag_u_periodic_S<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
cage_flag_u_periodic_N<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
cage_flag_u_periodic_B<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
cage_flag_u_periodic_T<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.vW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
cage_flag_v_periodic_W<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
cage_flag_v_periodic_E<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
cage_flag_v_periodic_S<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
cage_flag_v_periodic_N<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
cage_flag_v_periodic_B<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
cage_flag_v_periodic_T<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.wW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
cage_flag_w_periodic_W<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
cage_flag_w_periodic_E<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
cage_flag_w_periodic_S<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
cage_flag_w_periodic_N<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
cage_flag_w_periodic_B<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
cage_flag_w_periodic_T<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
// create a copy of the flags for this step so intermediate flags don't
// corrupt the algorithm
int *_flag_u_tmp;
int *_flag_v_tmp;
int *_flag_w_tmp;
checkCudaErrors(cudaMalloc((void**) &(_flag_u_tmp),
sizeof(int) * dom[dev].Gfx.s3b));
gpumem += sizeof(int) * dom[dev].Gfx.s3b;
checkCudaErrors(cudaMalloc((void**) &(_flag_v_tmp),
sizeof(int) * dom[dev].Gfy.s3b));
gpumem += sizeof(int) * dom[dev].Gfy.s3b;
checkCudaErrors(cudaMalloc((void**) &(_flag_w_tmp),
sizeof(int) * dom[dev].Gfz.s3b));
gpumem += sizeof(int) * dom[dev].Gfz.s3b;
checkCudaErrors(cudaMemcpy(_flag_u_tmp, _flag_u[dev],
sizeof(int) * dom[dev].Gfx.s3b, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_flag_v_tmp, _flag_v[dev],
sizeof(int) * dom[dev].Gfy.s3b, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_flag_w_tmp, _flag_w[dev],
sizeof(int) * dom[dev].Gfz.s3b, cudaMemcpyDeviceToDevice));
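// each cage_flag_*_2 launch below writes into its own temporary flag array while
// reading the other two (unmodified) flag arrays; results are copied back afterwards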
// u
// BS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_2<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// BN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_2<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
// TS quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_2<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
// TN quadrant
blocks_x = (int)ceil((real) parts[i].cage.in / (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u_2<<<numBlocks_cu, dimBlocks_cu>>>(i, _flag_u_tmp,
_flag_v[dev], _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
// v
// BW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_2<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.ks, parts[i].cage.kbs);
// BE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.kbs - parts[i].cage.ks)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_2<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.ks, parts[i].cage.kbs);
// TW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_2<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.kbe, parts[i].cage.ke);
// TE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) parts[i].cage.jn / (real) threads_y);
blocks_z = (int)ceil((real) (parts[i].cage.ke - parts[i].cage.kbe)
/ (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v_2<<<numBlocks_cv, dimBlocks_cv>>>(i, _flag_u[dev],
_flag_v_tmp, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.kbe, parts[i].cage.ke);
// w
// SW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_2<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs);
// SE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.jbs - parts[i].cage.js)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_2<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs);
// NW quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ibs - parts[i].cage.is)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_2<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je);
// NE quadrant
blocks_x = (int)ceil((real) (parts[i].cage.ie - parts[i].cage.ibe)
/ (real) threads_x);
blocks_y = (int)ceil((real) (parts[i].cage.je - parts[i].cage.jbe)
/ (real) threads_y);
blocks_z = (int)ceil((real) parts[i].cage.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w_2<<<numBlocks_cw, dimBlocks_cw>>>(i, _flag_u[dev],
_flag_v[dev], _flag_w_tmp,
_parts[dev], _dom[dev], _phase[dev],
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je);
// now copy the results back
checkCudaErrors(cudaMemcpy(_flag_u[dev], _flag_u_tmp,
sizeof(int) * dom[dev].Gfx.s3b, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_flag_v[dev], _flag_v_tmp,
sizeof(int) * dom[dev].Gfy.s3b, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_flag_w[dev], _flag_w_tmp,
sizeof(int) * dom[dev].Gfz.s3b, cudaMemcpyDeviceToDevice));
// clean up copies
checkCudaErrors(cudaFree(_flag_u_tmp));
checkCudaErrors(cudaFree(_flag_v_tmp));
checkCudaErrors(cudaFree(_flag_w_tmp));
// fill in ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
cage_flag_u_periodic_W<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
cage_flag_u_periodic_E<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
cage_flag_u_periodic_S<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
cage_flag_u_periodic_N<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
cage_flag_u_periodic_B<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
cage_flag_u_periodic_T<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.vW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
cage_flag_v_periodic_W<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
cage_flag_v_periodic_E<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
cage_flag_v_periodic_S<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
cage_flag_v_periodic_N<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
cage_flag_v_periodic_B<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
cage_flag_v_periodic_T<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.wW == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
cage_flag_w_periodic_W<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
cage_flag_w_periodic_E<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wS == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
cage_flag_w_periodic_S<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
cage_flag_w_periodic_N<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wB == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
cage_flag_w_periodic_B<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
cage_flag_w_periodic_T<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
}
}
}
extern "C"
void cuda_part_BC(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
part_BC_u<<<numBlocks_x, dimBlocks_x>>>(_u[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
part_BC_v<<<numBlocks_y, dimBlocks_y>>>(_v[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
part_BC_w<<<numBlocks_z, dimBlocks_z>>>(_w[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
extern "C"
void cuda_part_BC_star(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
part_BC_u<<<numBlocks_x, dimBlocks_x>>>(_u_star[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
part_BC_v<<<numBlocks_y, dimBlocks_y>>>(_v_star[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
part_BC_w<<<numBlocks_z, dimBlocks_z>>>(_w_star[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
// already CPU parallelized in cuda_PP_bicgstab, which calls it
extern "C"
void cuda_part_BC_p(int dev)
{
int threads_c = MAX_THREADS_DIM;
int blocks_y = 0;
int blocks_z = 0;
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_c);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_c);
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
part_BC_p<<<numBlocks_c, dimBlocks_c>>>(_rhs_p[dev], _phase[dev],
_phase_shell[dev], _parts[dev], _dom[dev],
mu, nu, gradP, rho_f, coeff_stride,
_pnm_re00[dev], _pnm_im00[dev],
_phinm_re00[dev], _phinm_im00[dev], _chinm_re00[dev], _chinm_im00[dev],
_pnm_re[dev], _pnm_im[dev],
_phinm_re[dev], _phinm_im[dev], _chinm_re[dev], _chinm_im[dev]);
}
extern "C"
void cuda_store_coeffs(void)
{
// parallelize over CPU threads
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
checkCudaErrors(cudaSetDevice(dev + dev_start));
// coeff00 & coeff ==> coeff0 (Adams-Bashforth)
dim3 dimBlocks(coeff_stride);
dim3 numBlocks(nparts);
// as implemented, this actually makes convergence slower
/*if(dt0 > 0.) {
predict_coeffs<<<numBlocks, dimBlocks>>>(dt0, dt,
_pnm_re00[dev], _pnm_im00[dev], _phinm_re00[dev], _phinm_im00[dev],
_chinm_re00[dev], _chinm_im00[dev],
_pnm_re0[dev], _pnm_im0[dev], _phinm_re0[dev], _phinm_im0[dev],
_chinm_re0[dev], _chinm_im0[dev],
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev], coeff_stride);
}
*/
checkCudaErrors(cudaMemcpy(_pnm_re00[dev], _pnm_re[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_pnm_im00[dev], _pnm_im[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_phinm_re00[dev], _phinm_re[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_phinm_im00[dev], _phinm_im[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_chinm_re00[dev], _chinm_re[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(_chinm_im00[dev], _chinm_im[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
}
}
|
63fd1aa5909a959fe652f5b190b2056ce7819f8a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/constants.hpp>
#include <glm/gtc/matrix_inverse.hpp>
#include <glm/gtc/constants.hpp>
#define MAX_THREADS 128
#define AA 1
//TODO: Make this into a parameter of some kind, allow setting of scale/rot/trans
#define NUM_INSTANCES 1
static int iter;
static int width = 0;
static int height = 0;
static int *dev_bufIdx = NULL;
static int *dev_bufIdxOut = NULL;
static VertexIn *dev_bufVertex = NULL;
static VertexOut *dev_bufVertexOut = NULL;
static Triangle *dev_primitives = NULL;
static Fragment *dev_depthbuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
static int vertInCount = 0;
static int vertOutCount = 0;
static Light light;
static int fragCount;
static int primCount;
static int numVertBlocks;
static int numVertInBlocks;
static int numVertOutBlocks;
static int numPrimBlocks;
static int numFragBlocks;
static glm::mat4 Mpvms[NUM_INSTANCES];
static glm::mat3 Mms[NUM_INSTANCES];
static glm::mat4* dev_Mpvms;
static glm::mat3* dev_Mms;
//static Cam cam;
static glm::mat4 Mview;
static glm::mat4 Mmod;
static glm::mat4 Mproj;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Writes fragment colors to the framebuffer
__global__
void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
int tlx = x*AA;
int tly = y*AA;
glm::vec3 color(0.0);
int sx, sy;
for (int i = 0; i < AA; i++){
for (int j = 0; j < AA; j++){
sx = tlx + i;
sy = tly + j;
color += depthbuffer[sx+sy*w*AA].color;
}
}
color /= AA*AA;
framebuffer[index] = color;
}
}
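// Initializes the fixed-point depth to INT_MAX so the atomicMin-based depth test
// in kernRasterize starts from the farthest possible value.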
__global__ void initDepths(int n, Fragment* depthbuffer){
int index = threadIdx.x + (blockDim.x*blockIdx.x);
if (index < n){
depthbuffer[index].fixed_depth = INT_MAX;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
light.pos = glm::vec3(3.0, 3.0, 3.0);
iter = 0;
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
void rasterizeSetBuffers(
int _bufIdxSize, int *bufIdx,
int _vertCount, float *bufPos, float *bufNor, float *bufCol) {
bufIdxSize = _bufIdxSize;
vertCount = _vertCount;
// Vertex shading
vertInCount = _vertCount;
vertOutCount = vertInCount * NUM_INSTANCES;
fragCount = width * height * AA * AA;
primCount = vertOutCount / 3;
numVertBlocks = (vertCount - 1) / MAX_THREADS + 1;
numVertInBlocks = (vertInCount - 1) / MAX_THREADS + 1;
numVertOutBlocks = (vertOutCount - 1) / MAX_THREADS + 1;
numPrimBlocks = (primCount - 1) / MAX_THREADS + 1;
numFragBlocks = (fragCount - 1) / MAX_THREADS + 1;
printf("fragment count: %d\n", fragCount);
printf("vertex count: %d\n", vertCount);
printf("primitive count: %d\n", primCount);
//int numBlocks = (width*height - 1) / MAX_THREADS + 1;
//initDepths<<<numBlocks, MAX_THREADS>>>(width*height, dev_depthbuffer);
hipFree(dev_bufIdx);
hipMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
hipMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), hipMemcpyHostToDevice);
VertexIn *bufVertex = new VertexIn[_vertCount];
for (int i = 0; i < vertCount; i++) {
int j = i * 3;
bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
}
hipFree(dev_bufVertex);
hipMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
hipMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), hipMemcpyHostToDevice);
hipFree(dev_bufVertexOut);
hipMalloc(&dev_bufVertexOut, vertOutCount * sizeof(VertexOut));
hipFree(dev_bufIdxOut);
hipMalloc((void**)&dev_bufIdxOut, vertOutCount * sizeof(int));
hipFree(dev_primitives);
hipMalloc(&dev_primitives, primCount * sizeof(Triangle));
hipMemset(dev_primitives, 0, primCount * sizeof(Triangle));
hipFree(dev_depthbuffer);
hipMalloc(&dev_depthbuffer, fragCount * sizeof(Fragment));
hipMemset(dev_depthbuffer, 0, fragCount * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
checkCUDAError("rasterizeSetBuffers");
}
__global__ void kernShadeVerticesInstances(int n, int num_instances, VertexOut* vs_output, int* vs_output_idx, VertexIn* vs_input, int* vs_input_idx, glm::mat4* Mpvms, glm::mat3* Mms){
// n is the number of in vertices
// TODO: Can parallelize this if we do thread per output index instead of thread per input index
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n){
glm::mat4 Mpvm;
glm::vec4 new_pos;
for (int i = 0; i < num_instances; i++){
// Model-view-perspective transform for positions
Mpvm = Mpvms[i];
new_pos = Mpvm * glm::vec4(vs_input[index].pos, 1.0f);
vs_output[index + i*n].ndc_pos = glm::vec3(new_pos / new_pos.w);
vs_output[index + i*n].nor = glm::normalize(vs_input[index].nor * Mms[i]);
vs_output[index + i*n].col = vs_input[index].col;
vs_output_idx[index + i*n] = vs_input_idx[index] + i*n;
}
}
}
__global__ void kernShadeVertices(int n, VertexOut* vs_output, VertexIn* vs_input, glm::mat4 Mpvm, glm::mat3 Mm){
// Mm is the 3x3 rotation matrix computed as the inverse transpose of the model matrix, used to rotate normal vectors
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
vs_output[index].pos = vs_input[index].pos;
glm::vec4 new_pos = Mpvm * glm::vec4(vs_input[index].pos, 1.0f);
vs_output[index].ndc_pos = glm::vec3(new_pos / new_pos.w);
vs_output[index].nor = vs_input[index].nor * Mm;
vs_output[index].col = vs_input[index].col;
}
}
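// Geometry stage (not called in the current rasterize() pipeline): expands each
// input vertex into a tiny triangle by offsetting its NDC position, which
// effectively draws the mesh as points.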
__global__ void kernShadeGeometries(int n, VertexOut* out_vertices, int* idx, VertexOut* in_vertices){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
VertexOut vi = in_vertices[index];
idx[index * 3] = 3*index;
idx[index * 3 + 1] = 3*index + 1;
idx[index * 3 + 2] = 3*index + 2;
out_vertices[index * 3].ndc_pos = vi.ndc_pos;
out_vertices[index * 3].col = vi.col;
out_vertices[index * 3].pos = vi.pos;
out_vertices[index * 3].nor = vi.nor;
out_vertices[index * 3 + 1].ndc_pos = vi.ndc_pos + glm::vec3(0.01,0.0,0.0);
out_vertices[index * 3 + 1].col = vi.col;
out_vertices[index * 3 + 1].pos = vi.pos;
out_vertices[index * 3 + 1].nor = vi.nor;
out_vertices[index * 3 + 2].ndc_pos = vi.ndc_pos + glm::vec3(0.0, 0.01, 0.0);
out_vertices[index * 3 + 2].col = vi.col;
out_vertices[index * 3 + 2].pos = vi.pos;
out_vertices[index * 3 + 2].nor = vi.nor;
}
}
__global__ void kernAssemblePrimitives(int n, Triangle* primitives, VertexOut* vs_output, int* idx){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
int idx0 = idx[3 * index + 0];
int idx1 = idx[3 * index + 1];
int idx2 = idx[3 * index + 2];
primitives[index].v[0] = vs_output[idx0];
primitives[index].v[1] = vs_output[idx1];
primitives[index].v[2] = vs_output[idx2];
primitives[index].ndc_pos[0] = vs_output[idx0].ndc_pos;
primitives[index].ndc_pos[1] = vs_output[idx1].ndc_pos;
primitives[index].ndc_pos[2] = vs_output[idx2].ndc_pos;
primitives[index].v[0].col = glm::vec3(1.0, 0.0, 0.0);
primitives[index].v[1].col = glm::vec3(1.0, 0.0, 0.0);
primitives[index].v[2].col = glm::vec3(1.0, 0.0, 0.0);
}
}
// Each thread is responsible for rasterizing a single triangle
__global__ void kernRasterize(int n, Cam cam, Fragment* fs_input, Triangle* primitives){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
Triangle prim = primitives[index];
AABB aabb = getAABBForTriangle(primitives[index].ndc_pos);
glm::vec3 bary;
glm::vec2 point;
glm::vec3 points;
// Snap i,j to nearest fragment coordinate
int frag_width = cam.width * AA;
int frag_height = cam.height * AA;
float dx = 2.0f / (float)frag_width;
float dy = 2.0f / (float)frag_height;
float x;
float y;
int mini = max((int)(aabb.min.x / dx) + frag_width / 2 - 2, 0);
int minj = max((int)(aabb.min.y / dy) + frag_height / 2 - 2, 0);
int maxi = min((int)(aabb.max.x / dx) + frag_width / 2 + 2, frag_width-1);
int maxj = min((int)(aabb.max.y / dy) + frag_height / 2 + 2, frag_height-1);
float depth;
int fixed_depth;
int ind;
// Iterate through fragment coordinates
for (int j = minj; j < maxj; j++){
for (int i = mini; i < maxi; i++){
ind = i + j * frag_width;
// Get the NDC coordinate
x = dx*i - dx*frag_width/2.0f + dx/2.0f;
y = dy*j - dy*frag_height/2.0f + dy/2.0f;
point[0] = x;
point[1] = y;
bary = calculateBarycentricCoordinate(primitives[index].ndc_pos, point);
if (isBarycentricCoordInBounds(bary)){
depth = -getZAtCoordinate(bary, prim.ndc_pos);
fixed_depth = (int)(depth * INT_MAX);
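// atomicMin on the fixed-point depth resolves conflicts between triangles that
// cover the same fragment; the check below keeps the attributes of the closest
// fragment seen so far.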
int old = atomicMin(&fs_input[ind].fixed_depth, fixed_depth);
if (fs_input[ind].fixed_depth == fixed_depth){
fs_input[ind].depth = depth;
fs_input[ind].color = bary.x * prim.v[0].col + bary.y * prim.v[1].col + bary.z * prim.v[2].col; //glm::vec3(1.0, 0.0, 0.0);// prim.v[0].col;
fs_input[ind].norm = bary.x * prim.v[0].nor + bary.y * prim.v[1].nor + bary.z * prim.v[2].nor;
fs_input[ind].pos = bary.x * prim.v[0].pos + bary.y * prim.v[1].pos + bary.z * prim.v[2].pos;
fs_input[ind].ndc_pos = bary.x * prim.v[0].ndc_pos + bary.y * prim.v[1].ndc_pos + bary.z * prim.v[2].ndc_pos;
//fs_input[ind].color = fs_input[ind].norm;
}
}
}
}
}
}
__global__ void kernShadeFragments(int n, Fragment* fs_input, Light light){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
if (fs_input[index].color != glm::vec3(0.0)){
glm::vec3 light_ray = glm::normalize(fs_input[index].pos - light.pos);
fs_input[index].color = fs_input[index].color * abs((glm::dot(glm::normalize(fs_input[index].norm), light_ray)));
}
}
}
void resetRasterize(){
hipMemset(dev_depthbuffer, 0, fragCount * sizeof(Fragment));
hipLaunchKernelGGL(( initDepths), dim3(numFragBlocks), dim3(MAX_THREADS), 0, 0, fragCount, dev_depthbuffer);
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
checkCUDAError("resetBuffers");
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, Cam cam) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
resetRasterize();
//Mmod = glm::mat4(1.0f);
Mview = glm::lookAt(cam.pos, cam.focus, cam.up);
Mproj = glm::perspective(cam.fovy, cam.aspect, cam.zNear, cam.zFar);
for (int i = 0; i < NUM_INSTANCES; i++){
Mmod = glm::mat4(1.0);
Mmod = glm::translate(Mmod, glm::vec3(i*0.5f,0.0f,i*-1.0f));
Mmod = glm::rotate(Mmod, i*3.14f/8.0f, glm::vec3(0.0,1.0,0.0));
Mms[i] = glm::inverseTranspose(glm::mat3(Mmod));
Mpvms[i] = Mproj * Mview * Mmod;
}
hipMalloc((void**)&dev_Mpvms, NUM_INSTANCES*sizeof(glm::mat4));
hipMemcpy(dev_Mpvms, Mpvms, NUM_INSTANCES*sizeof(glm::mat4), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_Mms, NUM_INSTANCES*sizeof(glm::mat3));
hipMemcpy(dev_Mms, Mms, NUM_INSTANCES*sizeof(glm::mat3), hipMemcpyHostToDevice);
// Vertex Shading
hipLaunchKernelGGL(( kernShadeVerticesInstances), dim3(numVertBlocks), dim3(MAX_THREADS), 0, 0, vertCount, NUM_INSTANCES, dev_bufVertexOut, dev_bufIdxOut, dev_bufVertex, dev_bufIdx, dev_Mpvms, dev_Mms);
//kernShadeVertices<<<numVertBlocks, MAX_THREADS>>>(vertCount, dev_bufVertexOut, dev_bufVertex, Mpvm);
checkCUDAError("shadeVertices");
hipFree(dev_Mpvms);
hipFree(dev_Mms);
// Primitive Assembly
hipLaunchKernelGGL(( kernAssemblePrimitives), dim3(numPrimBlocks), dim3(MAX_THREADS), 0, 0, primCount, dev_primitives, dev_bufVertexOut, dev_bufIdxOut);
checkCUDAError("assemblePrimitives");
// Rasterization
hipLaunchKernelGGL(( kernRasterize), dim3(numPrimBlocks), dim3(MAX_THREADS), 0, 0, primCount, cam, dev_depthbuffer, dev_primitives);
checkCUDAError("rasterizePrimitives");
// Fragment shading
hipLaunchKernelGGL(( kernShadeFragments), dim3(numFragBlocks), dim3(MAX_THREADS), 0, 0, fragCount, dev_depthbuffer, light);
checkCUDAError("shadeFragments");
// Copy depthbuffer colors into framebuffer
hipLaunchKernelGGL(( render), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer, dev_framebuffer);
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("rasterize");
iter += 1;
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
hipFree(dev_bufIdx);
dev_bufIdx = NULL;
hipFree(dev_bufIdxOut);
dev_bufIdxOut = NULL;
hipFree(dev_bufVertex);
dev_bufVertex = NULL;
hipFree(dev_bufVertexOut);
dev_bufVertexOut = NULL;
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_depthbuffer);
dev_depthbuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
checkCUDAError("rasterizeFree");
}
| 63fd1aa5909a959fe652f5b190b2056ce7819f8a.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/constants.hpp>
#include <glm/gtc/matrix_inverse.hpp>
#include <glm/gtc/constants.hpp>
#define MAX_THREADS 128
#define AA 1
//TODO: Make this into a parameter of some kind, allow setting of scale/rot/trans
#define NUM_INSTANCES 1
static int iter;
static int width = 0;
static int height = 0;
static int *dev_bufIdx = NULL;
static int *dev_bufIdxOut = NULL;
static VertexIn *dev_bufVertex = NULL;
static VertexOut *dev_bufVertexOut = NULL;
static Triangle *dev_primitives = NULL;
static Fragment *dev_depthbuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
static int vertInCount = 0;
static int vertOutCount = 0;
static Light light;
static int fragCount;
static int primCount;
static int numVertBlocks;
static int numVertInBlocks;
static int numVertOutBlocks;
static int numPrimBlocks;
static int numFragBlocks;
static glm::mat4 Mpvms[NUM_INSTANCES];
static glm::mat3 Mms[NUM_INSTANCES];
static glm::mat4* dev_Mpvms;
static glm::mat3* dev_Mms;
//static Cam cam;
static glm::mat4 Mview;
static glm::mat4 Mmod;
static glm::mat4 Mproj;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Writes fragment colors to the framebuffer
__global__
void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
int tlx = x*AA;
int tly = y*AA;
glm::vec3 color(0.0);
int sx, sy;
for (int i = 0; i < AA; i++){
for (int j = 0; j < AA; j++){
sx = tlx + i;
sy = tly + j;
color += depthbuffer[sx+sy*w*AA].color;
}
}
color /= AA*AA;
framebuffer[index] = color;
}
}
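// Initializes the fixed-point depth to INT_MAX so the atomicMin-based depth test
// in kernRasterize starts from the farthest possible value.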
__global__ void initDepths(int n, Fragment* depthbuffer){
int index = threadIdx.x + (blockDim.x*blockIdx.x);
if (index < n){
depthbuffer[index].fixed_depth = INT_MAX;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
light.pos = glm::vec3(3.0, 3.0, 3.0);
iter = 0;
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
void rasterizeSetBuffers(
int _bufIdxSize, int *bufIdx,
int _vertCount, float *bufPos, float *bufNor, float *bufCol) {
bufIdxSize = _bufIdxSize;
vertCount = _vertCount;
// Vertex shading
vertInCount = _vertCount;
vertOutCount = vertInCount * NUM_INSTANCES;
fragCount = width * height * AA * AA;
primCount = vertOutCount / 3;
numVertBlocks = (vertCount - 1) / MAX_THREADS + 1;
numVertInBlocks = (vertInCount - 1) / MAX_THREADS + 1;
numVertOutBlocks = (vertOutCount - 1) / MAX_THREADS + 1;
numPrimBlocks = (primCount - 1) / MAX_THREADS + 1;
numFragBlocks = (fragCount - 1) / MAX_THREADS + 1;
printf("fragment count: %d\n", fragCount);
printf("vertex count: %d\n", vertCount);
printf("primitive count: %d\n", primCount);
//int numBlocks = (width*height - 1) / MAX_THREADS + 1;
//initDepths<<<numBlocks, MAX_THREADS>>>(width*height, dev_depthbuffer);
cudaFree(dev_bufIdx);
cudaMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
cudaMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), cudaMemcpyHostToDevice);
VertexIn *bufVertex = new VertexIn[_vertCount];
for (int i = 0; i < vertCount; i++) {
int j = i * 3;
bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
}
cudaFree(dev_bufVertex);
cudaMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
cudaMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), cudaMemcpyHostToDevice);
cudaFree(dev_bufVertexOut);
cudaMalloc(&dev_bufVertexOut, vertOutCount * sizeof(VertexOut));
cudaFree(dev_bufIdxOut);
cudaMalloc((void**)&dev_bufIdxOut, vertOutCount * sizeof(int));
cudaFree(dev_primitives);
cudaMalloc(&dev_primitives, primCount * sizeof(Triangle));
cudaMemset(dev_primitives, 0, primCount * sizeof(Triangle));
cudaFree(dev_depthbuffer);
cudaMalloc(&dev_depthbuffer, fragCount * sizeof(Fragment));
cudaMemset(dev_depthbuffer, 0, fragCount * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
checkCUDAError("rasterizeSetBuffers");
}
__global__ void kernShadeVerticesInstances(int n, int num_instances, VertexOut* vs_output, int* vs_output_idx, VertexIn* vs_input, int* vs_input_idx, glm::mat4* Mpvms, glm::mat3* Mms){
// n is the number of in vertices
// TODO: Can parallelize this if we do thread per output index instead of thread per input index
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n){
glm::mat4 Mpvm;
glm::vec4 new_pos;
for (int i = 0; i < num_instances; i++){
// Model-view-perspective transform for positions
Mpvm = Mpvms[i];
new_pos = Mpvm * glm::vec4(vs_input[index].pos, 1.0f);
vs_output[index + i*n].ndc_pos = glm::vec3(new_pos / new_pos.w);
vs_output[index + i*n].nor = glm::normalize(vs_input[index].nor * Mms[i]);
vs_output[index + i*n].col = vs_input[index].col;
vs_output_idx[index + i*n] = vs_input_idx[index] + i*n;
}
}
}
__global__ void kernShadeVertices(int n, VertexOut* vs_output, VertexIn* vs_input, glm::mat4 Mpvm, glm::mat3 Mm){
// Mm is the 3x3 rotation matrix computed as the inverse transpose of the model matrix, used to rotate normal vectors
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
vs_output[index].pos = vs_input[index].pos;
glm::vec4 new_pos = Mpvm * glm::vec4(vs_input[index].pos, 1.0f);
vs_output[index].ndc_pos = glm::vec3(new_pos / new_pos.w);
vs_output[index].nor = vs_input[index].nor * Mm;
vs_output[index].col = vs_input[index].col;
}
}
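// Geometry stage (not called in the current rasterize() pipeline): expands each
// input vertex into a tiny triangle by offsetting its NDC position, which
// effectively draws the mesh as points.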
__global__ void kernShadeGeometries(int n, VertexOut* out_vertices, int* idx, VertexOut* in_vertices){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
VertexOut vi = in_vertices[index];
idx[index * 3] = 3*index;
idx[index * 3 + 1] = 3*index + 1;
idx[index * 3 + 2] = 3*index + 2;
out_vertices[index * 3].ndc_pos = vi.ndc_pos;
out_vertices[index * 3].col = vi.col;
out_vertices[index * 3].pos = vi.pos;
out_vertices[index * 3].nor = vi.nor;
out_vertices[index * 3 + 1].ndc_pos = vi.ndc_pos + glm::vec3(0.01,0.0,0.0);
out_vertices[index * 3 + 1].col = vi.col;
out_vertices[index * 3 + 1].pos = vi.pos;
out_vertices[index * 3 + 1].nor = vi.nor;
out_vertices[index * 3 + 2].ndc_pos = vi.ndc_pos + glm::vec3(0.0, 0.01, 0.0);
out_vertices[index * 3 + 2].col = vi.col;
out_vertices[index * 3 + 2].pos = vi.pos;
out_vertices[index * 3 + 2].nor = vi.nor;
}
}
__global__ void kernAssemblePrimitives(int n, Triangle* primitives, VertexOut* vs_output, int* idx){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
int idx0 = idx[3 * index + 0];
int idx1 = idx[3 * index + 1];
int idx2 = idx[3 * index + 2];
primitives[index].v[0] = vs_output[idx0];
primitives[index].v[1] = vs_output[idx1];
primitives[index].v[2] = vs_output[idx2];
primitives[index].ndc_pos[0] = vs_output[idx0].ndc_pos;
primitives[index].ndc_pos[1] = vs_output[idx1].ndc_pos;
primitives[index].ndc_pos[2] = vs_output[idx2].ndc_pos;
primitives[index].v[0].col = glm::vec3(1.0, 0.0, 0.0);
primitives[index].v[1].col = glm::vec3(1.0, 0.0, 0.0);
primitives[index].v[2].col = glm::vec3(1.0, 0.0, 0.0);
}
}
// Each thread is responsible for rasterizing a single triangle
__global__ void kernRasterize(int n, Cam cam, Fragment* fs_input, Triangle* primitives){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
Triangle prim = primitives[index];
AABB aabb = getAABBForTriangle(primitives[index].ndc_pos);
glm::vec3 bary;
glm::vec2 point;
glm::vec3 points;
// Snap i,j to nearest fragment coordinate
int frag_width = cam.width * AA;
int frag_height = cam.height * AA;
float dx = 2.0f / (float)frag_width;
float dy = 2.0f / (float)frag_height;
float x;
float y;
int mini = max((int)(aabb.min.x / dx) + frag_width / 2 - 2, 0);
int minj = max((int)(aabb.min.y / dy) + frag_height / 2 - 2, 0);
int maxi = min((int)(aabb.max.x / dx) + frag_width / 2 + 2, frag_width-1);
int maxj = min((int)(aabb.max.y / dy) + frag_height / 2 + 2, frag_height-1);
float depth;
int fixed_depth;
int ind;
// Iterate through fragment coordinates
for (int j = minj; j < maxj; j++){
for (int i = mini; i < maxi; i++){
ind = i + j * frag_width;
// Get the NDC coordinate
x = dx*i - dx*frag_width/2.0f + dx/2.0f;
y = dy*j - dy*frag_height/2.0f + dy/2.0f;
point[0] = x;
point[1] = y;
bary = calculateBarycentricCoordinate(primitives[index].ndc_pos, point);
if (isBarycentricCoordInBounds(bary)){
depth = -getZAtCoordinate(bary, prim.ndc_pos);
fixed_depth = (int)(depth * INT_MAX);
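// atomicMin on the fixed-point depth resolves conflicts between triangles that
// cover the same fragment; the check below keeps the attributes of the closest
// fragment seen so far.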
int old = atomicMin(&fs_input[ind].fixed_depth, fixed_depth);
if (fs_input[ind].fixed_depth == fixed_depth){
fs_input[ind].depth = depth;
fs_input[ind].color = bary.x * prim.v[0].col + bary.y * prim.v[1].col + bary.z * prim.v[2].col; //glm::vec3(1.0, 0.0, 0.0);// prim.v[0].col;
fs_input[ind].norm = bary.x * prim.v[0].nor + bary.y * prim.v[1].nor + bary.z * prim.v[2].nor;
fs_input[ind].pos = bary.x * prim.v[0].pos + bary.y * prim.v[1].pos + bary.z * prim.v[2].pos;
fs_input[ind].ndc_pos = bary.x * prim.v[0].ndc_pos + bary.y * prim.v[1].ndc_pos + bary.z * prim.v[2].ndc_pos;
//fs_input[ind].color = fs_input[ind].norm;
}
}
}
}
}
}
__global__ void kernShadeFragments(int n, Fragment* fs_input, Light light){
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index < n){
if (fs_input[index].color != glm::vec3(0.0)){
glm::vec3 light_ray = glm::normalize(fs_input[index].pos - light.pos);
fs_input[index].color = fs_input[index].color * abs((glm::dot(glm::normalize(fs_input[index].norm), light_ray)));
}
}
}
void resetRasterize(){
cudaMemset(dev_depthbuffer, 0, fragCount * sizeof(Fragment));
initDepths<<<numFragBlocks, MAX_THREADS>>>(fragCount, dev_depthbuffer);
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
checkCUDAError("resetBuffers");
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, Cam cam) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
resetRasterize();
//Mmod = glm::mat4(1.0f);
Mview = glm::lookAt(cam.pos, cam.focus, cam.up);
Mproj = glm::perspective(cam.fovy, cam.aspect, cam.zNear, cam.zFar);
for (int i = 0; i < NUM_INSTANCES; i++){
Mmod = glm::mat4(1.0);
Mmod = glm::translate(Mmod, glm::vec3(i*0.5f,0.0f,i*-1.0f));
Mmod = glm::rotate(Mmod, i*3.14f/8.0f, glm::vec3(0.0,1.0,0.0));
Mms[i] = glm::inverseTranspose(glm::mat3(Mmod));
Mpvms[i] = Mproj * Mview * Mmod;
}
cudaMalloc((void**)&dev_Mpvms, NUM_INSTANCES*sizeof(glm::mat4));
cudaMemcpy(dev_Mpvms, Mpvms, NUM_INSTANCES*sizeof(glm::mat4), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_Mms, NUM_INSTANCES*sizeof(glm::mat3));
cudaMemcpy(dev_Mms, Mms, NUM_INSTANCES*sizeof(glm::mat3), cudaMemcpyHostToDevice);
// Vertex Shading
kernShadeVerticesInstances<<<numVertBlocks, MAX_THREADS>>>(vertCount, NUM_INSTANCES, dev_bufVertexOut, dev_bufIdxOut, dev_bufVertex, dev_bufIdx, dev_Mpvms, dev_Mms);
//kernShadeVertices<<<numVertBlocks, MAX_THREADS>>>(vertCount, dev_bufVertexOut, dev_bufVertex, Mpvm);
checkCUDAError("shadeVertices");
cudaFree(dev_Mpvms);
cudaFree(dev_Mms);
// Primitive Assembly
kernAssemblePrimitives<<<numPrimBlocks, MAX_THREADS>>>(primCount, dev_primitives, dev_bufVertexOut, dev_bufIdxOut);
checkCUDAError("assemblePrimitives");
// Rasterization
kernRasterize<<<numPrimBlocks, MAX_THREADS>>>(primCount, cam, dev_depthbuffer, dev_primitives);
checkCUDAError("rasterizePrimitives");
// Fragment shading
kernShadeFragments<<<numFragBlocks, MAX_THREADS>>>(fragCount, dev_depthbuffer, light);
checkCUDAError("shadeFragments");
// Copy depthbuffer colors into framebuffer
render<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer, dev_framebuffer);
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("rasterize");
iter += 1;
}
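/**
 * Illustrative usage sketch (assumption-heavy, not part of the original file):
 * how a caller might drive rasterize() once this file's buffers have been set
 * up by its initialization code (not shown in this excerpt). Only the Cam
 * fields that rasterize()/kernRasterize() actually read are filled in; "pbo"
 * is assumed to come from the caller's mapped GL pixel buffer object, and the
 * global light is assumed to be configured elsewhere.
 */
static void exampleRenderFrame(uchar4 *pbo, int w, int h) {
	Cam cam;
	cam.width  = w;
	cam.height = h;
	cam.aspect = (float)w / (float)h;
	cam.fovy   = 45.0f;                       // field of view, in whatever units this project's glm::perspective expects
	cam.zNear  = 0.1f;
	cam.zFar   = 100.0f;
	cam.pos    = glm::vec3(0.0f, 0.5f, 3.0f);
	cam.focus  = glm::vec3(0.0f, 0.0f, 0.0f);
	cam.up     = glm::vec3(0.0f, 1.0f, 0.0f);
	// One frame: vertex shading -> primitive assembly -> rasterization ->
	// fragment shading -> framebuffer -> PBO.
	rasterize(pbo, cam);
	// At shutdown the caller would invoke rasterizeFree() (defined below).
}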
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
cudaFree(dev_bufIdx);
dev_bufIdx = NULL;
cudaFree(dev_bufIdxOut);
dev_bufIdxOut = NULL;
cudaFree(dev_bufVertex);
dev_bufVertex = NULL;
cudaFree(dev_bufVertexOut);
dev_bufVertexOut = NULL;
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_depthbuffer);
dev_depthbuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
checkCUDAError("rasterizeFree");
}
|
0dc9db1aba7b7a19ec2d5c1ee2923751e116427f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@generated from magmablas/zlanhe.cu, normal z -> s, Thu Oct 8 23:05:34 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define inf_bs 32
#define max_bs 64
#define PRECISION_s
#define REAL
// =============================================================================
// inf-norm
/******************************************************************************/
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored lower.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */
__global__ void
slansy_inf_kernel_lower(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork,
int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int diag = blockIdx.x*inf_bs;
int ind = blockIdx.x*inf_bs + tx;
float res = 0.;
__shared__ float la[inf_bs][inf_bs+1];
if ( blockIdx.x < n_full_block ) {
// ------------------------------
// All full block rows
A += ind;
A += ty * lda;
// ----------
// loop over all blocks left of the diagonal block
for (int i=0; i < diag; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
A += lda*inf_bs;
__syncthreads();
// compute 4 partial sums of each row, i.e.,
// for ty=0: res = sum( la[tx, 0: 7] )
// for ty=1: res = sum( la[tx, 8:15] )
// for ty=2: res = sum( la[tx,16:23] )
// for ty=3: res = sum( la[tx,24:31] )
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// load diagonal block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
// copy lower triangle to upper triangle, and
// make diagonal real (zero imaginary part)
#pragma unroll 8
for (int i=ty*8; i < ty*8 + 8; i++) {
if ( i < tx ) {
la[i][tx] = la[tx][i];
}
#ifdef COMPLEX
else if ( i == tx ) {
la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 );
}
#endif
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
// ----------
// loop over all 32x32 blocks below diagonal block
A += inf_bs;
for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
// load block (transposed)
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[ty+j][tx] = A[j*lda];
}
A += inf_bs;
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// last partial block, which is (n_mod_bs by inf_bs)
if ( n_mod_bs > 0 ) {
// load block (transposed), with zeros for rows outside matrix
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
if ( tx < n_mod_bs ) {
la[ty+j][tx] = A[j*lda];
}
else {
la[ty+j][tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty] = MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
if ( ty == 0 ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
else {
// ------------------------------
// Last, partial block row
// Threads past end of matrix (i.e., ind >= n) are redundantly assigned
// the last row (n-1). At the end, those results are ignored -- only
// results for ind < n are saved into dwork.
if ( tx < n_mod_bs ) {
A += ind;
}
else {
A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row
}
A += ty * lda;
// ----------
// loop over all blocks left of the diagonal block
// each is (n_mod_bs by inf_bs)
for (int i=0; i < diag; i += inf_bs ) {
// load block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
A += lda*inf_bs;
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=0; j < 8; j++) {
res += MAGMA_S_ABS( la[tx][j+ty*8] );
}
__syncthreads();
}
// ----------
// partial diagonal block
if ( ty == 0 && tx < n_mod_bs ) {
// sum rows left of diagonal
for (int j=0; j < tx; j++) {
res += MAGMA_S_ABS( *A );
A += lda;
}
// sum diagonal (ignoring imaginary part)
res += MAGMA_D_ABS( MAGMA_S_REAL( *A ));
A += 1;
// sum column below diagonal
for (int j=tx+1; j < n_mod_bs; j++) {
res += MAGMA_S_ABS( *A );
A += 1;
}
}
__syncthreads();
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty]= MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
// rows outside matrix are ignored
if ( ty == 0 && tx < n_mod_bs ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
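/******************************************************************************/
/* Illustrative CPU reference (a sketch, not used by MAGMA): the row sums that
 * slansy_inf_kernel_lower computes in parallel. A is column-major with leading
 * dimension lda and only its lower triangle is referenced; entries above the
 * diagonal come from symmetry, A(i,j) = A(j,i). dwork[i] receives
 * sum_j |A(i,j)|, and the maximum of dwork over i is the inf-norm
 * (equal to the one-norm for a symmetric matrix). */
static void slansy_inf_reference_lower_cpu(
    int n, const float *A, int lda, float *dwork )
{
    for (int i = 0; i < n; ++i) {
        float sum = 0.f;
        for (int j = 0; j <= i; ++j)         // stored part: row i, columns 0..i
            sum += fabsf( A[i + j*lda] );
        for (int j = i+1; j < n; ++j)        // mirrored part: A(i,j) = A(j,i)
            sum += fabsf( A[j + i*lda] );
        dwork[i] = sum;
    }
}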
/******************************************************************************/
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored upper.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200).
* The upper implementation is similar to lower, but processes blocks
* in the transposed order:
* lower goes from left over to diagonal, then down to bottom;
* upper goes from top down to diagonal, then over to right.
* Differences are noted with # in comments. */
__global__ void
slansy_inf_kernel_upper(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork,
int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int diag = blockIdx.x*inf_bs;
int ind = blockIdx.x*inf_bs + tx;
float res = 0.;
__shared__ float la[inf_bs][inf_bs+1];
if ( blockIdx.x < n_full_block ) {
// ------------------------------
// All full block #columns
A += blockIdx.x*inf_bs*lda + tx; //#
A += ty * lda;
// ----------
// loop over all blocks #above the diagonal block
for (int i=0; i < diag; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block (#transposed)
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[ty+j][tx] = A[j*lda]; //#
}
A += inf_bs; //#
__syncthreads();
// compute 4 partial sums of each row, i.e.,
// for ty=0: res = sum( la[tx, 0: 7] )
// for ty=1: res = sum( la[tx, 8:15] )
// for ty=2: res = sum( la[tx,16:23] )
// for ty=3: res = sum( la[tx,24:31] )
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// load diagonal block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
// copy #upper triangle to #lower triangle, and
// make diagonal real (zero imaginary part)
#pragma unroll 8
for (int i=ty*8; i < ty*8 + 8; i++) {
if ( i > tx ) { //#
la[i][tx] = la[tx][i];
}
#ifdef COMPLEX
else if ( i == tx ) {
la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 );
}
#endif
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
// ----------
// loop over all 32x32 blocks #right of diagonal block
A += inf_bs*lda; //#
for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
// load block (#non-transposed)
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda]; //#
}
A += inf_bs*lda; //#
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// last partial block, which is #(inf_bs by n_mod_bs)
if ( n_mod_bs > 0 ) {
// load block (#non-transposed), with zeros for #cols outside matrix
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
if ( ty+j < n_mod_bs ) { //#
la[tx][ty+j] = A[j*lda]; //#
}
else {
la[tx][ty+j] = MAGMA_S_ZERO; //#
}
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty] = MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
if ( ty == 0 ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
else {
// ------------------------------
// Last, partial block #column
// Instead of assigning threads ind >= n to the last row (n-1), as in Lower,
// Upper simply adjusts loop bounds to avoid loading columns outside the matrix.
// Again, at the end, those results are ignored -- only
// results for ind < n are saved into dwork.
A += blockIdx.x*inf_bs*lda + tx; //#
A += ty * lda;
// ----------
// loop over all blocks #above the diagonal block
// each is #(inf_bs by n_mod_bs)
for (int i=0; i < diag; i += inf_bs ) {
// load block (#transposed), #ignoring columns outside matrix
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
if ( ty+j < n_mod_bs ) {
la[ty+j][tx] = A[j*lda];
}
}
A += inf_bs; //#
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=0; j < 8; j++) {
res += MAGMA_S_ABS( la[tx][j+ty*8] );
}
__syncthreads();
}
// ----------
// partial diagonal block
if ( ty == 0 && tx < n_mod_bs ) {
// #transpose pointer within diagonal block
// #i.e., from A = A(tx,ty), transpose to A = A(ty,tx).
A = A - tx - ty*lda + tx*lda + ty;
// sum #column above diagonal
for (int j=0; j < tx; j++) {
res += MAGMA_S_ABS( *A );
A += 1; //#
}
// sum diagonal (ignoring imaginary part)
res += MAGMA_D_ABS( MAGMA_S_REAL( *A ));
A += lda; //#
// sum #row right of diagonal
for (int j=tx+1; j < n_mod_bs; j++) {
res += MAGMA_S_ABS( *A );
A += lda; //#
}
}
__syncthreads();
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty]= MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
// rows outside matrix are ignored
if ( ty == 0 && tx < n_mod_bs ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */
extern "C" void
slansy_inf(
magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr dwork,
magma_queue_t queue )
{
dim3 threads( inf_bs, 4 );
dim3 grid( magma_ceildiv( n, inf_bs ), 1 );
magma_int_t n_full_block = (n - n % inf_bs) / inf_bs;
magma_int_t n_mod_bs = n % inf_bs;
if ( uplo == MagmaLower) {
hipLaunchKernelGGL(( slansy_inf_kernel_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, A, lda, dwork, n_full_block, n_mod_bs );
}
else {
hipLaunchKernelGGL(( slansy_inf_kernel_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, A, lda, dwork, n_full_block, n_mod_bs );
}
}
// =============================================================================
// max-norm
/******************************************************************************/
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */
__global__ void
slansy_max_kernel_lower(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork )
{
int ind = blockIdx.x*max_bs + threadIdx.x;
float res = 0;
if (ind < n) {
A += ind;
for (int j=0; j < ind; ++j) {
res = max_nan( res, MAGMA_S_ABS( *A ));
A += lda;
}
// diagonal element (ignoring imaginary part)
res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A )));
dwork[ind] = res;
}
}
/******************************************************************************/
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. */
__global__ void
slansy_max_kernel_upper(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork )
{
int ind = blockIdx.x*max_bs + threadIdx.x;
float res = 0;
if (ind < n) {
A += ind;
A += (n-1)*lda;
for (int j=n-1; j > ind; j--) {
res = max_nan( res, MAGMA_S_ABS( *A ));
A -= lda;
}
// diagonal element (ignoring imaginary part)
res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A )));
dwork[ind] = res;
}
}
/******************************************************************************/
/* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */
extern "C" void
slansy_max(
magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr dwork,
magma_queue_t queue )
{
dim3 threads( max_bs );
dim3 grid( magma_ceildiv( n, max_bs ) );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( slansy_max_kernel_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, A, lda, dwork );
}
else {
hipLaunchKernelGGL(( slansy_max_kernel_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, A, lda, dwork );
}
}
/***************************************************************************//**
Purpose
-------
SLANSY returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real symmetric matrix A.
SLANSY = ( max(abs(A(i,j))), NORM = MagmaMaxNorm
(
( norm1(A), NORM = MagmaOneNorm
(
( normI(A), NORM = MagmaInfNorm
(
( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of squares).
Note that max(abs(A(i,j))) is not a consistent matrix norm.
On error, returns SLANSY < 0: if SLANSY = -i, the i-th argument had an illegal value.
Arguments:
----------
@param[in]
norm magma_norm_t
Specifies the value to be returned in SLANSY as described above.
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is to be referenced.
- = MagmaUpper: Upper triangular part of A is referenced
- = MagmaLower: Lower triangular part of A is referenced
@param[in]
n INTEGER
The order of the matrix A. N >= 0. When N = 0, SLANSY is
set to zero.
@param[in]
dA REAL array on the GPU, dimension (LDDA,N)
The symmetric matrix A. If UPLO = MagmaUpper, the leading n by n
upper triangular part of A contains the upper triangular part
of the matrix A, and the strictly lower triangular part of A
is not referenced. If UPLO = MagmaLower, the leading n by n lower
triangular part of A contains the lower triangular part of
the matrix A, and the strictly upper triangular part of A is
not referenced. Note that the imaginary parts of the diagonal
elements need not be set and are assumed to be zero.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(N,1).
@param
dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= N.
NOTE: this is different than LAPACK, where WORK is required
only for norm1 and normI. Here max-norm also requires WORK.
@param[in]
lwork INTEGER
The dimension of the array DWORK. LWORK >= max( 1, N ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lanhe
*******************************************************************************/
extern "C" float
magmablas_slansy(
magma_norm_t norm, magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
magma_int_t info = 0;
// 1-norm == inf-norm since A is symmetric
bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm);
bool max_norm = (norm == MagmaMaxNorm);
// inf_norm Double-Complex requires > 16 KB shared data (arch >= 200)
#if defined(PRECISION_z)
const bool inf_implemented = (magma_getdevice_arch() >= 200);
#else
const bool inf_implemented = true;
#endif
if ( ! (max_norm || (inf_norm && inf_implemented)) )
info = -1;
else if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < n )
info = -5;
else if ( lwork < n )
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( n == 0 )
return 0;
float res = 0;
if ( inf_norm ) {
slansy_inf( uplo, n, dA, ldda, dwork, queue );
}
else {
slansy_max( uplo, n, dA, ldda, dwork, queue );
}
hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork );
magma_sgetvector( 1, &dwork[0], 1, &res, 1, queue );
return res;
}
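/******************************************************************************/
/* Illustrative call sequence (a sketch under stated assumptions, not part of
 * MAGMA): how a host program might use magmablas_slansy above to compute the
 * inf-norm of a symmetric matrix whose lower triangle already resides on the
 * GPU in dA (column-major, leading dimension ldda). It assumes the standard
 * MAGMA entry points magma_getdevice / magma_queue_create / magma_smalloc /
 * magma_free; the only workspace needed is dwork of length >= n. */
static float example_slansy_inf_norm(
    magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda )
{
    magma_device_t device;
    magma_queue_t queue;
    magmaFloat_ptr dwork;
    magma_getdevice( &device );
    magma_queue_create( device, &queue );
    if ( magma_smalloc( &dwork, n ) != MAGMA_SUCCESS ) {
        magma_queue_destroy( queue );
        return -1;  // sketch-level error handling only
    }
    float norm = magmablas_slansy( MagmaInfNorm, MagmaLower, n,
                                   dA, ldda, dwork, n, queue );
    magma_free( dwork );
    magma_queue_destroy( queue );
    return norm;
}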
| 0dc9db1aba7b7a19ec2d5c1ee2923751e116427f.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@generated from magmablas/zlanhe.cu, normal z -> s, Thu Oct 8 23:05:34 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define inf_bs 32
#define max_bs 64
#define PRECISION_s
#define REAL
// =============================================================================
// inf-norm
/******************************************************************************/
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored lower.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */
__global__ void
slansy_inf_kernel_lower(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork,
int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int diag = blockIdx.x*inf_bs;
int ind = blockIdx.x*inf_bs + tx;
float res = 0.;
__shared__ float la[inf_bs][inf_bs+1];
if ( blockIdx.x < n_full_block ) {
// ------------------------------
// All full block rows
A += ind;
A += ty * lda;
// ----------
// loop over all blocks left of the diagonal block
for (int i=0; i < diag; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
A += lda*inf_bs;
__syncthreads();
// compute 4 partial sums of each row, i.e.,
// for ty=0: res = sum( la[tx, 0: 7] )
// for ty=1: res = sum( la[tx, 8:15] )
// for ty=2: res = sum( la[tx,16:23] )
// for ty=3: res = sum( la[tx,24:31] )
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// load diagonal block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
// copy lower triangle to upper triangle, and
// make diagonal real (zero imaginary part)
#pragma unroll 8
for (int i=ty*8; i < ty*8 + 8; i++) {
if ( i < tx ) {
la[i][tx] = la[tx][i];
}
#ifdef COMPLEX
else if ( i == tx ) {
la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 );
}
#endif
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
// ----------
// loop over all 32x32 blocks below diagonal block
A += inf_bs;
for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
// load block (transposed)
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[ty+j][tx] = A[j*lda];
}
A += inf_bs;
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// last partial block, which is (n_mod_bs by inf_bs)
if ( n_mod_bs > 0 ) {
// load block (transposed), with zeros for rows outside matrix
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
if ( tx < n_mod_bs ) {
la[ty+j][tx] = A[j*lda];
}
else {
la[ty+j][tx] = MAGMA_S_ZERO;
}
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty] = MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
if ( ty == 0 ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
else {
// ------------------------------
// Last, partial block row
// Threads past end of matrix (i.e., ind >= n) are redundantly assigned
// the last row (n-1). At the end, those results are ignored -- only
// results for ind < n are saved into dwork.
if ( tx < n_mod_bs ) {
A += ind;
}
else {
A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row
}
A += ty * lda;
// ----------
// loop over all blocks left of the diagonal block
// each is (n_mod_bs by inf_bs)
for (int i=0; i < diag; i += inf_bs ) {
// load block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
A += lda*inf_bs;
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=0; j < 8; j++) {
res += MAGMA_S_ABS( la[tx][j+ty*8] );
}
__syncthreads();
}
// ----------
// partial diagonal block
if ( ty == 0 && tx < n_mod_bs ) {
// sum rows left of diagonal
for (int j=0; j < tx; j++) {
res += MAGMA_S_ABS( *A );
A += lda;
}
// sum diagonal (ignoring imaginary part)
res += MAGMA_D_ABS( MAGMA_S_REAL( *A ));
A += 1;
// sum column below diagonal
for (int j=tx+1; j < n_mod_bs; j++) {
res += MAGMA_S_ABS( *A );
A += 1;
}
}
__syncthreads();
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty]= MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
// rows outside matrix are ignored
if ( ty == 0 && tx < n_mod_bs ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored upper.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200).
* The upper implementation is similar to lower, but processes blocks
* in the transposed order:
* lower goes from left over to diagonal, then down to bottom;
* upper goes from top down to diagonal, then over to right.
* Differences are noted with # in comments. */
__global__ void
slansy_inf_kernel_upper(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork,
int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int diag = blockIdx.x*inf_bs;
int ind = blockIdx.x*inf_bs + tx;
float res = 0.;
__shared__ float la[inf_bs][inf_bs+1];
if ( blockIdx.x < n_full_block ) {
// ------------------------------
// All full block #columns
A += blockIdx.x*inf_bs*lda + tx; //#
A += ty * lda;
// ----------
// loop over all blocks #above the diagonal block
for (int i=0; i < diag; i += inf_bs ) {
// 32x4 threads cooperatively load 32x32 block (#transposed)
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[ty+j][tx] = A[j*lda]; //#
}
A += inf_bs; //#
__syncthreads();
// compute 4 partial sums of each row, i.e.,
// for ty=0: res = sum( la[tx, 0: 7] )
// for ty=1: res = sum( la[tx, 8:15] )
// for ty=2: res = sum( la[tx,16:23] )
// for ty=3: res = sum( la[tx,24:31] )
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// load diagonal block
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda];
}
__syncthreads();
// copy #upper triangle to #lower triangle, and
// make diagonal real (zero imaginary part)
#pragma unroll 8
for (int i=ty*8; i < ty*8 + 8; i++) {
if ( i > tx ) { //#
la[i][tx] = la[tx][i];
}
#ifdef COMPLEX
else if ( i == tx ) {
la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 );
}
#endif
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
// ----------
// loop over all 32x32 blocks #right of diagonal block
A += inf_bs*lda; //#
for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
// load block (#non-transposed)
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
la[tx][ty+j] = A[j*lda]; //#
}
A += inf_bs*lda; //#
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// last partial block, which is #(inf_bs by n_mod_bs)
if ( n_mod_bs > 0 ) {
// load block (#non-transposed), with zeros for #cols outside matrix
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
if ( ty+j < n_mod_bs ) { //#
la[tx][ty+j] = A[j*lda]; //#
}
else {
la[tx][ty+j] = MAGMA_S_ZERO; //#
}
}
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=ty*8; j < ty*8 + 8; j++) {
res += MAGMA_S_ABS( la[tx][j] );
}
__syncthreads();
}
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty] = MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
if ( ty == 0 ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
else {
// ------------------------------
// Last, partial block #column
// Instead of assigning threads ind >= n to the last row (n-1), as in Lower,
// Upper simply adjusts loop bounds to avoid loading columns outside the matrix.
// Again, at the end, those results are ignored -- only
// results for ind < n are saved into dwork.
A += blockIdx.x*inf_bs*lda + tx; //#
A += ty * lda;
// ----------
// loop over all blocks #above the diagonal block
// each is #(inf_bs by n_mod_bs)
for (int i=0; i < diag; i += inf_bs ) {
// load block (#transposed), #ignoring columns outside matrix
#pragma unroll 8
for (int j=0; j < inf_bs; j += 4) {
if ( ty+j < n_mod_bs ) {
la[ty+j][tx] = A[j*lda];
}
}
A += inf_bs; //#
__syncthreads();
// partial row sums
#pragma unroll 8
for (int j=0; j < 8; j++) {
res += MAGMA_S_ABS( la[tx][j+ty*8] );
}
__syncthreads();
}
// ----------
// partial diagonal block
if ( ty == 0 && tx < n_mod_bs ) {
// #transpose pointer within diagonal block
// #i.e., from A = A(tx,ty), transpose to A = A(ty,tx).
A = A - tx - ty*lda + tx*lda + ty;
// sum #column above diagonal
for (int j=0; j < tx; j++) {
res += MAGMA_S_ABS( *A );
A += 1; //#
}
// sum diagonal (ignoring imaginary part)
res += MAGMA_D_ABS( MAGMA_S_REAL( *A ));
A += lda; //#
// sum #row right of diagonal
for (int j=tx+1; j < n_mod_bs; j++) {
res += MAGMA_S_ABS( *A );
A += lda; //#
}
}
__syncthreads();
// ----------
// 32x4 threads store partial sums into shared memory
la[tx][ty]= MAGMA_S_MAKE( res, 0. );
__syncthreads();
// first column of 32x1 threads computes final sum of each row
// rows outside matrix are ignored
if ( ty == 0 && tx < n_mod_bs ) {
res = res
+ MAGMA_S_REAL( la[tx][1] )
+ MAGMA_S_REAL( la[tx][2] )
+ MAGMA_S_REAL( la[tx][3] );
dwork[ind] = res;
}
}
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */
extern "C" void
slansy_inf(
magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr dwork,
magma_queue_t queue )
{
dim3 threads( inf_bs, 4 );
dim3 grid( magma_ceildiv( n, inf_bs ), 1 );
magma_int_t n_full_block = (n - n % inf_bs) / inf_bs;
magma_int_t n_mod_bs = n % inf_bs;
if ( uplo == MagmaLower) {
slansy_inf_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>>
( n, A, lda, dwork, n_full_block, n_mod_bs );
}
else {
slansy_inf_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>>
( n, A, lda, dwork, n_full_block, n_mod_bs );
}
}
// =============================================================================
// max-norm
/******************************************************************************/
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */
__global__ void
slansy_max_kernel_lower(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork )
{
int ind = blockIdx.x*max_bs + threadIdx.x;
float res = 0;
if (ind < n) {
A += ind;
for (int j=0; j < ind; ++j) {
res = max_nan( res, MAGMA_S_ABS( *A ));
A += lda;
}
// diagonal element (ignoring imaginary part)
res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A )));
dwork[ind] = res;
}
}
/******************************************************************************/
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. */
__global__ void
slansy_max_kernel_upper(
int n,
const float * __restrict__ A, int lda,
float * __restrict__ dwork )
{
int ind = blockIdx.x*max_bs + threadIdx.x;
float res = 0;
if (ind < n) {
A += ind;
A += (n-1)*lda;
for (int j=n-1; j > ind; j--) {
res = max_nan( res, MAGMA_S_ABS( *A ));
A -= lda;
}
// diagonal element (ignoring imaginary part)
res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A )));
dwork[ind] = res;
}
}
/******************************************************************************/
/* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */
extern "C" void
slansy_max(
magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr dwork,
magma_queue_t queue )
{
dim3 threads( max_bs );
dim3 grid( magma_ceildiv( n, max_bs ) );
if ( uplo == MagmaLower ) {
slansy_max_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>>
( n, A, lda, dwork );
}
else {
slansy_max_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>>
( n, A, lda, dwork );
}
}
/***************************************************************************//**
Purpose
-------
SLANSY returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
real symmetric matrix A.
SLANSY = ( max(abs(A(i,j))), NORM = MagmaMaxNorm
(
( norm1(A), NORM = MagmaOneNorm
(
( normI(A), NORM = MagmaInfNorm
(
( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of squares).
Note that max(abs(A(i,j))) is not a consistent matrix norm.
On error, returns SLANSY < 0: if SLANSY = -i, the i-th argument had an illegal value.
Arguments:
----------
@param[in]
norm magma_norm_t
Specifies the value to be returned in SLANSY as described above.
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is to be referenced.
- = MagmaUpper: Upper triangular part of A is referenced
- = MagmaLower: Lower triangular part of A is referenced
@param[in]
n INTEGER
The order of the matrix A. N >= 0. When N = 0, SLANSY is
set to zero.
@param[in]
dA REAL array on the GPU, dimension (LDDA,N)
The symmetric matrix A. If UPLO = MagmaUpper, the leading n by n
upper triangular part of A contains the upper triangular part
of the matrix A, and the strictly lower triangular part of A
is not referenced. If UPLO = MagmaLower, the leading n by n lower
triangular part of A contains the lower triangular part of
the matrix A, and the strictly upper triangular part of A is
not referenced. Note that the imaginary parts of the diagonal
elements need not be set and are assumed to be zero.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(N,1).
@param
dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= N.
NOTE: this is different than LAPACK, where WORK is required
only for norm1 and normI. Here max-norm also requires WORK.
@param[in]
lwork INTEGER
The dimension of the array DWORK. LWORK >= max( 1, N ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lanhe
*******************************************************************************/
extern "C" float
magmablas_slansy(
magma_norm_t norm, magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
magma_int_t info = 0;
// 1-norm == inf-norm since A is symmetric
bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm);
bool max_norm = (norm == MagmaMaxNorm);
// inf_norm Double-Complex requires > 16 KB shared data (arch >= 200)
#if defined(PRECISION_z)
const bool inf_implemented = (magma_getdevice_arch() >= 200);
#else
const bool inf_implemented = true;
#endif
if ( ! (max_norm || (inf_norm && inf_implemented)) )
info = -1;
else if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < n )
info = -5;
else if ( lwork < n )
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
/* Quick return */
if ( n == 0 )
return 0;
float res = 0;
if ( inf_norm ) {
slansy_inf( uplo, n, dA, ldda, dwork, queue );
}
else {
slansy_max( uplo, n, dA, ldda, dwork, queue );
}
magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork );
magma_sgetvector( 1, &dwork[0], 1, &res, 1, queue );
return res;
}
|
4c4126a6bf73b907e4dd13b1da3c556b459f162f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <cutil_inline.h>
#include <rocblas.h>
#include <time.h>
#include <vector>
#include <string>
#include <iostream>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
#include "layer_hip.cuh"
#include "weights.cuh"
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ "adjustLearningRate", adjustLearningRate, METH_VARARGS },
{ "setNoiseParams", setNoiseParams, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
PyObject* adjustLearningRate(PyObject *self, PyObject *args) {
assert(model != NULL);
float factor;
if (!PyArg_ParseTuple(args, "f", &factor)) {
return NULL;
}
for (int i = 0; i < model->getNumLayers(); i++){
Layer& l = model->getLayer(i);
if (l.getType() == "fc" || l.getType() == "conv") {
((WeightLayer&)l).adjustLearningRate(factor);
}
}
return Py_BuildValue("i", 0);
}
PyObject* setNoiseParams(PyObject *self, PyObject *args) {
assert(model != NULL);
float epsW, wc;
if (!PyArg_ParseTuple(args, "ff", &epsW, &wc)) {
return NULL;
}
WeightLayer& l = (WeightLayer&)model->getLayer(model->getNumLayers() - 2);
l.getWeights(0).setEps(epsW);
l.getWeights(0).setWC(wc);
return Py_BuildValue("i", 0);
}
| 4c4126a6bf73b907e4dd13b1da3c556b459f162f.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <cutil_inline.h>
#include <cublas.h>
#include <time.h>
#include <vector>
#include <string>
#include <iostream>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
#include "layer.cuh"
#include "weights.cuh"
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ "adjustLearningRate", adjustLearningRate, METH_VARARGS },
{ "setNoiseParams", setNoiseParams, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
PyObject* adjustLearningRate(PyObject *self, PyObject *args) {
assert(model != NULL);
float factor;
if (!PyArg_ParseTuple(args, "f", &factor)) {
return NULL;
}
for (int i = 0; i < model->getNumLayers(); i++){
Layer& l = model->getLayer(i);
if (l.getType() == "fc" || l.getType() == "conv") {
((WeightLayer&)l).adjustLearningRate(factor);
}
}
return Py_BuildValue("i", 0);
}
PyObject* setNoiseParams(PyObject *self, PyObject *args) {
assert(model != NULL);
float epsW, wc;
if (!PyArg_ParseTuple(args, "ff", &epsW, &wc)) {
return NULL;
}
WeightLayer& l = (WeightLayer&)model->getLayer(model->getNumLayers() - 2);
l.getWeights(0).setEps(epsW);
l.getWeights(0).setWC(wc);
return Py_BuildValue("i", 0);
}
|
162964586fd20751c7795bd74d6f6193d52decd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include "matrixmul_kernel.hip"
#include "assist.h"
#define ERROR_CHECK { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
bool if_quiet = true;
unsigned int timer_compute = 0;
unsigned int timer_memory = 0;
int i, j;
char *matrix_id = NULL, *input_fn = NULL, *gold_fn = NULL;
float * deviceM = NULL, * deviceN = NULL, * deviceP = NULL;
int Mw = 0, Mh = 0, Nw = 0, Nh = 0, Pw = 0, Ph = 0;
int block_size = 0;
if (argc == 2) {
matrix_id = strdup(argv[1]);
} else {
fprintf(stderr, "Error: Wrong input parameter numbers.\n");
fprintf(stderr, "Usage:\n"
"$> ./lab2.1-matrixmul <8, 128, 512, 3072, 4096>\n"
"Examples:\n"
" $> ./lab2.1-matrixmul 128\n"
);
exit(1);
}
// Note: Matrix width and height must be multiples of block size.
if (!strcmp(matrix_id, "8")) {
Mw = Mh = Nw = Nh = Pw = Ph = 8;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_8.bin");
gold_fn = strdup("matrix_8.gold");
        if_quiet = false; // display matrix contents
} else
if (!strcmp(matrix_id, "128")) {
Mw = Mh = Nw = Nh = Pw = Ph = 128;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_128.bin");
gold_fn = strdup("matrix_128.gold");
        if_quiet = true; // do not display matrix contents
} else
if (!strcmp(matrix_id, "512")) {
Mw = Mh = Nw = Nh = Pw = Ph = 512;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_512.bin");
gold_fn = strdup("matrix_512.gold");
        if_quiet = true; // do not display matrix contents
} else
if (!strcmp(matrix_id, "3072")) {
Mw = Mh = Nw = Nh = Pw = Ph = 3072;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_3072.bin");
gold_fn = strdup("matrix_3072.gold");
        if_quiet = true; // do not display matrix contents
} else
if (!strcmp(matrix_id, "4096")) {
Mw = Mh = Nw = Nh = Pw = Ph = 4096;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_4096.bin");
gold_fn = strdup("matrix_4096.gold");
        if_quiet = true; // do not display matrix contents
} else {
printf("***Error on %s: %d: Undefined matrix ID.\n",
__FILE__, __LINE__);
printf(" You should add it to the source code.\n");
printf(" Current available ID's are 8, 128, 512, 3072, 4096.\n");
exit(1);
}
if (block_size > Mw) {
printf("***Error on %s: %d: Block size %d is larger than matrix width %d.\n",
__FILE__, __LINE__, block_size, Mw);
printf(" You should define a smaller block size.\n");
exit(1);
}
printf("Input matrix file name: %s\n", input_fn);
// -----------------------------------------------------------------------
// Setup host side
// -----------------------------------------------------------------------
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices M and N.\n");
printf(" M: %d x %d\n", Mw, Mh);
printf(" N: %d x %d\n", Nw, Nh);
unsigned int size_M = Mw * Mh;
unsigned int mem_size_M = sizeof(float) * size_M;
float* hostM = (float*) malloc(mem_size_M);
unsigned int size_N = Nw * (Nh);
unsigned int mem_size_N = sizeof(float) * size_N;
float* hostN = (float*) malloc(mem_size_N);
// allocate memory for the result on host side
printf(" Allocate memory for the result on host side.\n");
unsigned int size_P = Pw * Ph;
unsigned int mem_size_P = sizeof(float) * size_P;
float* hostP = (float*) malloc(mem_size_P);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
unsigned int * matrix = ReadMatrixFile(input_fn, Pw, Ph, if_quiet);
for (i = 0; i < Mw; i++)
for (j = 0; j < Nw; j++)
hostM[i * Mw + j] = hostN[i * Mw + j] = (float) matrix[i*Mw + j];
free(matrix); matrix = NULL;
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
CUT_SAFE_CALL(cutCreateTimer(&timer_memory));
CUT_SAFE_CALL(cutStartTimer(timer_memory));
printf(" Allocate device memory.\n");
CUDA_SAFE_CALL(hipMalloc((void**) &deviceM, mem_size_M));
CUDA_SAFE_CALL(hipMalloc((void**) &deviceN, mem_size_N));
printf(" Copy host memory data to device.\n");
CUDA_SAFE_CALL(hipMemcpy(deviceM, hostM, mem_size_M,
hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(deviceN, hostN, mem_size_N,
hipMemcpyHostToDevice));
printf(" Allocate device memory for results.\n");
CUDA_SAFE_CALL(hipMalloc((void**) &deviceP, mem_size_P));
hipMemset(deviceP, 0, mem_size_P);
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ===================================================================
// Initialize the thread block and kernel grid dimensions
// and invoke the CUDA kernel.
// You may assume that each matrix dimension is a multiple
// of the defined constant block_size.
// ===================================================================
printf(" Setup kernel execution parameters.\n");
// Two equivalent ways of declaring the block and grid dimensions
#if 1
dim3 block;
dim3 grid;
grid.x = Pw/block_size;
grid.y = Pw/block_size;
block.x = block_size;
block.y = block_size;
#else
dim3 block(block_size, block_size);
dim3 grid(Pw/block.x, Pw/block.y);
#endif
printf(" # of threads in a block: %d x %d (%d)\n",
block.x, block.y, block.x * block.y);
printf(" # of blocks in a grid : %d x %d (%d)\n",
grid.x, grid.y, grid.x * grid.y);
// ================================================
// Initialize the block and grid dimensions here
// ================================================
printf(" Executing the kernel...\n");
// Start the timer_compute to calculate how much time we spent on it.
CUT_SAFE_CALL(cutCreateTimer(&timer_compute));
CUT_SAFE_CALL(cutStartTimer(timer_compute));
// Invoke the CUDA kernel here
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(block), 0, 0, deviceP, deviceM, deviceN, Mw, Nw);
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
hipDeviceSynchronize();
// Stop the timer_compute
CUT_SAFE_CALL(cutStopTimer(timer_compute));
// check if kernel execution generated an error
ERROR_CHECK
CUT_CHECK_ERROR("Kernel execution failed");
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
CUT_SAFE_CALL(cutStartTimer(timer_memory));
hipMemcpy(hostP, deviceP, mem_size_P, hipMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ================================================
// Show timing information
// ================================================
printf(" GPU memory access time: %f (ms)\n",
cutGetTimerValue(timer_memory));
printf(" GPU computation time : %f (ms)\n",
cutGetTimerValue(timer_compute));
printf(" GPU processing time : %f (ms)\n",
cutGetTimerValue(timer_compute) + cutGetTimerValue(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_compute));
// ================================================
// Do comparison
// ================================================
// Full result check when input matrix is <= 512x512
//if (0) {
if (Mw * Nw > 512*512) {
printf("\nInput matrix size is too big. Skip computing reference.\n");
} else {
printf("\nCheck results with those computed by CPU.\n");
printf (" Computing reference solution.\n");
CUT_SAFE_CALL(cutCreateTimer(&timer_compute));
CUT_SAFE_CALL(cutStartTimer(timer_compute));
float* reference = (float*) malloc(mem_size_P);
computeGold(reference, hostM, hostN, Mh, Mw, Nw);
CUT_SAFE_CALL(cutStopTimer(timer_compute));
printf(" CPU Processing time : %f (ms)\n",
cutGetTimerValue(timer_compute));
CUT_SAFE_CALL(cutDeleteTimer(timer_compute));
printf(" CPU checksum: %g\n", CheckSum(reference, Mw, Nw));
matrix = (unsigned int *) malloc(Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) reference[i*Pw + j];
WriteMatrixFile("lab2.1-matrixmul.gold", matrix, Pw, Ph, 1);
free(matrix); matrix = NULL;
free(reference);
}
printf(" GPU checksum: %g\n", CheckSum(hostP, Mw, Nw));
/* Write matrix C to output binary file */
matrix = (unsigned int *) malloc (Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) hostP[i*Pw + j];
WriteMatrixFile("lab2.1-matrixmul.bin", matrix, Pw, Ph, 1);
free (matrix); matrix = NULL;
if (Mw >= 3072 && Mh >= 3072) {
CompareMatrixFile("lab2.1-matrixmul.bin", gold_fn, Pw, Ph, if_quiet);
} else {
CompareMatrixFile("lab2.1-matrixmul.bin", "lab2.1-matrixmul.gold",
Pw, Ph, if_quiet);
}
// clean up memory
free(hostM); free(hostN); free(hostP);
free(input_fn); free(gold_fn);
// ===================================================================
// Free the device memory
// ===================================================================
CUDA_SAFE_CALL(hipFree(deviceM));
CUDA_SAFE_CALL(hipFree(deviceN));
CUDA_SAFE_CALL(hipFree(deviceP));
}
| 162964586fd20751c7795bd74d6f6193d52decd3.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: P = M * N.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include "matrixmul_kernel.cu"
#include "assist.h"
#define ERROR_CHECK { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
bool if_quiet = true;
unsigned int timer_compute = 0;
unsigned int timer_memory = 0;
int i, j;
char *matrix_id = NULL, *input_fn = NULL, *gold_fn = NULL;
float * deviceM = NULL, * deviceN = NULL, * deviceP = NULL;
int Mw = 0, Mh = 0, Nw = 0, Nh = 0, Pw = 0, Ph = 0;
int block_size = 0;
if (argc == 2) {
matrix_id = strdup(argv[1]);
} else {
fprintf(stderr, "Error: Wrong input parameter numbers.\n");
fprintf(stderr, "Usage:\n"
"$> ./lab2.1-matrixmul <8, 128, 512, 3072, 4096>\n"
"Examples:\n"
" $> ./lab2.1-matrixmul 128\n"
);
exit(1);
}
// Note: Matrix width and height must be multiples of block size.
if (!strcmp(matrix_id, "8")) {
Mw = Mh = Nw = Nh = Pw = Ph = 8;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_8.bin");
gold_fn = strdup("matrix_8.gold");
if_quiet = false; // Display matrix contents
} else
if (!strcmp(matrix_id, "128")) {
Mw = Mh = Nw = Nh = Pw = Ph = 128;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_128.bin");
gold_fn = strdup("matrix_128.gold");
if_quiet = true; // Do not display matrix contents
} else
if (!strcmp(matrix_id, "512")) {
Mw = Mh = Nw = Nh = Pw = Ph = 512;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_512.bin");
gold_fn = strdup("matrix_512.gold");
if_quiet = true; // Do not display matrix contents
} else
if (!strcmp(matrix_id, "3072")) {
Mw = Mh = Nw = Nh = Pw = Ph = 3072;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_3072.bin");
gold_fn = strdup("matrix_3072.gold");
if_quiet = true; // Do not display matrix contents
} else
if (!strcmp(matrix_id, "4096")) {
Mw = Mh = Nw = Nh = Pw = Ph = 4096;
block_size = BLOCK_SIZE; // thread number = block_size^2
input_fn = strdup("matrix_4096.bin");
gold_fn = strdup("matrix_4096.gold");
if_quiet = true; // Do not display matrix contents
} else {
printf("***Error on %s: %d: Undefined matrix ID.\n",
__FILE__, __LINE__);
printf(" You should add it to the source code.\n");
printf(" Current available ID's are 8, 128, 512, 3072, 4096.\n");
exit(1);
}
if (block_size > Mw) {
printf("***Error on %s: %d: Block size %d is larger than matrix width %d.\n",
__FILE__, __LINE__, block_size, Mw);
printf(" You should define a smaller block size.\n");
exit(1);
}
printf("Input matrix file name: %s\n", input_fn);
// -----------------------------------------------------------------------
// Setup host side
// -----------------------------------------------------------------------
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices M and N.\n");
printf(" M: %d x %d\n", Mw, Mh);
printf(" N: %d x %d\n", Nw, Nh);
unsigned int size_M = Mw * Mh;
unsigned int mem_size_M = sizeof(float) * size_M;
float* hostM = (float*) malloc(mem_size_M);
unsigned int size_N = Nw * (Nh);
unsigned int mem_size_N = sizeof(float) * size_N;
float* hostN = (float*) malloc(mem_size_N);
// allocate memory for the result on host side
printf(" Allocate memory for the result on host side.\n");
unsigned int size_P = Pw * Ph;
unsigned int mem_size_P = sizeof(float) * size_P;
float* hostP = (float*) malloc(mem_size_P);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
unsigned int * matrix = ReadMatrixFile(input_fn, Pw, Ph, if_quiet);
for (i = 0; i < Mw; i++)
for (j = 0; j < Nw; j++)
hostM[i * Mw + j] = hostN[i * Mw + j] = (float) matrix[i*Mw + j];
free(matrix); matrix = NULL;
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
CUT_SAFE_CALL(cutCreateTimer(&timer_memory));
CUT_SAFE_CALL(cutStartTimer(timer_memory));
printf(" Allocate device memory.\n");
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceM, mem_size_M));
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceN, mem_size_N));
printf(" Copy host memory data to device.\n");
CUDA_SAFE_CALL(cudaMemcpy(deviceM, hostM, mem_size_M,
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(deviceN, hostN, mem_size_N,
cudaMemcpyHostToDevice));
printf(" Allocate device memory for results.\n");
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceP, mem_size_P));
cudaMemset(deviceP, 0, mem_size_P);
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ===================================================================
// Initialize the thread block and kernel grid dimensions
// and invoke the CUDA kernel.
// You may assume that each matrix dimension is a multiple
// of the defined constant block_size.
// ===================================================================
printf(" Setup kernel execution parameters.\n");
// Two equivalent ways of declaring the block and grid dimensions
#if 1
dim3 block;
dim3 grid;
grid.x = Pw/block_size;
grid.y = Pw/block_size;
block.x = block_size;
block.y = block_size;
#else
dim3 block(block_size, block_size);
dim3 grid(Pw/block.x, Pw/block.y);
#endif
printf(" # of threads in a block: %d x %d (%d)\n",
block.x, block.y, block.x * block.y);
printf(" # of blocks in a grid : %d x %d (%d)\n",
grid.x, grid.y, grid.x * grid.y);
// ================================================
// Initialize the block and grid dimensions here
// ================================================
printf(" Executing the kernel...\n");
// Start the timer_compute to calculate how much time we spent on it.
CUT_SAFE_CALL(cutCreateTimer(&timer_compute));
CUT_SAFE_CALL(cutStartTimer(timer_compute));
// Invoke the CUDA kernel here
matrixMul<<<grid, block>>> (deviceP, deviceM, deviceN, Mw, Nw);
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
cudaThreadSynchronize();
// Stop the timer_compute
CUT_SAFE_CALL(cutStopTimer(timer_compute));
// check if kernel execution generated an error
ERROR_CHECK
CUT_CHECK_ERROR("Kernel execution failed");
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
CUT_SAFE_CALL(cutStartTimer(timer_memory));
cudaMemcpy(hostP, deviceP, mem_size_P, cudaMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ================================================
// Show timing information
// ================================================
printf(" GPU memory access time: %f (ms)\n",
cutGetTimerValue(timer_memory));
printf(" GPU computation time : %f (ms)\n",
cutGetTimerValue(timer_compute));
printf(" GPU processing time : %f (ms)\n",
cutGetTimerValue(timer_compute) + cutGetTimerValue(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_compute));
// ================================================
// Do comparison
// ================================================
// Full result check when input matrix is <= 512x512
//if (0) {
if (Mw * Nw > 512*512) {
printf("\nInput matrix size is too big. Skip computing reference.\n");
} else {
printf("\nCheck results with those computed by CPU.\n");
printf (" Computing reference solution.\n");
CUT_SAFE_CALL(cutCreateTimer(&timer_compute));
CUT_SAFE_CALL(cutStartTimer(timer_compute));
float* reference = (float*) malloc(mem_size_P);
computeGold(reference, hostM, hostN, Mh, Mw, Nw);
CUT_SAFE_CALL(cutStopTimer(timer_compute));
printf(" CPU Processing time : %f (ms)\n",
cutGetTimerValue(timer_compute));
CUT_SAFE_CALL(cutDeleteTimer(timer_compute));
printf(" CPU checksum: %g\n", CheckSum(reference, Mw, Nw));
matrix = (unsigned int *) malloc(Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) reference[i*Pw + j];
WriteMatrixFile("lab2.1-matrixmul.gold", matrix, Pw, Ph, 1);
free(matrix); matrix = NULL;
free(reference);
}
printf(" GPU checksum: %g\n", CheckSum(hostP, Mw, Nw));
/* Write matrix C to output binary file */
matrix = (unsigned int *) malloc (Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) hostP[i*Pw + j];
WriteMatrixFile("lab2.1-matrixmul.bin", matrix, Pw, Ph, 1);
free (matrix); matrix = NULL;
if (Mw >= 3072 && Mh >= 3072) {
CompareMatrixFile("lab2.1-matrixmul.bin", gold_fn, Pw, Ph, if_quiet);
} else {
CompareMatrixFile("lab2.1-matrixmul.bin", "lab2.1-matrixmul.gold",
Pw, Ph, if_quiet);
}
// clean up memory
free(hostM); free(hostN); free(hostP);
free(input_fn); free(gold_fn);
// ===================================================================
// Free the device memory
// ===================================================================
CUDA_SAFE_CALL(cudaFree(deviceM));
CUDA_SAFE_CALL(cudaFree(deviceN));
CUDA_SAFE_CALL(cudaFree(deviceP));
}
|
91ec3d122019b8f8f9763415b78dfd2292c0eae1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <climits>
#include <algorithm>
#define SERIAL_SCALE 3
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void initialize(int* output, int size)
{
int flatId=(blockIdx.x * blockDim.x) + threadIdx.x;
if(flatId<size)
output[flatId]=flatId;
}
__global__
void transpose(int *input, int *output, int N){
const unsigned int column=blockIdx.x*32+threadIdx.x;
const unsigned int row=blockIdx.y*32+threadIdx.y;
const unsigned int newRow=(32*blockIdx.x+threadIdx.y);
const unsigned int newColumn=32*blockIdx.y+threadIdx.x;
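// Tile staged through shared memory; the row stride is padded (35 rather than 32 ints), presumably to avoid shared-memory bank conflicts on the transposed read below.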
__shared__ int cache[32][35];
cache[threadIdx.x][threadIdx.y]=input[N*row+column];
__syncthreads();
output[newRow*N+newColumn]=cache[threadIdx.y][threadIdx.x];
}
__global__
void propagateMin(int *G,int *result,int *modified,int N)
{
int column=(blockIdx.x * blockDim.x) + threadIdx.x;
bool m=false;
#define GET(array,row) array[N*(row)+column]
#define propagate(i) \
int currG=GET(G,i); \
if(prevG==currG) \
{ \
int currR=GET(result,i); \
if(currR>prevR) \
{ \
GET(result,i)=prevR; \
m=true; \
} \
else \
{ \
prevR=currR; \
} \
} \
else \
{ \
prevR=GET(result,i); \
} \
prevG=currG;
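// One thread per column: a downward sweep followed by an upward sweep propagates the minimum result value within each run of equal G values; *modified is set to -1 if anything changed.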
int prevG=GET(G,0);
int prevR=GET(result,0);
for(int i=1;i<N;++i)
{
propagate(i)
}
prevG=GET(G,N-1);
prevR=GET(result,N-1);
for(int i=N-2;i>=0;--i)
{
propagate(i)
}
if(m)
*modified=-1;
#undef propagate
#undef GET
}
}
| 91ec3d122019b8f8f9763415b78dfd2292c0eae1.cu | #include <cstdio>
#include <climits>
#include <algorithm>
#define SERIAL_SCALE 3
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void initialize(int* output, int size)
{
int flatId=(blockIdx.x * blockDim.x) + threadIdx.x;
if(flatId<size)
output[flatId]=flatId;
}
__global__
void transpose(int *input, int *output, int N){
const unsigned int column=blockIdx.x*32+threadIdx.x;
const unsigned int row=blockIdx.y*32+threadIdx.y;
const unsigned int newRow=(32*blockIdx.x+threadIdx.y);
const unsigned int newColumn=32*blockIdx.y+threadIdx.x;
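// Tile staged through shared memory; the row stride is padded (35 rather than 32 ints), presumably to avoid shared-memory bank conflicts on the transposed read below.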
__shared__ int cache[32][35];
cache[threadIdx.x][threadIdx.y]=input[N*row+column];
__syncthreads();
output[newRow*N+newColumn]=cache[threadIdx.y][threadIdx.x];
}
__global__
void propagateMin(int *G,int *result,int *modified,int N)
{
int column=(blockIdx.x * blockDim.x) + threadIdx.x;
bool m=false;
#define GET(array,row) array[N*(row)+column]
#define propagate(i) \
int currG=GET(G,i); \
if(prevG==currG) \
{ \
int currR=GET(result,i); \
if(currR>prevR) \
{ \
GET(result,i)=prevR; \
m=true; \
} \
else \
{ \
prevR=currR; \
} \
} \
else \
{ \
prevR=GET(result,i); \
} \
prevG=currG;
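// One thread per column: a downward sweep followed by an upward sweep propagates the minimum result value within each run of equal G values; *modified is set to -1 if anything changed.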
int prevG=GET(G,0);
int prevR=GET(result,0);
for(int i=1;i<N;++i)
{
propagate(i)
}
prevG=GET(G,N-1);
prevR=GET(result,N-1);
for(int i=N-2;i>=0;--i)
{
propagate(i)
}
if(m)
*modified=-1;
#undef propagate
#undef GET
}
}
|
5be04ea4e50092655e5e3adb24ed552384c0adf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <math.h>
#include <vector>
#include "cuda_2d.h"
#include "cudachk.h"
using namespace std;
int main(void)
{
float *a;
float *b;
float *c;
// dim3 blockSize (16,1,1);
dim3 blocks (32, 32, 1);
dim3 grid (32, 32, 1);
int n = 8192;
int m = 1024;
auto start = std::chrono::system_clock::now ();
// Allocate Unified Memory accessible from CPU or GPU
cudaChk(hipMalloc (&a, n * m * sizeof(float)));
cudaChk(hipMalloc (&b, n * m * sizeof(float)));
cudaChk(hipMalloc (&c, m * m * sizeof(float)));
std::cout << "multiply " << n << " x " << m << ".\n";
auto stop = std::chrono::system_clock::now ();
chrono::duration< double > dur = stop - start;
std::cout << "alloc took " << dur.count () << " s " << std::endl;
std::vector< float > input (n * m, 4.0f);
std::vector< float > zero (n * m, 0.0f);
std::cout << "size " << input.size () << std::endl;
start = std::chrono::system_clock::now ();
// initialize x and y arrays on the host
cudaChk(hipMemcpy (a, &input[0], n * m * sizeof(float), hipMemcpyHostToDevice));
cudaChk(hipMemcpy (b, &input[0], n * m * sizeof(float), hipMemcpyHostToDevice));
cudaChk(hipMemcpy (c, &zero [0], m * m * sizeof(float), hipMemcpyHostToDevice));
cudaChk(hipDeviceSynchronize());
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "init took " << dur.count () << " s " << std::endl;
auto tstart = std::chrono::system_clock::now ();
start = std::chrono::system_clock::now ();
// Run kernel on 1M elements on the GPU
for (int i = 0; i < 1; i++) {
hipLaunchKernelGGL(( matmul), dim3(grid), dim3(blocks), 0, 0, a, b, n, c);
}
// Wait for GPU to finish before accessing on host
cudaChk(hipDeviceSynchronize());
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "multiply took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
std::vector< float > res (m * m);
cudaChk(hipMemcpy (&res[0], c, m * m * sizeof (float), hipMemcpyDeviceToHost));
std::cout << "res: " << res[0] << "\n";
std::cout << "res: " << res[1] << "\n";
std::cout << "res: " << res[2] << "\n";
std::cout << "res: " << res[4] << "\n";
// Free memory
cudaChk(hipFree(a));
cudaChk(hipFree(b));
cudaChk(hipFree(c));
return 0;
}
| 5be04ea4e50092655e5e3adb24ed552384c0adf4.cu | #include <iostream>
#include <chrono>
#include <math.h>
#include <vector>
#include "cuda_2d.h"
#include "cudachk.h"
using namespace std;
int main(void)
{
float *a;
float *b;
float *c;
// dim3 blockSize (16,1,1);
dim3 blocks (32, 32, 1);
dim3 grid (32, 32, 1);
int n = 8192;
int m = 1024;
auto start = std::chrono::system_clock::now ();
// Allocate Unified Memory - accessible from CPU or GPU
cudaChk(cudaMalloc (&a, n * m * sizeof(float)));
cudaChk(cudaMalloc (&b, n * m * sizeof(float)));
cudaChk(cudaMalloc (&c, m * m * sizeof(float)));
std::cout << "multiply " << n << " x " << m << ".\n";
auto stop = std::chrono::system_clock::now ();
chrono::duration< double > dur = stop - start;
std::cout << "alloc took " << dur.count () << " s " << std::endl;
std::vector< float > input (n * m, 4.0f);
std::vector< float > zero (n * m, 0.0f);
std::cout << "size " << input.size () << std::endl;
start = std::chrono::system_clock::now ();
// initialize x and y arrays on the host
cudaChk(cudaMemcpy (a, &input[0], n * m * sizeof(float), cudaMemcpyHostToDevice));
cudaChk(cudaMemcpy (b, &input[0], n * m * sizeof(float), cudaMemcpyHostToDevice));
cudaChk(cudaMemcpy (c, &zero [0], m * m * sizeof(float), cudaMemcpyHostToDevice));
cudaChk(cudaDeviceSynchronize());
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "init took " << dur.count () << " s " << std::endl;
auto tstart = std::chrono::system_clock::now ();
start = std::chrono::system_clock::now ();
// Run kernel on 1M elements on the GPU
for (int i = 0; i < 1; i++) {
matmul<<<grid, blocks>>> (a, b, n, c);
}
// Wait for GPU to finish before accessing on host
cudaChk(cudaDeviceSynchronize());
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "multiply took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
std::vector< float > res (m * m);
cudaChk(cudaMemcpy (&res[0], c, m * m * sizeof (float), cudaMemcpyDeviceToHost));
std::cout << "res: " << res[0] << "\n";
std::cout << "res: " << res[1] << "\n";
std::cout << "res: " << res[2] << "\n";
std::cout << "res: " << res[4] << "\n";
// Free memory
cudaChk(cudaFree(a));
cudaChk(cudaFree(b));
cudaChk(cudaFree(c));
return 0;
}
|
08ff6ae06edf0ab9775a171483de32c155e198fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/sgd.hpp>
#include "./clip_grad.cuh"
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
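// Elementwise vanilla SGD step over the flattened parameter: data[idx] -= lr * grad[idx].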
template <typename T>
__global__ void kernel_update(const int num, T *data, const T *grad,
const float lr) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { data[idx] -= lr * grad[idx]; }
}
template <typename T>
void SgdCuda<T>::update_impl(const string &key, VariablePtr param) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Size_t size = param->size();
const T *grad = param->get_grad_pointer<T>(this->ctx_);
T *data = param->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_update, size, data, grad, this->lr_);
auto &state = this->states_.at(key);
auto &t = state.t;
t = ::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
}
NBLA_DEF_WEIGHT_DECAY(SgdCuda, weight_decay_cuda);
NBLA_DEF_CLIP_GRAD_BY_NORM(SgdCuda, clip_grad_by_norm_cuda);
NBLA_DEF_CHECK_INF_GRAD(SgdCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(SgdCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(SgdCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(SgdCuda, scale_grad_impl_cuda);
} // namespace nbla
| 08ff6ae06edf0ab9775a171483de32c155e198fc.cu | // Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/sgd.hpp>
#include "./clip_grad.cuh"
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
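// Elementwise vanilla SGD step over the flattened parameter: data[idx] -= lr * grad[idx].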
template <typename T>
__global__ void kernel_update(const int num, T *data, const T *grad,
const float lr) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { data[idx] -= lr * grad[idx]; }
}
template <typename T>
void SgdCuda<T>::update_impl(const string &key, VariablePtr param) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Size_t size = param->size();
const T *grad = param->get_grad_pointer<T>(this->ctx_);
T *data = param->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_update, size, data, grad, this->lr_);
auto &state = this->states_.at(key);
auto &t = state.t;
t = std::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
}
NBLA_DEF_WEIGHT_DECAY(SgdCuda, weight_decay_cuda);
NBLA_DEF_CLIP_GRAD_BY_NORM(SgdCuda, clip_grad_by_norm_cuda);
NBLA_DEF_CHECK_INF_GRAD(SgdCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(SgdCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(SgdCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(SgdCuda, scale_grad_impl_cuda);
} // namespace nbla
|
7dc1e5de520fd767bc1a11e75505c5162f0ff66b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2018 - The OPRECOMP Project Consortium, Alma Mater Studiorum
Università di Bologna. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_fp16.h>
#include "fp16_conversion.h"
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
#define DATA_W 4096
#define DATA_H 4096
const int DATA_SIZE = DATA_W * DATA_H * sizeof(half);
const int KERNEL_SIZE = KERNEL_W * sizeof(half);
__device__ __constant__ half d_Kernel[KERNEL_W];
__global__
void convolutionRowGPU(
half2 *d_Result,
half2 *d_Data,
int dataW,
int dataH,
int kernelR
){
int index=blockIdx.x * blockDim.x +threadIdx.x; // global thread id
if(index >= dataW*dataH) return;
int y = index/dataW;
int x = index - y*dataW;
//printf(">>> %d,%d\n", x, y);
int k, d;
half2 sum;
sum = __float2half2_rn(0.0f);
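// Row pass of the separable Gaussian blur; each half2 element packs two horizontally adjacent pixels, and the extra branches handle kernel taps near the row edges.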
for(k = -kernelR; k <= kernelR; k++){
d = x + k;
if(d >= 0 && d < 2*dataW-1)
{
//printf("[%d] 1 k = %d (%d)\n", index, k, y * dataW + d);
//printf("[%d] 1 -> %f %f * %f\n", index, __low2float(d_Data[y * dataW + d]), __high2float(d_Data[y * dataW + d]), __half2float(d_Kernel[kernelR - k]));
sum = __hfma2(d_Data[y * dataW + d], __half2half2(d_Kernel[kernelR - k]), sum);
}
else if (d ==- 1)
{
//printf("[%d] 2 k = %d (%d) \n", index, k, y * dataW );
//printf("[%d] 2-> %f %f\n", index, __high2float(d_Data[y * dataW]), __half2float(d_Kernel[kernelR - k]));
sum = __hadd2(sum,__floats2half2_rn(0.0f, __high2float(d_Data[y * dataW]) * __half2float(d_Kernel[kernelR - k])));
}
else if (d == 2*dataW-1)
{
//printf("[%d] 3 k = %d (%d)\n", index, k, y * dataW + d);
//printf("[%d] 3-> %f %f\n", index, __low2float(d_Data[y * dataW + d - 1]), __half2float(d_Kernel[kernelR - k]));
sum = __hadd2(sum,__floats2half2_rn(__low2float(d_Data[y * dataW + d - 1]) * __half2float(d_Kernel[kernelR - k]), 0.0f));
}
}
d_Result[y * dataW + x] = sum;
}
__global__
void convolutionColumnGPU(
half2 *d_Result,
half2 *d_Data,
int dataW,
int dataH,
int kernelR
){
int index=blockIdx.x * blockDim.x +threadIdx.x; // global thread id
if(index >= dataW*dataH) return;
int y = index/dataW;
int x = index - y*dataW;
int k, d;
half2 sum;
sum = __float2half2_rn(0.0f);
for(k = -kernelR; k <= kernelR; k++){
d = y + k;
if(d >= 0 && d < dataH-1)
sum = __hfma2(d_Data[d * dataW + x], __half2half2(d_Kernel[kernelR - k]), sum);
else if (d ==- 1)
sum = __hadd2(sum,__floats2half2_rn(0.0f, __high2float(d_Data[d * dataW + x]) * __half2float(d_Kernel[kernelR - k])));
else if (d == dataH-1)
sum = __hadd2(sum,__floats2half2_rn(__low2float(d_Data[d * dataW + x]) * __half2float(d_Kernel[kernelR - k]), 0.0f));
}
d_Result[y * dataW + x] = sum;
}
int main(int argc, char **argv){
int i;
half
*h_Kernel,
*h_DataA;
half
*d_DataA,
*d_DataB;
h_Kernel = (half *)malloc(KERNEL_SIZE);
h_DataA = (half *)malloc(DATA_SIZE);
hipMalloc( (void **)&d_DataA, DATA_SIZE);
hipMalloc( (void **)&d_DataB, DATA_SIZE);
float kernelSum = 0;
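// Build a 1-D Gaussian kernel of radius KERNEL_RADIUS in half precision, then normalize it to unit sum.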
for(i = 0; i < KERNEL_W; i++){
float dist = (float)(i - KERNEL_RADIUS) / (float)KERNEL_RADIUS;
float val = expf(- dist * dist / 2);
h_Kernel[i] = approx_float_to_half(val);
kernelSum += val;
}
for(i = 0; i < KERNEL_W; i++)
h_Kernel[i] = approx_float_to_half(half_to_float(h_Kernel[i])/kernelSum);
srand(5497);
for(i = 0; i < DATA_W * DATA_H; i++)
h_DataA[i] = approx_float_to_half((float)rand() / (float)RAND_MAX);
hipMemcpyToSymbol(d_Kernel, h_Kernel, KERNEL_SIZE);
hipMemcpy(d_DataA, h_DataA, DATA_SIZE, hipMemcpyHostToDevice);
int blockSize=256;
int numBlocks = ((DATA_W * DATA_H)/2+blockSize-1)/blockSize;
//for(i = 0; i < DATA_W * DATA_H; i++)
// printf("%.15f,", h_DataA[i]);
hipLaunchKernelGGL(( convolutionRowGPU), dim3(numBlocks), dim3(blockSize), 0, 0,
(half2*)d_DataB,
(half2*)d_DataA,
DATA_W/2,
DATA_H,
KERNEL_RADIUS
);
//hipMemcpy(h_DataA, d_DataB, DATA_SIZE, hipMemcpyDeviceToHost);
//for(i = 0; i < DATA_W * DATA_H; i++)
// printf("%.15f,", half_to_float(h_DataA[i]));
hipLaunchKernelGGL(( convolutionColumnGPU), dim3(numBlocks), dim3(blockSize), 0, 0,
(half2*)d_DataA,
(half2*)d_DataB,
DATA_W/2,
DATA_H,
KERNEL_RADIUS
);
hipMemcpy(h_DataA, d_DataA, DATA_SIZE, hipMemcpyDeviceToHost);
for(i = 0; i < DATA_W * DATA_H; i++)
printf("%.15f,", half_to_float(h_DataA[i]));
return 0;
}
| 7dc1e5de520fd767bc1a11e75505c5162f0ff66b.cu | /*
Copyright 2018 - The OPRECOMP Project Consortium, Alma Mater Studiorum
Università di Bologna. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda_fp16.h>
#include "fp16_conversion.h"
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
#define DATA_W 4096
#define DATA_H 4096
const int DATA_SIZE = DATA_W * DATA_H * sizeof(half);
const int KERNEL_SIZE = KERNEL_W * sizeof(half);
__device__ __constant__ half d_Kernel[KERNEL_W];
__global__
void convolutionRowGPU(
half2 *d_Result,
half2 *d_Data,
int dataW,
int dataH,
int kernelR
){
int index=blockIdx.x * blockDim.x +threadIdx.x; // global thread id
if(index >= dataW*dataH) return;
int y = index/dataW;
int x = index - y*dataW;
//printf(">>> %d,%d\n", x, y);
int k, d;
half2 sum;
sum = __float2half2_rn(0.0f);
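// Row pass of the separable Gaussian blur; each half2 element packs two horizontally adjacent pixels, and the extra branches handle kernel taps near the row edges.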
for(k = -kernelR; k <= kernelR; k++){
d = x + k;
if(d >= 0 && d < 2*dataW-1)
{
//printf("[%d] 1 k = %d (%d)\n", index, k, y * dataW + d);
//printf("[%d] 1 -> %f %f * %f\n", index, __low2float(d_Data[y * dataW + d]), __high2float(d_Data[y * dataW + d]), __half2float(d_Kernel[kernelR - k]));
sum = __hfma2(d_Data[y * dataW + d], __half2half2(d_Kernel[kernelR - k]), sum);
}
else if (d ==- 1)
{
//printf("[%d] 2 k = %d (%d) \n", index, k, y * dataW );
//printf("[%d] 2-> %f %f\n", index, __high2float(d_Data[y * dataW]), __half2float(d_Kernel[kernelR - k]));
sum = __hadd2(sum,__floats2half2_rn(0.0f, __high2float(d_Data[y * dataW]) * __half2float(d_Kernel[kernelR - k])));
}
else if (d == 2*dataW-1)
{
//printf("[%d] 3 k = %d (%d)\n", index, k, y * dataW + d);
//printf("[%d] 3-> %f %f\n", index, __low2float(d_Data[y * dataW + d - 1]), __half2float(d_Kernel[kernelR - k]));
sum = __hadd2(sum,__floats2half2_rn(__low2float(d_Data[y * dataW + d - 1]) * __half2float(d_Kernel[kernelR - k]), 0.0f));
}
}
d_Result[y * dataW + x] = sum;
}
__global__
void convolutionColumnGPU(
half2 *d_Result,
half2 *d_Data,
int dataW,
int dataH,
int kernelR
){
int index=blockIdx.x * blockDim.x +threadIdx.x; // global thread id
if(index >= dataW*dataH) return;
int y = index/dataW;
int x = index - y*dataW;
int k, d;
half2 sum;
sum = __float2half2_rn(0.0f);
for(k = -kernelR; k <= kernelR; k++){
d = y + k;
if(d >= 0 && d < dataH-1)
sum = __hfma2(d_Data[d * dataW + x], __half2half2(d_Kernel[kernelR - k]), sum);
else if (d ==- 1)
sum = __hadd2(sum,__floats2half2_rn(0.0f, __high2float(d_Data[d * dataW + x]) * __half2float(d_Kernel[kernelR - k])));
else if (d == dataH-1)
sum = __hadd2(sum,__floats2half2_rn(__low2float(d_Data[d * dataW + x]) * __half2float(d_Kernel[kernelR - k]), 0.0f));
}
d_Result[y * dataW + x] = sum;
}
int main(int argc, char **argv){
int i;
half
*h_Kernel,
*h_DataA;
half
*d_DataA,
*d_DataB;
h_Kernel = (half *)malloc(KERNEL_SIZE);
h_DataA = (half *)malloc(DATA_SIZE);
cudaMalloc( (void **)&d_DataA, DATA_SIZE);
cudaMalloc( (void **)&d_DataB, DATA_SIZE);
float kernelSum = 0;
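// Build a 1-D Gaussian kernel of radius KERNEL_RADIUS in half precision, then normalize it to unit sum.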
for(i = 0; i < KERNEL_W; i++){
float dist = (float)(i - KERNEL_RADIUS) / (float)KERNEL_RADIUS;
float val = expf(- dist * dist / 2);
h_Kernel[i] = approx_float_to_half(val);
kernelSum += val;
}
for(i = 0; i < KERNEL_W; i++)
h_Kernel[i] = approx_float_to_half(half_to_float(h_Kernel[i])/kernelSum);
srand(5497);
for(i = 0; i < DATA_W * DATA_H; i++)
h_DataA[i] = approx_float_to_half((float)rand() / (float)RAND_MAX);
cudaMemcpyToSymbol(d_Kernel, h_Kernel, KERNEL_SIZE);
cudaMemcpy(d_DataA, h_DataA, DATA_SIZE, cudaMemcpyHostToDevice);
int blockSize=256;
int numBlocks = ((DATA_W * DATA_H)/2+blockSize-1)/blockSize;
//for(i = 0; i < DATA_W * DATA_H; i++)
// printf("%.15f,", h_DataA[i]);
convolutionRowGPU<<<numBlocks, blockSize>>>(
(half2*)d_DataB,
(half2*)d_DataA,
DATA_W/2,
DATA_H,
KERNEL_RADIUS
);
//cudaMemcpy(h_DataA, d_DataB, DATA_SIZE, cudaMemcpyDeviceToHost);
//for(i = 0; i < DATA_W * DATA_H; i++)
// printf("%.15f,", half_to_float(h_DataA[i]));
convolutionColumnGPU<<<numBlocks, blockSize>>>(
(half2*)d_DataA,
(half2*)d_DataB,
DATA_W/2,
DATA_H,
KERNEL_RADIUS
);
cudaMemcpy(h_DataA, d_DataA, DATA_SIZE, cudaMemcpyDeviceToHost);
for(i = 0; i < DATA_W * DATA_H; i++)
printf("%.15f,", half_to_float(h_DataA[i]));
return 0;
}
|
fc76a7371b4819f3103d44db203ffe42c4c0f5ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated c Wed Nov 14 22:53:49 2012
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
typedef struct {
cuFloatComplex *A1;
cuFloatComplex *A2;
int n, lda1, lda2, npivots;
short ipiv[BLOCK_SIZE];
} magmagpu_cswapblk_params_t;
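// Each launch applies up to BLOCK_SIZE row interchanges; ipiv entries set to -1 below mark rows that are already in place and are skipped by the kernels.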
__global__ void magmagpu_cswapblkrm( magmagpu_cswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if( y < params.n )
{
cuFloatComplex *A1 = params.A1 + y - params.lda1;
cuFloatComplex *A2 = params.A2 + y;
for( int i = 0; i < params.npivots; i++ )
{
A1 += params.lda1;
if ( params.ipiv[i] == -1 )
continue;
cuFloatComplex tmp1 = *A1;
cuFloatComplex *tmp2 = A2 + params.ipiv[i]*params.lda2;
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
}
__global__ void magmagpu_cswapblkcm( magmagpu_cswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = __mul24( y, params.lda1);
unsigned int offset2 = __mul24( y, params.lda2);
if( y < params.n )
{
cuFloatComplex *A1 = params.A1 + offset1 - 1;
cuFloatComplex *A2 = params.A2 + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A1++;
if ( params.ipiv[i] == -1 )
continue;
cuFloatComplex tmp1 = *A1;
cuFloatComplex *tmp2 = A2 + params.ipiv[i];
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
__syncthreads();
}
extern "C" void
magmablas_cswapblk( char storev, magma_int_t n,
cuFloatComplex *dA1T, magma_int_t lda1,
cuFloatComplex *dA2T, magma_int_t lda2,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
int blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
int k, im;
/* Quick return */
if ( n == 0 )
return;
if ( (storev == 'C') || (storev == 'c') ) {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
int sb = min(BLOCK_SIZE, i2-k);
magmagpu_cswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb };
for( int j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im)
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_cswapblkcm), dim3(blocks), dim3(blocksize), 0, magma_stream , params );
}
}else {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
int sb = min(BLOCK_SIZE, i2-k);
magmagpu_cswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb };
for( int j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im)
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_cswapblkrm), dim3(blocks), dim3(blocksize), 0, magma_stream , params );
}
}
}
| fc76a7371b4819f3103d44db203ffe42c4c0f5ed.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated c Wed Nov 14 22:53:49 2012
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
typedef struct {
cuFloatComplex *A1;
cuFloatComplex *A2;
int n, lda1, lda2, npivots;
short ipiv[BLOCK_SIZE];
} magmagpu_cswapblk_params_t;
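// Each launch applies up to BLOCK_SIZE row interchanges; ipiv entries set to -1 below mark rows that are already in place and are skipped by the kernels.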
__global__ void magmagpu_cswapblkrm( magmagpu_cswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if( y < params.n )
{
cuFloatComplex *A1 = params.A1 + y - params.lda1;
cuFloatComplex *A2 = params.A2 + y;
for( int i = 0; i < params.npivots; i++ )
{
A1 += params.lda1;
if ( params.ipiv[i] == -1 )
continue;
cuFloatComplex tmp1 = *A1;
cuFloatComplex *tmp2 = A2 + params.ipiv[i]*params.lda2;
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
}
__global__ void magmagpu_cswapblkcm( magmagpu_cswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = __mul24( y, params.lda1);
unsigned int offset2 = __mul24( y, params.lda2);
if( y < params.n )
{
cuFloatComplex *A1 = params.A1 + offset1 - 1;
cuFloatComplex *A2 = params.A2 + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A1++;
if ( params.ipiv[i] == -1 )
continue;
cuFloatComplex tmp1 = *A1;
cuFloatComplex *tmp2 = A2 + params.ipiv[i];
*A1 = *tmp2;
*tmp2 = tmp1;
}
}
__syncthreads();
}
extern "C" void
magmablas_cswapblk( char storev, magma_int_t n,
cuFloatComplex *dA1T, magma_int_t lda1,
cuFloatComplex *dA2T, magma_int_t lda2,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
int blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
int k, im;
/* Quick return */
if ( n == 0 )
return;
if ( (storev == 'C') || (storev == 'c') ) {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
int sb = min(BLOCK_SIZE, i2-k);
magmagpu_cswapblk_params_t params = { dA1T+k, dA2T, n, lda1, lda2, sb };
for( int j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im)
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_cswapblkcm<<< blocks, blocksize, 0, magma_stream >>>( params );
}
}else {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
int sb = min(BLOCK_SIZE, i2-k);
magmagpu_cswapblk_params_t params = { dA1T+k*lda1, dA2T, n, lda1, lda2, sb };
for( int j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im)
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_cswapblkrm<<< blocks, blocksize, 0, magma_stream >>>( params );
}
}
}
|
4fe6466153700470f0c2f4ec4810223f1cfc433c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "compareAndSet.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
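// Auto-generated benchmark harness: for every matrix size and block shape it warms up the kernel, then times 1000 launches of compareAndSet.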
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
double *ret = NULL;
hipMalloc(&ret, XSIZE*YSIZE);
int rlen = 1;
int clen = 1;
double compareVal = 1;
double tol = 1;
double ifEqualsVal = 1;
double ifLessThanVal = 1;
double ifGreaterThanVal = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( compareAndSet), dim3(gridBlock),dim3(threadBlock), 0, 0, A,ret,rlen,clen,compareVal,tol,ifEqualsVal,ifLessThanVal,ifGreaterThanVal);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( compareAndSet), dim3(gridBlock),dim3(threadBlock), 0, 0, A,ret,rlen,clen,compareVal,tol,ifEqualsVal,ifLessThanVal,ifGreaterThanVal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( compareAndSet), dim3(gridBlock),dim3(threadBlock), 0, 0, A,ret,rlen,clen,compareVal,tol,ifEqualsVal,ifLessThanVal,ifGreaterThanVal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4fe6466153700470f0c2f4ec4810223f1cfc433c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "compareAndSet.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
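// Auto-generated benchmark harness: for every matrix size and block shape it warms up the kernel, then times 1000 launches of compareAndSet.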
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
double *ret = NULL;
cudaMalloc(&ret, XSIZE*YSIZE);
int rlen = 1;
int clen = 1;
double compareVal = 1;
double tol = 1;
double ifEqualsVal = 1;
double ifLessThanVal = 1;
double ifGreaterThanVal = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
compareAndSet<<<gridBlock,threadBlock>>>(A,ret,rlen,clen,compareVal,tol,ifEqualsVal,ifLessThanVal,ifGreaterThanVal);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
compareAndSet<<<gridBlock,threadBlock>>>(A,ret,rlen,clen,compareVal,tol,ifEqualsVal,ifLessThanVal,ifGreaterThanVal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
compareAndSet<<<gridBlock,threadBlock>>>(A,ret,rlen,clen,compareVal,tol,ifEqualsVal,ifLessThanVal,ifGreaterThanVal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2e0d9b79d7f9f7862241308299809326fb6944c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
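// Machine-generated curvilinear stencil: for interior points (2..N-3 in each direction) it accumulates cross-derivative terms built from u1..u3, the metrics met1..met4, the material parameters mu/la, and the strx/stry stretch factors into r1.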
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_58_ = 2.0 * mu[i+2][j][k];
_t_58_ += la[i+2][j][k];
double _t_55_ = met1[i+2][j][k] * _t_58_ * met2[i+2][j][k];
double _v_26_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i][j+2][k+2];
_v_0_ -= c2 * u1[i][j-2][k+2];
double _v_3_ = c2 * u2[i][j+2][k+2];
_v_3_ -= c2 * u2[i][j-2][k+2];
double _v_6_ = c2 * u1[i][j+2][k-2];
_v_6_ -= c2 * u1[i][j-2][k-2];
double _v_9_ = c2 * u2[i][j+2][k-2];
_v_9_ -= c2 * u2[i][j-2][k-2];
double _v_13_ = c2 * u1[i][j+2][k+1];
_v_13_ -= c2 * u1[i][j-2][k+1];
double _v_16_ = c2 * u2[i][j+2][k+1];
_v_16_ -= c2 * u2[i][j-2][k+1];
double _v_19_ = c2 * u1[i][j+2][k-1];
_v_19_ -= c2 * u1[i][j-2][k-1];
double _v_22_ = c2 * u2[i][j+2][k-1];
_v_22_ -= c2 * u2[i][j-2][k-1];
double _v_64_ = c2 * u1[i+2][j][k+2];
_v_26_ -= c2 * u1[i+2][j][k-2];
double _v_73_ = c2 * u1[i+2][j][k-2];
double _t_56_ = _v_26_;
double _v_27_ = c1 * u1[i+2][j][k+1];
double _v_1_ = c1 * u1[i][j+1][k+2];
_v_1_ -= c1 * u1[i][j-1][k+2];
double _v_4_ = c1 * u2[i][j+1][k+2];
_v_4_ -= c1 * u2[i][j-1][k+2];
double _v_7_ = c1 * u1[i][j+1][k-2];
_v_7_ -= c1 * u1[i][j-1][k-2];
double _v_10_ = c1 * u2[i][j+1][k-2];
_v_10_ -= c1 * u2[i][j-1][k-2];
double _v_14_ = c1 * u1[i][j+1][k+1];
_v_14_ -= c1 * u1[i][j-1][k+1];
double _v_17_ = c1 * u2[i][j+1][k+1];
_v_17_ -= c1 * u2[i][j-1][k+1];
double _v_20_ = c1 * u1[i][j+1][k-1];
_v_20_ -= c1 * u1[i][j-1][k-1];
double _v_23_ = c1 * u2[i][j+1][k-1];
_v_23_ -= c1 * u2[i][j-1][k-1];
_v_27_ -= c1 * u1[i+2][j][k-1];
_t_56_ += _v_27_;
double _v_28_ = strx[i] * _t_55_ * _t_56_;
double _v_83_ = c2 * u1[i+2][j][k+1];
double _v_92_ = c2 * u1[i+2][j][k-1];
double _v_44_ = c2 * _v_28_;
double _v_29_ = c2 * u2[i+2][j][k+2];
double _v_67_ = c2 * u2[i+2][j][k+2];
_v_29_ -= c2 * u2[i+2][j][k-2];
double _v_76_ = c2 * u2[i+2][j][k-2];
double _t_63_ = _v_29_;
double _v_30_ = c1 * u2[i+2][j][k+1];
_v_30_ -= c1 * u2[i+2][j][k-1];
_t_63_ += _v_30_;
double _t_62_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_31_ = stry[j] * _t_62_ * _t_63_;
double _v_86_ = c2 * u2[i+2][j][k+1];
double _v_95_ = c2 * u2[i+2][j][k-1];
_v_44_ += c2 * _v_31_;
double _t_67_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_32_ = c2 * u3[i+2][j][k+2];
double _v_70_ = c2 * u3[i+2][j][k+2];
_v_32_ -= c2 * u3[i+2][j][k-2];
double _v_79_ = c2 * u3[i+2][j][k-2];
double _t_68_ = _v_32_;
double _v_33_ = c1 * u3[i+2][j][k+1];
_v_33_ -= c1 * u3[i+2][j][k-1];
_t_68_ += _v_33_;
double _v_34_ = _t_67_ * _t_68_;
double _v_89_ = c2 * u3[i+2][j][k+1];
double _v_98_ = c2 * u3[i+2][j][k-1];
_v_44_ += c2 * _v_34_;
double _t_76_ = 2.0 * mu[i-2][j][k];
_t_76_ += la[i-2][j][k];
double _t_73_ = met1[i-2][j][k] * _t_76_ * met2[i-2][j][k];
double _v_35_ = c2 * u1[i-2][j][k+2];
_v_64_ -= c2 * u1[i-2][j][k+2];
_v_35_ -= c2 * u1[i-2][j][k-2];
_v_73_ -= c2 * u1[i-2][j][k-2];
double _t_74_ = _v_35_;
double _v_36_ = c1 * u1[i-2][j][k+1];
_v_36_ -= c1 * u1[i-2][j][k-1];
_t_74_ += _v_36_;
double _v_37_ = strx[i] * _t_73_ * _t_74_;
_v_83_ -= c2 * u1[i-2][j][k+1];
_v_92_ -= c2 * u1[i-2][j][k-1];
_v_44_ += c2 * _v_37_;
double _v_38_ = c2 * u2[i-2][j][k+2];
_v_67_ -= c2 * u2[i-2][j][k+2];
_v_38_ -= c2 * u2[i-2][j][k-2];
_v_76_ -= c2 * u2[i-2][j][k-2];
double _t_81_ = _v_38_;
double _v_39_ = c1 * u2[i-2][j][k+1];
_v_39_ -= c1 * u2[i-2][j][k-1];
_t_81_ += _v_39_;
double _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_40_ = stry[j] * _t_80_ * _t_81_;
_v_86_ -= c2 * u2[i-2][j][k+1];
_v_95_ -= c2 * u2[i-2][j][k-1];
_v_44_ += c2 * _v_40_;
double _t_85_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_41_ = c2 * u3[i-2][j][k+2];
_v_70_ -= c2 * u3[i-2][j][k+2];
_v_41_ -= c2 * u3[i-2][j][k-2];
_v_79_ -= c2 * u3[i-2][j][k-2];
double _t_86_ = _v_41_;
double _v_42_ = c1 * u3[i-2][j][k+1];
_v_42_ -= c1 * u3[i-2][j][k-1];
_t_86_ += _v_42_;
double _v_43_ = _t_85_ * _t_86_;
_v_89_ -= c2 * u3[i-2][j][k+1];
_v_98_ -= c2 * u3[i-2][j][k-1];
_v_44_ += c2 * _v_43_;
double _t_51_ = stry[j] * _v_44_;
double _t_95_ = 2.0 * mu[i+1][j][k];
_t_95_ += la[i+1][j][k];
double _t_92_ = met1[i+1][j][k] * _t_95_ * met2[i+1][j][k];
double _v_45_ = c2 * u1[i+1][j][k+2];
_v_45_ -= c2 * u1[i+1][j][k-2];
double _t_93_ = _v_45_;
double _v_46_ = c1 * u1[i+1][j][k+1];
double _v_84_ = c1 * u1[i+1][j][k+1];
_v_46_ -= c1 * u1[i+1][j][k-1];
double _v_93_ = c1 * u1[i+1][j][k-1];
_t_93_ += _v_46_;
double _v_47_ = strx[i] * _t_92_ * _t_93_;
double _v_65_ = c1 * u1[i+1][j][k+2];
double _v_74_ = c1 * u1[i+1][j][k-2];
double _v_63_ = c1 * _v_47_;
double _v_48_ = c2 * u2[i+1][j][k+2];
_v_48_ -= c2 * u2[i+1][j][k-2];
double _t_100_ = _v_48_;
double _v_49_ = c1 * u2[i+1][j][k+1];
double _v_87_ = c1 * u2[i+1][j][k+1];
_v_49_ -= c1 * u2[i+1][j][k-1];
double _v_96_ = c1 * u2[i+1][j][k-1];
_t_100_ += _v_49_;
double _t_99_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_50_ = stry[j] * _t_99_ * _t_100_;
double _v_68_ = c1 * u2[i+1][j][k+2];
double _v_77_ = c1 * u2[i+1][j][k-2];
_v_63_ += c1 * _v_50_;
double _t_104_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_51_ = c2 * u3[i+1][j][k+2];
_v_51_ -= c2 * u3[i+1][j][k-2];
double _t_105_ = _v_51_;
double _v_52_ = c1 * u3[i+1][j][k+1];
double _v_90_ = c1 * u3[i+1][j][k+1];
_v_52_ -= c1 * u3[i+1][j][k-1];
double _v_99_ = c1 * u3[i+1][j][k-1];
_t_105_ += _v_52_;
double _v_53_ = _t_104_ * _t_105_;
double _v_71_ = c1 * u3[i+1][j][k+2];
double _v_80_ = c1 * u3[i+1][j][k-2];
_v_63_ += c1 * _v_53_;
double _t_113_ = 2.0 * mu[i-1][j][k];
_t_113_ += la[i-1][j][k];
double _t_110_ = met1[i-1][j][k] * _t_113_ * met2[i-1][j][k];
double _v_54_ = c2 * u1[i-1][j][k+2];
_v_54_ -= c2 * u1[i-1][j][k-2];
double _t_111_ = _v_54_;
double _v_55_ = c1 * u1[i-1][j][k+1];
_v_84_ -= c1 * u1[i-1][j][k+1];
_v_55_ -= c1 * u1[i-1][j][k-1];
_v_93_ -= c1 * u1[i-1][j][k-1];
_t_111_ += _v_55_;
double _v_56_ = strx[i] * _t_110_ * _t_111_;
_v_65_ -= c1 * u1[i-1][j][k+2];
_v_74_ -= c1 * u1[i-1][j][k-2];
_v_63_ += c1 * _v_56_;
double _v_57_ = c2 * u2[i-1][j][k+2];
_v_57_ -= c2 * u2[i-1][j][k-2];
double _t_118_ = _v_57_;
double _v_58_ = c1 * u2[i-1][j][k+1];
_v_87_ -= c1 * u2[i-1][j][k+1];
_v_58_ -= c1 * u2[i-1][j][k-1];
_v_96_ -= c1 * u2[i-1][j][k-1];
_t_118_ += _v_58_;
double _t_117_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_59_ = stry[j] * _t_117_ * _t_118_;
_v_68_ -= c1 * u2[i-1][j][k+2];
_v_77_ -= c1 * u2[i-1][j][k-2];
_v_63_ += c1 * _v_59_;
double _t_122_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_60_ = c2 * u3[i-1][j][k+2];
_v_60_ -= c2 * u3[i-1][j][k-2];
double _t_123_ = _v_60_;
double _v_61_ = c1 * u3[i-1][j][k+1];
_v_90_ -= c1 * u3[i-1][j][k+1];
_v_61_ -= c1 * u3[i-1][j][k-1];
_v_99_ -= c1 * u3[i-1][j][k-1];
_t_123_ += _v_61_;
double _v_62_ = _t_122_ * _t_123_;
_v_71_ -= c1 * u3[i-1][j][k+2];
_v_80_ -= c1 * u3[i-1][j][k-2];
_v_63_ += c1 * _v_62_;
_t_51_ += stry[j] * _v_63_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_51_;
double _t_144_ = _v_70_;
_t_144_ += _v_71_;
double _t_143_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_72_ = stry[j] * _t_143_ * _t_144_;
double _v_82_ = c2 * _v_72_;
double _t_132_ = _v_64_;
_t_132_ += _v_65_;
double _t_134_ = 2.0 * mu[i][j][k+2];
double _t_137_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_134_ += la[i][j][k+2];
double _t_131_ = met1[i][j][k+2] * _t_134_ * met2[i][j][k+2];
double _t_9_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_130_ = _t_131_ * _t_132_;
double _v_66_ = stry[j] * _t_130_ * strx[i];
_v_82_ += c2 * _v_66_;
double _t_138_ = _v_67_;
_t_138_ += _v_68_;
double _v_69_ = _t_137_ * _t_138_;
_v_82_ += c2 * _v_69_;
double _t_151_ = _v_73_;
_t_151_ += _v_74_;
double _t_153_ = 2.0 * mu[i][j][k-2];
_t_153_ += la[i][j][k-2];
double _t_150_ = met1[i][j][k-2] * _t_153_ * met2[i][j][k-2];
double _t_21_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_149_ = _t_150_ * _t_151_;
double _v_75_ = stry[j] * _t_149_ * strx[i];
_v_82_ += c2 * _v_75_;
double _t_157_ = _v_76_;
_t_157_ += _v_77_;
double _t_156_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_162_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_78_ = _t_156_ * _t_157_;
_v_82_ += c2 * _v_78_;
double _t_163_ = _v_79_;
_t_163_ += _v_80_;
double _v_81_ = stry[j] * _t_162_ * _t_163_;
_v_82_ += c2 * _v_81_;
double _t_127_ = _v_82_;
double _t_183_ = _v_89_;
_t_183_ += _v_90_;
double _t_182_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_91_ = stry[j] * _t_182_ * _t_183_;
double _v_101_ = c1 * _v_91_;
double _t_171_ = _v_83_;
_t_171_ += _v_84_;
double _t_173_ = 2.0 * mu[i][j][k+1];
double _t_176_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_173_ += la[i][j][k+1];
double _t_170_ = met1[i][j][k+1] * _t_173_ * met2[i][j][k+1];
double _t_34_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_169_ = _t_170_ * _t_171_;
double _v_85_ = stry[j] * _t_169_ * strx[i+2];
_v_101_ += c1 * _v_85_;
double _t_177_ = _v_86_;
_t_177_ += _v_87_;
double _v_88_ = _t_176_ * _t_177_;
_v_101_ += c1 * _v_88_;
double _t_190_ = _v_92_;
_t_190_ += _v_93_;
double _t_192_ = 2.0 * mu[i][j][k-1];
_t_192_ += la[i][j][k-1];
double _t_189_ = met1[i][j][k-1] * _t_192_ * met2[i][j][k-1];
double _t_46_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_188_ = _t_189_ * _t_190_;
double _v_94_ = stry[j] * _t_188_ * strx[i-2];
_v_101_ += c1 * _v_94_;
double _t_196_ = _v_95_;
_t_196_ += _v_96_;
double _t_195_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_97_ = _t_195_ * _t_196_;
_v_101_ += c1 * _v_97_;
double _t_202_ = _v_98_;
_t_202_ += _v_99_;
double _v_100_ = stry[j] * _t_201_ * _t_202_;
_v_101_ += c1 * _v_100_;
_t_127_ += _v_101_;
r1ic0jc0kc0 += _t_127_;
double _t_4_ = _t_137_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = strx[i] * _t_3_ * stry[j+2];
double _v_12_ = c2 * _v_2_;
double _t_10_ = _v_3_;
_t_10_ += _v_4_;
double _v_5_ = _t_9_ * _t_10_;
_v_12_ += c2 * _v_5_;
double _t_16_ = _t_156_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_15_ = _t_16_ * _t_17_;
double _v_8_ = strx[i] * _t_15_ * stry[j];
_v_12_ += c2 * _v_8_;
double _t_22_ = _v_9_;
_t_22_ += _v_10_;
double _v_11_ = _t_21_ * _t_22_;
_v_12_ += c2 * _v_11_;
double _t_0_ = _v_12_;
double _t_29_ = _t_176_;
double _t_30_ = _v_13_;
_t_30_ += _v_14_;
double _t_28_ = _t_29_ * _t_30_;
double _v_15_ = strx[i] * _t_28_ * stry[j-2];
double _v_25_ = c1 * _v_15_;
double _t_35_ = _v_16_;
_t_35_ += _v_17_;
double _v_18_ = _t_34_ * _t_35_;
_v_25_ += c1 * _v_18_;
double _t_41_ = _t_195_;
double _t_42_ = _v_19_;
_t_42_ += _v_20_;
double _t_40_ = _t_41_ * _t_42_;
double _v_21_ = strx[i] * _t_40_ * stry[j];
_v_25_ += c1 * _v_21_;
double _t_47_ = _v_22_;
_t_47_ += _v_23_;
double _v_24_ = _t_46_ * _t_47_;
_v_25_ += c1 * _v_24_;
_t_0_ += _v_25_;
r1ic0jc0kc0 += _t_0_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) {
double _v_0_ = c2 * u1[i][j+2][k+2];
_v_0_ -= c2 * u1[i][j+2][k-2];
double _t_5_ = _v_0_;
double _v_1_ = c1 * u1[i][j+2][k+1];
_v_1_ -= c1 * u1[i][j+2][k-1];
_t_5_ += _v_1_;
double _t_4_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = strx[i] * _t_3_ * stry[j+1];
double _v_12_ = c2 * _v_2_;
double _v_3_ = c2 * u2[i][j+2][k+2];
_v_3_ -= c2 * u2[i][j+2][k-2];
double _t_10_ = _v_3_;
double _v_4_ = c1 * u2[i][j+2][k+1];
_v_4_ -= c1 * u2[i][j+2][k-1];
_t_10_ += _v_4_;
double _t_9_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k];
double _v_5_ = _t_9_ * _t_10_;
_v_12_ += c2 * _v_5_;
double _v_9_ = c2 * u2[i][j-2][k+2];
_v_9_ -= c2 * u2[i][j-2][k-2];
double _t_22_ = _v_9_;
double _v_10_ = c1 * u2[i][j-2][k+1];
_v_10_ -= c1 * u2[i][j-2][k-1];
_t_22_ += _v_10_;
double _t_21_ = met1[i][j-2][k] * mu[i][j-2][k] * met2[i][j-2][k];
double _v_11_ = _t_21_ * _t_22_;
_v_12_ += c2 * _v_11_;
double _v_6_ = c2 * u1[i][j-2][k+2];
_v_6_ -= c2 * u1[i][j-2][k-2];
double _t_17_ = _v_6_;
double _v_7_ = c1 * u1[i][j-2][k+1];
_v_7_ -= c1 * u1[i][j-2][k-1];
_t_17_ += _v_7_;
double _t_16_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k];
double _t_15_ = _t_16_ * _t_17_;
double _v_8_ = strx[i] * _t_15_ * stry[j];
_v_12_ += c2 * _v_8_;
double _t_0_ = _v_12_;
double _v_19_ = c2 * u1[i][j-1][k+2];
_v_19_ -= c2 * u1[i][j-1][k-2];
double _t_42_ = _v_19_;
double _v_20_ = c1 * u1[i][j-1][k+1];
_v_20_ -= c1 * u1[i][j-1][k-1];
_t_42_ += _v_20_;
double _t_41_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k];
double _t_40_ = _t_41_ * _t_42_;
double _v_21_ = strx[i] * _t_40_ * stry[j];
double _v_25_ = c1 * _v_21_;
double _v_13_ = c2 * u1[i][j+1][k+2];
_v_13_ -= c2 * u1[i][j+1][k-2];
double _t_30_ = _v_13_;
double _v_14_ = c1 * u1[i][j+1][k+1];
_v_14_ -= c1 * u1[i][j+1][k-1];
_t_30_ += _v_14_;
double _t_29_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k];
double _t_28_ = _t_29_ * _t_30_;
double _v_15_ = strx[i] * _t_28_ * stry[j-1];
_v_25_ += c1 * _v_15_;
double _v_16_ = c2 * u2[i][j+1][k+2];
_v_16_ -= c2 * u2[i][j+1][k-2];
double _t_35_ = _v_16_;
double _v_17_ = c1 * u2[i][j+1][k+1];
_v_17_ -= c1 * u2[i][j+1][k-1];
_t_35_ += _v_17_;
double _t_34_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k];
double _v_18_ = _t_34_ * _t_35_;
_v_25_ += c1 * _v_18_;
double _v_22_ = c2 * u2[i][j-1][k+2];
_v_22_ -= c2 * u2[i][j-1][k-2];
double _t_47_ = _v_22_;
double _v_23_ = c1 * u2[i][j-1][k+1];
_v_23_ -= c1 * u2[i][j-1][k-1];
_t_47_ += _v_23_;
double _t_46_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k];
double _v_24_ = _t_46_ * _t_47_;
_v_25_ += c1 * _v_24_;
_t_0_ += _v_25_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_0_;
double _v_26_ = c2 * u2[i+2][j+2][k];
_v_26_ -= c2 * u2[i-2][j+2][k];
double _v_29_ = c2 * u2[i+2][j-2][k];
_v_29_ -= c2 * u2[i-2][j-2][k];
double _v_40_ = c2 * u2[i+2][j+2][k];
_v_40_ -= c2 * u2[i+2][j-2][k];
double _v_43_ = c2 * u2[i-2][j+2][k];
_v_43_ -= c2 * u2[i-2][j-2][k];
double _v_33_ = c2 * u2[i+2][j+1][k];
_v_33_ -= c2 * u2[i-2][j+1][k];
double _v_36_ = c2 * u2[i+2][j-1][k];
_v_36_ -= c2 * u2[i-2][j-1][k];
double _v_47_ = c2 * u2[i+1][j+2][k];
_v_47_ -= c2 * u2[i+1][j-2][k];
double _v_50_ = c2 * u2[i-1][j+2][k];
_v_50_ -= c2 * u2[i-1][j-2][k];
double _v_27_ = c1 * u2[i+1][j+2][k];
_v_27_ -= c1 * u2[i-1][j+2][k];
double _v_30_ = c1 * u2[i+1][j-2][k];
_v_30_ -= c1 * u2[i-1][j-2][k];
double _v_41_ = c1 * u2[i+2][j+1][k];
_v_41_ -= c1 * u2[i+2][j-1][k];
double _v_44_ = c1 * u2[i-2][j+1][k];
_v_44_ -= c1 * u2[i-2][j-1][k];
double _v_34_ = c1 * u2[i+1][j+1][k];
_v_34_ -= c1 * u2[i-1][j+1][k];
double _v_37_ = c1 * u2[i+1][j-1][k];
_v_37_ -= c1 * u2[i-1][j-1][k];
double _v_48_ = c1 * u2[i+1][j+1][k];
_v_48_ -= c1 * u2[i+1][j-1][k];
double _v_51_ = c1 * u2[i-1][j+1][k];
_v_51_ -= c1 * u2[i-1][j-1][k];
double _t_54_ = _v_26_;
_t_54_ += _v_27_;
double _t_53_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k];
double _v_28_ = _t_53_ * _t_54_;
double _v_32_ = c2 * _v_28_;
double _t_59_ = _v_29_;
_t_59_ += _v_30_;
double _t_58_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k];
double _v_31_ = _t_58_ * _t_59_;
_v_32_ += c2 * _v_31_;
double _t_51_ = _v_32_;
double _t_76_ = _v_40_;
_t_76_ += _v_41_;
double _t_75_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k];
double _v_42_ = _t_75_ * _t_76_;
double _v_46_ = c2 * _v_42_;
double _t_81_ = _v_43_;
_t_81_ += _v_44_;
double _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k];
double _v_45_ = _t_80_ * _t_81_;
_v_46_ += c2 * _v_45_;
_t_51_ += _v_46_;
double _t_65_ = _v_33_;
_t_65_ += _v_34_;
double _t_64_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k];
double _v_35_ = _t_64_ * _t_65_;
double _v_39_ = c1 * _v_35_;
double _t_70_ = _v_36_;
_t_70_ += _v_37_;
double _t_69_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k];
double _v_38_ = _t_69_ * _t_70_;
_v_39_ += c1 * _v_38_;
_t_51_ += _v_39_;
double _t_87_ = _v_47_;
_t_87_ += _v_48_;
double _t_86_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k];
double _v_49_ = _t_86_ * _t_87_;
double _v_53_ = c1 * _v_49_;
double _t_92_ = _v_50_;
_t_92_ += _v_51_;
double _t_91_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k];
double _v_52_ = _t_91_ * _t_92_;
_v_53_ += c1 * _v_52_;
_t_51_ += _v_53_;
r1ic0jc0kc0 += _t_51_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
hipMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u1;
hipMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u2;
hipMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u3;
hipMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met1;
hipMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met2;
hipMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met3;
hipMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met4;
hipMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL(( curvi_1) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
dim3 blockconfig_1 (16, 2, 2);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
hipLaunchKernelGGL(( curvi_2) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
}
| 2e0d9b79d7f9f7862241308299809326fb6944c4.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_58_ = 2.0 * mu[i+2][j][k];
_t_58_ += la[i+2][j][k];
double _t_55_ = met1[i+2][j][k] * _t_58_ * met2[i+2][j][k];
double _v_26_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i][j+2][k+2];
_v_0_ -= c2 * u1[i][j-2][k+2];
double _v_3_ = c2 * u2[i][j+2][k+2];
_v_3_ -= c2 * u2[i][j-2][k+2];
double _v_6_ = c2 * u1[i][j+2][k-2];
_v_6_ -= c2 * u1[i][j-2][k-2];
double _v_9_ = c2 * u2[i][j+2][k-2];
_v_9_ -= c2 * u2[i][j-2][k-2];
double _v_13_ = c2 * u1[i][j+2][k+1];
_v_13_ -= c2 * u1[i][j-2][k+1];
double _v_16_ = c2 * u2[i][j+2][k+1];
_v_16_ -= c2 * u2[i][j-2][k+1];
double _v_19_ = c2 * u1[i][j+2][k-1];
_v_19_ -= c2 * u1[i][j-2][k-1];
double _v_22_ = c2 * u2[i][j+2][k-1];
_v_22_ -= c2 * u2[i][j-2][k-1];
double _v_64_ = c2 * u1[i+2][j][k+2];
_v_26_ -= c2 * u1[i+2][j][k-2];
double _v_73_ = c2 * u1[i+2][j][k-2];
double _t_56_ = _v_26_;
double _v_27_ = c1 * u1[i+2][j][k+1];
double _v_1_ = c1 * u1[i][j+1][k+2];
_v_1_ -= c1 * u1[i][j-1][k+2];
double _v_4_ = c1 * u2[i][j+1][k+2];
_v_4_ -= c1 * u2[i][j-1][k+2];
double _v_7_ = c1 * u1[i][j+1][k-2];
_v_7_ -= c1 * u1[i][j-1][k-2];
double _v_10_ = c1 * u2[i][j+1][k-2];
_v_10_ -= c1 * u2[i][j-1][k-2];
double _v_14_ = c1 * u1[i][j+1][k+1];
_v_14_ -= c1 * u1[i][j-1][k+1];
double _v_17_ = c1 * u2[i][j+1][k+1];
_v_17_ -= c1 * u2[i][j-1][k+1];
double _v_20_ = c1 * u1[i][j+1][k-1];
_v_20_ -= c1 * u1[i][j-1][k-1];
double _v_23_ = c1 * u2[i][j+1][k-1];
_v_23_ -= c1 * u2[i][j-1][k-1];
_v_27_ -= c1 * u1[i+2][j][k-1];
_t_56_ += _v_27_;
double _v_28_ = strx[i] * _t_55_ * _t_56_;
double _v_83_ = c2 * u1[i+2][j][k+1];
double _v_92_ = c2 * u1[i+2][j][k-1];
double _v_44_ = c2 * _v_28_;
double _v_29_ = c2 * u2[i+2][j][k+2];
double _v_67_ = c2 * u2[i+2][j][k+2];
_v_29_ -= c2 * u2[i+2][j][k-2];
double _v_76_ = c2 * u2[i+2][j][k-2];
double _t_63_ = _v_29_;
double _v_30_ = c1 * u2[i+2][j][k+1];
_v_30_ -= c1 * u2[i+2][j][k-1];
_t_63_ += _v_30_;
double _t_62_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_31_ = stry[j] * _t_62_ * _t_63_;
double _v_86_ = c2 * u2[i+2][j][k+1];
double _v_95_ = c2 * u2[i+2][j][k-1];
_v_44_ += c2 * _v_31_;
double _t_67_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_32_ = c2 * u3[i+2][j][k+2];
double _v_70_ = c2 * u3[i+2][j][k+2];
_v_32_ -= c2 * u3[i+2][j][k-2];
double _v_79_ = c2 * u3[i+2][j][k-2];
double _t_68_ = _v_32_;
double _v_33_ = c1 * u3[i+2][j][k+1];
_v_33_ -= c1 * u3[i+2][j][k-1];
_t_68_ += _v_33_;
double _v_34_ = _t_67_ * _t_68_;
double _v_89_ = c2 * u3[i+2][j][k+1];
double _v_98_ = c2 * u3[i+2][j][k-1];
_v_44_ += c2 * _v_34_;
double _t_76_ = 2.0 * mu[i-2][j][k];
_t_76_ += la[i-2][j][k];
double _t_73_ = met1[i-2][j][k] * _t_76_ * met2[i-2][j][k];
double _v_35_ = c2 * u1[i-2][j][k+2];
_v_64_ -= c2 * u1[i-2][j][k+2];
_v_35_ -= c2 * u1[i-2][j][k-2];
_v_73_ -= c2 * u1[i-2][j][k-2];
double _t_74_ = _v_35_;
double _v_36_ = c1 * u1[i-2][j][k+1];
_v_36_ -= c1 * u1[i-2][j][k-1];
_t_74_ += _v_36_;
double _v_37_ = strx[i] * _t_73_ * _t_74_;
_v_83_ -= c2 * u1[i-2][j][k+1];
_v_92_ -= c2 * u1[i-2][j][k-1];
_v_44_ += c2 * _v_37_;
double _v_38_ = c2 * u2[i-2][j][k+2];
_v_67_ -= c2 * u2[i-2][j][k+2];
_v_38_ -= c2 * u2[i-2][j][k-2];
_v_76_ -= c2 * u2[i-2][j][k-2];
double _t_81_ = _v_38_;
double _v_39_ = c1 * u2[i-2][j][k+1];
_v_39_ -= c1 * u2[i-2][j][k-1];
_t_81_ += _v_39_;
double _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_40_ = stry[j] * _t_80_ * _t_81_;
_v_86_ -= c2 * u2[i-2][j][k+1];
_v_95_ -= c2 * u2[i-2][j][k-1];
_v_44_ += c2 * _v_40_;
double _t_85_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_41_ = c2 * u3[i-2][j][k+2];
_v_70_ -= c2 * u3[i-2][j][k+2];
_v_41_ -= c2 * u3[i-2][j][k-2];
_v_79_ -= c2 * u3[i-2][j][k-2];
double _t_86_ = _v_41_;
double _v_42_ = c1 * u3[i-2][j][k+1];
_v_42_ -= c1 * u3[i-2][j][k-1];
_t_86_ += _v_42_;
double _v_43_ = _t_85_ * _t_86_;
_v_89_ -= c2 * u3[i-2][j][k+1];
_v_98_ -= c2 * u3[i-2][j][k-1];
_v_44_ += c2 * _v_43_;
double _t_51_ = stry[j] * _v_44_;
double _t_95_ = 2.0 * mu[i+1][j][k];
_t_95_ += la[i+1][j][k];
double _t_92_ = met1[i+1][j][k] * _t_95_ * met2[i+1][j][k];
double _v_45_ = c2 * u1[i+1][j][k+2];
_v_45_ -= c2 * u1[i+1][j][k-2];
double _t_93_ = _v_45_;
double _v_46_ = c1 * u1[i+1][j][k+1];
double _v_84_ = c1 * u1[i+1][j][k+1];
_v_46_ -= c1 * u1[i+1][j][k-1];
double _v_93_ = c1 * u1[i+1][j][k-1];
_t_93_ += _v_46_;
double _v_47_ = strx[i] * _t_92_ * _t_93_;
double _v_65_ = c1 * u1[i+1][j][k+2];
double _v_74_ = c1 * u1[i+1][j][k-2];
double _v_63_ = c1 * _v_47_;
double _v_48_ = c2 * u2[i+1][j][k+2];
_v_48_ -= c2 * u2[i+1][j][k-2];
double _t_100_ = _v_48_;
double _v_49_ = c1 * u2[i+1][j][k+1];
double _v_87_ = c1 * u2[i+1][j][k+1];
_v_49_ -= c1 * u2[i+1][j][k-1];
double _v_96_ = c1 * u2[i+1][j][k-1];
_t_100_ += _v_49_;
double _t_99_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_50_ = stry[j] * _t_99_ * _t_100_;
double _v_68_ = c1 * u2[i+1][j][k+2];
double _v_77_ = c1 * u2[i+1][j][k-2];
_v_63_ += c1 * _v_50_;
double _t_104_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_51_ = c2 * u3[i+1][j][k+2];
_v_51_ -= c2 * u3[i+1][j][k-2];
double _t_105_ = _v_51_;
double _v_52_ = c1 * u3[i+1][j][k+1];
double _v_90_ = c1 * u3[i+1][j][k+1];
_v_52_ -= c1 * u3[i+1][j][k-1];
double _v_99_ = c1 * u3[i+1][j][k-1];
_t_105_ += _v_52_;
double _v_53_ = _t_104_ * _t_105_;
double _v_71_ = c1 * u3[i+1][j][k+2];
double _v_80_ = c1 * u3[i+1][j][k-2];
_v_63_ += c1 * _v_53_;
double _t_113_ = 2.0 * mu[i-1][j][k];
_t_113_ += la[i-1][j][k];
double _t_110_ = met1[i-1][j][k] * _t_113_ * met2[i-1][j][k];
double _v_54_ = c2 * u1[i-1][j][k+2];
_v_54_ -= c2 * u1[i-1][j][k-2];
double _t_111_ = _v_54_;
double _v_55_ = c1 * u1[i-1][j][k+1];
_v_84_ -= c1 * u1[i-1][j][k+1];
_v_55_ -= c1 * u1[i-1][j][k-1];
_v_93_ -= c1 * u1[i-1][j][k-1];
_t_111_ += _v_55_;
double _v_56_ = strx[i] * _t_110_ * _t_111_;
_v_65_ -= c1 * u1[i-1][j][k+2];
_v_74_ -= c1 * u1[i-1][j][k-2];
_v_63_ += c1 * _v_56_;
double _v_57_ = c2 * u2[i-1][j][k+2];
_v_57_ -= c2 * u2[i-1][j][k-2];
double _t_118_ = _v_57_;
double _v_58_ = c1 * u2[i-1][j][k+1];
_v_87_ -= c1 * u2[i-1][j][k+1];
_v_58_ -= c1 * u2[i-1][j][k-1];
_v_96_ -= c1 * u2[i-1][j][k-1];
_t_118_ += _v_58_;
double _t_117_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_59_ = stry[j] * _t_117_ * _t_118_;
_v_68_ -= c1 * u2[i-1][j][k+2];
_v_77_ -= c1 * u2[i-1][j][k-2];
_v_63_ += c1 * _v_59_;
double _t_122_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_60_ = c2 * u3[i-1][j][k+2];
_v_60_ -= c2 * u3[i-1][j][k-2];
double _t_123_ = _v_60_;
double _v_61_ = c1 * u3[i-1][j][k+1];
_v_90_ -= c1 * u3[i-1][j][k+1];
_v_61_ -= c1 * u3[i-1][j][k-1];
_v_99_ -= c1 * u3[i-1][j][k-1];
_t_123_ += _v_61_;
double _v_62_ = _t_122_ * _t_123_;
_v_71_ -= c1 * u3[i-1][j][k+2];
_v_80_ -= c1 * u3[i-1][j][k-2];
_v_63_ += c1 * _v_62_;
_t_51_ += stry[j] * _v_63_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_51_;
double _t_144_ = _v_70_;
_t_144_ += _v_71_;
double _t_143_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_72_ = stry[j] * _t_143_ * _t_144_;
double _v_82_ = c2 * _v_72_;
double _t_132_ = _v_64_;
_t_132_ += _v_65_;
double _t_134_ = 2.0 * mu[i][j][k+2];
double _t_137_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_134_ += la[i][j][k+2];
double _t_131_ = met1[i][j][k+2] * _t_134_ * met2[i][j][k+2];
double _t_9_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_130_ = _t_131_ * _t_132_;
double _v_66_ = stry[j] * _t_130_ * strx[i];
_v_82_ += c2 * _v_66_;
double _t_138_ = _v_67_;
_t_138_ += _v_68_;
double _v_69_ = _t_137_ * _t_138_;
_v_82_ += c2 * _v_69_;
double _t_151_ = _v_73_;
_t_151_ += _v_74_;
double _t_153_ = 2.0 * mu[i][j][k-2];
_t_153_ += la[i][j][k-2];
double _t_150_ = met1[i][j][k-2] * _t_153_ * met2[i][j][k-2];
double _t_21_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_149_ = _t_150_ * _t_151_;
double _v_75_ = stry[j] * _t_149_ * strx[i];
_v_82_ += c2 * _v_75_;
double _t_157_ = _v_76_;
_t_157_ += _v_77_;
double _t_156_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_162_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_78_ = _t_156_ * _t_157_;
_v_82_ += c2 * _v_78_;
double _t_163_ = _v_79_;
_t_163_ += _v_80_;
double _v_81_ = stry[j] * _t_162_ * _t_163_;
_v_82_ += c2 * _v_81_;
double _t_127_ = _v_82_;
double _t_183_ = _v_89_;
_t_183_ += _v_90_;
double _t_182_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_91_ = stry[j] * _t_182_ * _t_183_;
double _v_101_ = c1 * _v_91_;
double _t_171_ = _v_83_;
_t_171_ += _v_84_;
double _t_173_ = 2.0 * mu[i][j][k+1];
double _t_176_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_173_ += la[i][j][k+1];
double _t_170_ = met1[i][j][k+1] * _t_173_ * met2[i][j][k+1];
double _t_34_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_169_ = _t_170_ * _t_171_;
double _v_85_ = stry[j] * _t_169_ * strx[i+2];
_v_101_ += c1 * _v_85_;
double _t_177_ = _v_86_;
_t_177_ += _v_87_;
double _v_88_ = _t_176_ * _t_177_;
_v_101_ += c1 * _v_88_;
double _t_190_ = _v_92_;
_t_190_ += _v_93_;
double _t_192_ = 2.0 * mu[i][j][k-1];
_t_192_ += la[i][j][k-1];
double _t_189_ = met1[i][j][k-1] * _t_192_ * met2[i][j][k-1];
double _t_46_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_188_ = _t_189_ * _t_190_;
double _v_94_ = stry[j] * _t_188_ * strx[i-2];
_v_101_ += c1 * _v_94_;
double _t_196_ = _v_95_;
_t_196_ += _v_96_;
double _t_195_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_97_ = _t_195_ * _t_196_;
_v_101_ += c1 * _v_97_;
double _t_202_ = _v_98_;
_t_202_ += _v_99_;
double _v_100_ = stry[j] * _t_201_ * _t_202_;
_v_101_ += c1 * _v_100_;
_t_127_ += _v_101_;
r1ic0jc0kc0 += _t_127_;
double _t_4_ = _t_137_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = strx[i] * _t_3_ * stry[j+2];
double _v_12_ = c2 * _v_2_;
double _t_10_ = _v_3_;
_t_10_ += _v_4_;
double _v_5_ = _t_9_ * _t_10_;
_v_12_ += c2 * _v_5_;
double _t_16_ = _t_156_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_15_ = _t_16_ * _t_17_;
double _v_8_ = strx[i] * _t_15_ * stry[j];
_v_12_ += c2 * _v_8_;
double _t_22_ = _v_9_;
_t_22_ += _v_10_;
double _v_11_ = _t_21_ * _t_22_;
_v_12_ += c2 * _v_11_;
double _t_0_ = _v_12_;
double _t_29_ = _t_176_;
double _t_30_ = _v_13_;
_t_30_ += _v_14_;
double _t_28_ = _t_29_ * _t_30_;
double _v_15_ = strx[i] * _t_28_ * stry[j-2];
double _v_25_ = c1 * _v_15_;
double _t_35_ = _v_16_;
_t_35_ += _v_17_;
double _v_18_ = _t_34_ * _t_35_;
_v_25_ += c1 * _v_18_;
double _t_41_ = _t_195_;
double _t_42_ = _v_19_;
_t_42_ += _v_20_;
double _t_40_ = _t_41_ * _t_42_;
double _v_21_ = strx[i] * _t_40_ * stry[j];
_v_25_ += c1 * _v_21_;
double _t_47_ = _v_22_;
_t_47_ += _v_23_;
double _v_24_ = _t_46_ * _t_47_;
_v_25_ += c1 * _v_24_;
_t_0_ += _v_25_;
r1ic0jc0kc0 += _t_0_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) {
double _v_0_ = c2 * u1[i][j+2][k+2];
_v_0_ -= c2 * u1[i][j+2][k-2];
double _t_5_ = _v_0_;
double _v_1_ = c1 * u1[i][j+2][k+1];
_v_1_ -= c1 * u1[i][j+2][k-1];
_t_5_ += _v_1_;
double _t_4_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = strx[i] * _t_3_ * stry[j+1];
double _v_12_ = c2 * _v_2_;
double _v_3_ = c2 * u2[i][j+2][k+2];
_v_3_ -= c2 * u2[i][j+2][k-2];
double _t_10_ = _v_3_;
double _v_4_ = c1 * u2[i][j+2][k+1];
_v_4_ -= c1 * u2[i][j+2][k-1];
_t_10_ += _v_4_;
double _t_9_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k];
double _v_5_ = _t_9_ * _t_10_;
_v_12_ += c2 * _v_5_;
double _v_9_ = c2 * u2[i][j-2][k+2];
_v_9_ -= c2 * u2[i][j-2][k-2];
double _t_22_ = _v_9_;
double _v_10_ = c1 * u2[i][j-2][k+1];
_v_10_ -= c1 * u2[i][j-2][k-1];
_t_22_ += _v_10_;
double _t_21_ = met1[i][j-2][k] * mu[i][j-2][k] * met2[i][j-2][k];
double _v_11_ = _t_21_ * _t_22_;
_v_12_ += c2 * _v_11_;
double _v_6_ = c2 * u1[i][j-2][k+2];
_v_6_ -= c2 * u1[i][j-2][k-2];
double _t_17_ = _v_6_;
double _v_7_ = c1 * u1[i][j-2][k+1];
_v_7_ -= c1 * u1[i][j-2][k-1];
_t_17_ += _v_7_;
double _t_16_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k];
double _t_15_ = _t_16_ * _t_17_;
double _v_8_ = strx[i] * _t_15_ * stry[j];
_v_12_ += c2 * _v_8_;
double _t_0_ = _v_12_;
double _v_19_ = c2 * u1[i][j-1][k+2];
_v_19_ -= c2 * u1[i][j-1][k-2];
double _t_42_ = _v_19_;
double _v_20_ = c1 * u1[i][j-1][k+1];
_v_20_ -= c1 * u1[i][j-1][k-1];
_t_42_ += _v_20_;
double _t_41_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k];
double _t_40_ = _t_41_ * _t_42_;
double _v_21_ = strx[i] * _t_40_ * stry[j];
double _v_25_ = c1 * _v_21_;
double _v_13_ = c2 * u1[i][j+1][k+2];
_v_13_ -= c2 * u1[i][j+1][k-2];
double _t_30_ = _v_13_;
double _v_14_ = c1 * u1[i][j+1][k+1];
_v_14_ -= c1 * u1[i][j+1][k-1];
_t_30_ += _v_14_;
double _t_29_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k];
double _t_28_ = _t_29_ * _t_30_;
double _v_15_ = strx[i] * _t_28_ * stry[j-1];
_v_25_ += c1 * _v_15_;
double _v_16_ = c2 * u2[i][j+1][k+2];
_v_16_ -= c2 * u2[i][j+1][k-2];
double _t_35_ = _v_16_;
double _v_17_ = c1 * u2[i][j+1][k+1];
_v_17_ -= c1 * u2[i][j+1][k-1];
_t_35_ += _v_17_;
double _t_34_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k];
double _v_18_ = _t_34_ * _t_35_;
_v_25_ += c1 * _v_18_;
double _v_22_ = c2 * u2[i][j-1][k+2];
_v_22_ -= c2 * u2[i][j-1][k-2];
double _t_47_ = _v_22_;
double _v_23_ = c1 * u2[i][j-1][k+1];
_v_23_ -= c1 * u2[i][j-1][k-1];
_t_47_ += _v_23_;
double _t_46_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k];
double _v_24_ = _t_46_ * _t_47_;
_v_25_ += c1 * _v_24_;
_t_0_ += _v_25_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_0_;
double _v_26_ = c2 * u2[i+2][j+2][k];
_v_26_ -= c2 * u2[i-2][j+2][k];
double _v_29_ = c2 * u2[i+2][j-2][k];
_v_29_ -= c2 * u2[i-2][j-2][k];
double _v_40_ = c2 * u2[i+2][j+2][k];
_v_40_ -= c2 * u2[i+2][j-2][k];
double _v_43_ = c2 * u2[i-2][j+2][k];
_v_43_ -= c2 * u2[i-2][j-2][k];
double _v_33_ = c2 * u2[i+2][j+1][k];
_v_33_ -= c2 * u2[i-2][j+1][k];
double _v_36_ = c2 * u2[i+2][j-1][k];
_v_36_ -= c2 * u2[i-2][j-1][k];
double _v_47_ = c2 * u2[i+1][j+2][k];
_v_47_ -= c2 * u2[i+1][j-2][k];
double _v_50_ = c2 * u2[i-1][j+2][k];
_v_50_ -= c2 * u2[i-1][j-2][k];
double _v_27_ = c1 * u2[i+1][j+2][k];
_v_27_ -= c1 * u2[i-1][j+2][k];
double _v_30_ = c1 * u2[i+1][j-2][k];
_v_30_ -= c1 * u2[i-1][j-2][k];
double _v_41_ = c1 * u2[i+2][j+1][k];
_v_41_ -= c1 * u2[i+2][j-1][k];
double _v_44_ = c1 * u2[i-2][j+1][k];
_v_44_ -= c1 * u2[i-2][j-1][k];
double _v_34_ = c1 * u2[i+1][j+1][k];
_v_34_ -= c1 * u2[i-1][j+1][k];
double _v_37_ = c1 * u2[i+1][j-1][k];
_v_37_ -= c1 * u2[i-1][j-1][k];
double _v_48_ = c1 * u2[i+1][j+1][k];
_v_48_ -= c1 * u2[i+1][j-1][k];
double _v_51_ = c1 * u2[i-1][j+1][k];
_v_51_ -= c1 * u2[i-1][j-1][k];
double _t_54_ = _v_26_;
_t_54_ += _v_27_;
double _t_53_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k];
double _v_28_ = _t_53_ * _t_54_;
double _v_32_ = c2 * _v_28_;
double _t_59_ = _v_29_;
_t_59_ += _v_30_;
double _t_58_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k];
double _v_31_ = _t_58_ * _t_59_;
_v_32_ += c2 * _v_31_;
double _t_51_ = _v_32_;
double _t_76_ = _v_40_;
_t_76_ += _v_41_;
double _t_75_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k];
double _v_42_ = _t_75_ * _t_76_;
double _v_46_ = c2 * _v_42_;
double _t_81_ = _v_43_;
_t_81_ += _v_44_;
double _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k];
double _v_45_ = _t_80_ * _t_81_;
_v_46_ += c2 * _v_45_;
_t_51_ += _v_46_;
double _t_65_ = _v_33_;
_t_65_ += _v_34_;
double _t_64_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k];
double _v_35_ = _t_64_ * _t_65_;
double _v_39_ = c1 * _v_35_;
double _t_70_ = _v_36_;
_t_70_ += _v_37_;
double _t_69_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k];
double _v_38_ = _t_69_ * _t_70_;
_v_39_ += c1 * _v_38_;
_t_51_ += _v_39_;
double _t_87_ = _v_47_;
_t_87_ += _v_48_;
double _t_86_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k];
double _v_49_ = _t_86_ * _t_87_;
double _v_53_ = c1 * _v_49_;
double _t_92_ = _v_50_;
_t_92_ += _v_51_;
double _t_91_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k];
double _v_52_ = _t_91_ * _t_92_;
_v_53_ += c1 * _v_52_;
_t_51_ += _v_53_;
r1ic0jc0kc0 += _t_51_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
cudaMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u1;
cudaMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u2;
cudaMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u3;
cudaMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met1;
cudaMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met2;
cudaMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met3;
cudaMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met4;
cudaMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
dim3 blockconfig_1 (16, 2, 2);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
}
|
3cfa8099043b35f33a17239410d011f47c743cc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define SIZE 1024
// __global__ marks this function as a kernel; it is called from the host.
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
// Many threads execute this body at the same time,
// so threadIdx (the thread index) is used to tell the threads apart.
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
__global__ void VectorBlockAdd(int *a, int *b, int *c, int n)
{
int tid = blockIdx.x;
if (tid < n)
c[tid] = a[tid] + b[tid];
}
int main()
{
int *a, *b, *c, *d;
int *d_a, *d_b, *d_c, *d_d;
// Allocate the host-side buffers.
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
d = (int *)malloc(SIZE*sizeof(int));
// hipMalloc(destination, number of bytes) allocates memory on the device.
hipMalloc(&d_a, SIZE*sizeof(int));
hipMalloc(&d_b, SIZE*sizeof(int));
hipMalloc(&d_c, SIZE*sizeof(int));
hipMalloc(&d_d, SIZE*sizeof(int));
// Initialize the vectors.
for (int i = 0; i<SIZE; ++i)
{
a[i] = i;
b[i] = i;
c[i] = 0;
d[i] = 0;
}
// hipMemcpy(destination, source, number of bytes, hipMemcpyHostToDevice) copies host memory to the device.
hipMemcpy(d_a, a, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_d, d, SIZE*sizeof(int), hipMemcpyHostToDevice);
// A kernel call takes an execution configuration in addition to the usual arguments.
// The first parameter is the number of blocks; in this example there is a single thread block.
// SIZE here means 1024 threads.
VectorAdd << < 1, SIZE >> >(d_a, d_b, d_c, SIZE);
VectorBlockAdd << < SIZE, 1 >> >(d_a, d_b, d_d, SIZE);
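// Illustrative sketch (not part of the original file): the other hipified files in
// this dump launch kernels through the explicit API rather than the triple chevrons;
// the two launches above are equivalent to
// hipLaunchKernelGGL(VectorAdd, dim3(1), dim3(SIZE), 0, 0, d_a, d_b, d_c, SIZE);
// hipLaunchKernelGGL(VectorBlockAdd, dim3(SIZE), dim3(1), 0, 0, d_a, d_b, d_d, SIZE);
// where the two zeros are the dynamic shared-memory size and the stream.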
// hipMemcpy(destination, source, number of bytes, hipMemcpyDeviceToHost) copies the device memory (the computed results) back to the host.
hipMemcpy(a, d_a, SIZE*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(b, d_b, SIZE*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(c, d_c, SIZE*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(d, d_d, SIZE*sizeof(int), hipMemcpyDeviceToHost);
printf("vector c: \n");
for (int i = 0; i<SIZE; ++i)
printf("c[%d] = %d\n", i, c[i]);
printf("vector d: \n");
for (int i = 0; i<SIZE; ++i)
printf("d[%d] = %d\n", i, d[i]);
// Free the host memory.
free(a);
free(b);
free(c);
free(d);
// hipFree(d_a) etc. release the device memory.
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
return 0;
}
| 3cfa8099043b35f33a17239410d011f47c743cc0.cu | #include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define SIZE 1024
// __global__ marks this function as a kernel; it is called from the host.
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
// Many threads execute this body at the same time,
// so threadIdx (the thread index) is used to tell the threads apart.
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
__global__ void VectorBlockAdd(int *a, int *b, int *c, int n)
{
int tid = blockIdx.x;
if (tid < n)
c[tid] = a[tid] + b[tid];
}
int main()
{
int *a, *b, *c, *d;
int *d_a, *d_b, *d_c, *d_d;
// Allocate the host-side buffers.
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
d = (int *)malloc(SIZE*sizeof(int));
// cudaMalloc(destination, number of bytes) allocates memory on the device.
cudaMalloc(&d_a, SIZE*sizeof(int));
cudaMalloc(&d_b, SIZE*sizeof(int));
cudaMalloc(&d_c, SIZE*sizeof(int));
cudaMalloc(&d_d, SIZE*sizeof(int));
// Initialize the vectors.
for (int i = 0; i<SIZE; ++i)
{
a[i] = i;
b[i] = i;
c[i] = 0;
d[i] = 0;
}
// cudaMemcpy(destination, source, number of bytes, cudaMemcpyHostToDevice) copies host memory to the device.
cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_d, d, SIZE*sizeof(int), cudaMemcpyHostToDevice);
// A kernel call takes an execution configuration in addition to the usual arguments.
// The first parameter is the number of blocks; in this example there is a single thread block.
// SIZE here means 1024 threads.
VectorAdd << < 1, SIZE >> >(d_a, d_b, d_c, SIZE);
VectorBlockAdd << < SIZE, 1 >> >(d_a, d_b, d_d, SIZE);
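// Illustrative sketch (not part of the original file): the two launches above cover
// the vector either with one block of SIZE threads or with SIZE blocks of one thread.
// A more typical configuration combines both, which requires the kernel to form a
// global index, e.g. a hypothetical variant:
// __global__ void VectorGridAdd(int *a, int *b, int *c, int n) {
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// if (i < n) c[i] = a[i] + b[i];
// }
// launched as VectorGridAdd<<<(SIZE + 255) / 256, 256>>>(d_a, d_b, d_c, SIZE);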
// cudaMemcpy(destination, source, number of bytes, cudaMemcpyDeviceToHost) copies the device memory (the computed results) back to the host.
cudaMemcpy(a, d_a, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(b, d_b, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(d, d_d, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
printf("vector c: \n");
for (int i = 0; i<SIZE; ++i)
printf("c[%d] = %d\n", i, c[i]);
printf("vector d: \n");
for (int i = 0; i<SIZE; ++i)
printf("d[%d] = %d\n", i, d[i]);
// Free the host memory.
free(a);
free(b);
free(c);
free(d);
// cudaFree(d_a) etc. release the device memory.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
return 0;
}
|
1a84852c580cd13a4c45cd67d81588f6c544b281.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/upsample_kernel.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void UpsampleTrilinear3DForward(const int64_t elem_cnt, const T* in_dptr,
NdIndexOffsetHelper<int64_t, 5> in_helper,
NdIndexOffsetHelper<int64_t, 5> out_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* out_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
out_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
const T* pos1 = &in_dptr[in_helper.NdIndexToOffset(n, c, t1, h1, w1)];
out_dptr[index] =
t0lambda
* (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p])
+ h1lambda
* (w0lambda * pos1[h1p * in_width] + w1lambda * pos1[h1p * in_width + w1p]))
+ t1lambda
* (h0lambda
* (w0lambda * pos1[t1p * in_height * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + w1p])
+ h1lambda
* (w0lambda * pos1[t1p * in_height * in_width + h1p * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + h1p * in_width + w1p]));
}
}
template<typename T>
__global__ void UpsampleTrilinear3DBackward(const int64_t elem_cnt, const T* dy_dptr,
NdIndexOffsetHelper<int64_t, 5> dy_helper,
NdIndexOffsetHelper<int64_t, 5> dx_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* dx_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
dy_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
T* pos1 = &dx_dptr[dx_helper.NdIndexToOffset(n, c, t1, h1, w1)];
const T* pos2 = &dy_dptr[index];
cuda::atomic::FastAdd(pos1, 0, elem_cnt, t0lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, w1p, elem_cnt, t0lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, h1p * in_width, elem_cnt, t0lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, h1p * in_width + w1p, elem_cnt,
t0lambda * h1lambda * w1lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width, elem_cnt,
t1lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width + w1p, elem_cnt,
t1lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width + h1p * in_width, elem_cnt,
t1lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width + h1p * in_width + w1p, elem_cnt,
t1lambda * h1lambda * w1lambda * pos2[0]);
}
}
} // namespace
template<typename T>
class UpsampleTrilinear3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinear3DGPUKernel() = default;
~UpsampleTrilinear3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0);
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = y_tensor->shape_view().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> in_helper(
x_tensor->shape_view().At(0), x_tensor->shape_view().At(1), x_tensor->shape_view().At(2),
x_tensor->shape_view().At(3), x_tensor->shape_view().At(4));
NdIndexOffsetHelper<int64_t, 5> out_helper(
y_tensor->shape_view().At(0), y_tensor->shape_view().At(1), y_tensor->shape_view().At(2),
y_tensor->shape_view().At(3), y_tensor->shape_view().At(4));
const int64_t in_depth = x_tensor->shape_view().At(2);
const int64_t in_height = x_tensor->shape_view().At(3);
const int64_t in_width = x_tensor->shape_view().At(4);
const int64_t out_depth = y_tensor->shape_view().At(2);
const int64_t out_height = y_tensor->shape_view().At(3);
const int64_t out_width = y_tensor->shape_view().At(4);
const std::vector<int64_t> output_size = ctx->Attr<std::vector<int64_t>>("output_size");
double depth_scale = ctx->Attr<double>("depth_scale");
double height_scale = ctx->Attr<double>("height_scale");
double width_scale = ctx->Attr<double>("width_scale");
if (!output_size.empty()) {
depth_scale = static_cast<double>(out_depth) / static_cast<double>(in_depth);
height_scale = static_cast<double>(out_height) / static_cast<double>(in_height);
width_scale = static_cast<double>(out_width) / static_cast<double>(in_width);
}
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DForward<T>), ctx->stream(), elem_cnt, elem_cnt,
x_tensor->dptr<T>(), in_helper, out_helper, x_tensor->shape_view().At(2),
x_tensor->shape_view().At(3), x_tensor->shape_view().At(4), scale_depth,
scale_height, scale_width, align_corners, y_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class UpsampleTrilinearGrad3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinearGrad3DGPUKernel() = default;
~UpsampleTrilinearGrad3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0);
Memset<DeviceType::kCUDA>(ctx->stream(), dx_tensor->mut_dptr<T>(), 0,
dx_tensor->shape_view().elem_cnt() * sizeof(T));
const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0);
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = dy_tensor->shape_view().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> dy_helper(
dy_tensor->shape_view().At(0), dy_tensor->shape_view().At(1), dy_tensor->shape_view().At(2),
dy_tensor->shape_view().At(3), dy_tensor->shape_view().At(4));
NdIndexOffsetHelper<int64_t, 5> dx_helper(
dx_tensor->shape_view().At(0), dx_tensor->shape_view().At(1), dx_tensor->shape_view().At(2),
dx_tensor->shape_view().At(3), dx_tensor->shape_view().At(4));
const int64_t in_depth = dx_tensor->shape_view().At(2);
const int64_t in_height = dx_tensor->shape_view().At(3);
const int64_t in_width = dx_tensor->shape_view().At(4);
const int64_t out_depth = dy_tensor->shape_view().At(2);
const int64_t out_height = dy_tensor->shape_view().At(3);
const int64_t out_width = dy_tensor->shape_view().At(4);
const std::vector<int64_t> output_size = ctx->Attr<std::vector<int64_t>>("output_size");
double depth_scale = ctx->Attr<double>("depth_scale");
double height_scale = ctx->Attr<double>("height_scale");
double width_scale = ctx->Attr<double>("width_scale");
if (!output_size.empty()) {
depth_scale = static_cast<double>(out_depth) / static_cast<double>(in_depth);
height_scale = static_cast<double>(out_height) / static_cast<double>(in_height);
width_scale = static_cast<double>(out_width) / static_cast<double>(in_width);
}
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DBackward<T>), ctx->stream(), elem_cnt, elem_cnt,
dy_tensor->dptr<T>(), dy_helper, dx_helper, dx_tensor->shape_view().At(2),
dx_tensor->shape_view().At(3), dx_tensor->shape_view().At(4), scale_depth,
scale_height, scale_width, align_corners, dx_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_UPSAMPTRILINEAR3D_CUDA_KERNEL(dtype) \
REGISTER_USER_KERNEL("upsample_trilinear_3d") \
.SetCreateFn<UpsampleTrilinear3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \
REGISTER_USER_KERNEL("upsample_trilinear_3d_grad") \
.SetCreateFn<UpsampleTrilinearGrad3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value));
REGISTER_UPSAMPTRILINEAR3D_CUDA_KERNEL(float)
REGISTER_UPSAMPTRILINEAR3D_CUDA_KERNEL(double)
} // namespace oneflow
| 1a84852c580cd13a4c45cd67d81588f6c544b281.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/upsample_kernel.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void UpsampleTrilinear3DForward(const int64_t elem_cnt, const T* in_dptr,
NdIndexOffsetHelper<int64_t, 5> in_helper,
NdIndexOffsetHelper<int64_t, 5> out_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* out_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
out_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
const T* pos1 = &in_dptr[in_helper.NdIndexToOffset(n, c, t1, h1, w1)];
out_dptr[index] =
t0lambda
* (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p])
+ h1lambda
* (w0lambda * pos1[h1p * in_width] + w1lambda * pos1[h1p * in_width + w1p]))
+ t1lambda
* (h0lambda
* (w0lambda * pos1[t1p * in_height * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + w1p])
+ h1lambda
* (w0lambda * pos1[t1p * in_height * in_width + h1p * in_width]
+ w1lambda * pos1[t1p * in_height * in_width + h1p * in_width + w1p]));
}
}
template<typename T>
__global__ void UpsampleTrilinear3DBackward(const int64_t elem_cnt, const T* dy_dptr,
NdIndexOffsetHelper<int64_t, 5> dy_helper,
NdIndexOffsetHelper<int64_t, 5> dx_helper,
const int64_t in_depth, const int64_t in_height,
const int64_t in_width, const T rdepth, const T rheight,
const T rwidth, const bool align_corners, T* dx_dptr) {
CUDA_1D_KERNEL_LOOP(index, elem_cnt) {
int64_t n, c, d, h, w;
dy_helper.OffsetToNdIndex(index, n, c, d, h, w);
const T t1r = GetAreaPixel(rdepth, d, align_corners);
const int64_t t1 = t1r;
const int64_t t1p = (t1 < in_depth - 1) ? 1 : 0;
const T t1lambda = t1r - t1;
const T t0lambda = static_cast<T>(1.) - t1lambda;
const T h1r = GetAreaPixel(rheight, h, align_corners);
const int64_t h1 = h1r;
const int64_t h1p = (h1 < in_height - 1) ? 1 : 0;
const T h1lambda = h1r - h1;
const T h0lambda = static_cast<T>(1.) - h1lambda;
const T w1r = GetAreaPixel(rwidth, w, align_corners);
const int64_t w1 = w1r;
const int64_t w1p = (w1 < in_width - 1) ? 1 : 0;
const T w1lambda = w1r - w1;
const T w0lambda = static_cast<T>(1.) - w1lambda;
T* pos1 = &dx_dptr[dx_helper.NdIndexToOffset(n, c, t1, h1, w1)];
const T* pos2 = &dy_dptr[index];
cuda::atomic::FastAdd(pos1, 0, elem_cnt, t0lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, w1p, elem_cnt, t0lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, h1p * in_width, elem_cnt, t0lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, h1p * in_width + w1p, elem_cnt,
t0lambda * h1lambda * w1lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width, elem_cnt,
t1lambda * h0lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width + w1p, elem_cnt,
t1lambda * h0lambda * w1lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width + h1p * in_width, elem_cnt,
t1lambda * h1lambda * w0lambda * pos2[0]);
cuda::atomic::FastAdd(pos1, t1p * in_height * in_width + h1p * in_width + w1p, elem_cnt,
t1lambda * h1lambda * w1lambda * pos2[0]);
}
}
} // namespace
template<typename T>
class UpsampleTrilinear3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinear3DGPUKernel() = default;
~UpsampleTrilinear3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0);
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = y_tensor->shape_view().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> in_helper(
x_tensor->shape_view().At(0), x_tensor->shape_view().At(1), x_tensor->shape_view().At(2),
x_tensor->shape_view().At(3), x_tensor->shape_view().At(4));
NdIndexOffsetHelper<int64_t, 5> out_helper(
y_tensor->shape_view().At(0), y_tensor->shape_view().At(1), y_tensor->shape_view().At(2),
y_tensor->shape_view().At(3), y_tensor->shape_view().At(4));
const int64_t in_depth = x_tensor->shape_view().At(2);
const int64_t in_height = x_tensor->shape_view().At(3);
const int64_t in_width = x_tensor->shape_view().At(4);
const int64_t out_depth = y_tensor->shape_view().At(2);
const int64_t out_height = y_tensor->shape_view().At(3);
const int64_t out_width = y_tensor->shape_view().At(4);
const std::vector<int64_t> output_size = ctx->Attr<std::vector<int64_t>>("output_size");
double depth_scale = ctx->Attr<double>("depth_scale");
double height_scale = ctx->Attr<double>("height_scale");
double width_scale = ctx->Attr<double>("width_scale");
if (!output_size.empty()) {
depth_scale = static_cast<double>(out_depth) / static_cast<double>(in_depth);
height_scale = static_cast<double>(out_height) / static_cast<double>(in_height);
width_scale = static_cast<double>(out_width) / static_cast<double>(in_width);
}
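// GetAreaPixelScale folds align_corners and the (possibly recomputed) per-axis scales into
// the factors used to map output coordinates back to input coordinates.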
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DForward<T>), ctx->stream(), elem_cnt, elem_cnt,
x_tensor->dptr<T>(), in_helper, out_helper, x_tensor->shape_view().At(2),
x_tensor->shape_view().At(3), x_tensor->shape_view().At(4), scale_depth,
scale_height, scale_width, align_corners, y_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<typename T>
class UpsampleTrilinearGrad3DGPUKernel final : public user_op::OpKernel {
public:
UpsampleTrilinearGrad3DGPUKernel() = default;
~UpsampleTrilinearGrad3DGPUKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0);
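// Zero dx first: the backward kernel accumulates gradients with atomic adds.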
Memset<DeviceType::kCUDA>(ctx->stream(), dx_tensor->mut_dptr<T>(), 0,
dx_tensor->shape_view().elem_cnt() * sizeof(T));
const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0);
const bool align_corners = ctx->Attr<bool>("align_corners");
const int64_t elem_cnt = dy_tensor->shape_view().elem_cnt();
NdIndexOffsetHelper<int64_t, 5> dy_helper(
dy_tensor->shape_view().At(0), dy_tensor->shape_view().At(1), dy_tensor->shape_view().At(2),
dy_tensor->shape_view().At(3), dy_tensor->shape_view().At(4));
NdIndexOffsetHelper<int64_t, 5> dx_helper(
dx_tensor->shape_view().At(0), dx_tensor->shape_view().At(1), dx_tensor->shape_view().At(2),
dx_tensor->shape_view().At(3), dx_tensor->shape_view().At(4));
const int64_t in_depth = dx_tensor->shape_view().At(2);
const int64_t in_height = dx_tensor->shape_view().At(3);
const int64_t in_width = dx_tensor->shape_view().At(4);
const int64_t out_depth = dy_tensor->shape_view().At(2);
const int64_t out_height = dy_tensor->shape_view().At(3);
const int64_t out_width = dy_tensor->shape_view().At(4);
const std::vector<int64_t> output_size = ctx->Attr<std::vector<int64_t>>("output_size");
double depth_scale = ctx->Attr<double>("depth_scale");
double height_scale = ctx->Attr<double>("height_scale");
double width_scale = ctx->Attr<double>("width_scale");
if (!output_size.empty()) {
depth_scale = static_cast<double>(out_depth) / static_cast<double>(in_depth);
height_scale = static_cast<double>(out_height) / static_cast<double>(in_height);
width_scale = static_cast<double>(out_width) / static_cast<double>(in_width);
}
const T scale_depth = GetAreaPixelScale(in_depth, out_depth, align_corners, depth_scale);
const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale);
const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale);
RUN_CUDA_KERNEL((UpsampleTrilinear3DBackward<T>), ctx->stream(), elem_cnt, elem_cnt,
dy_tensor->dptr<T>(), dy_helper, dx_helper, dx_tensor->shape_view().At(2),
dx_tensor->shape_view().At(3), dx_tensor->shape_view().At(4), scale_depth,
scale_height, scale_width, align_corners, dx_tensor->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_UPSAMPTRILINEAR3D_CUDA_KERNEL(dtype) \
REGISTER_USER_KERNEL("upsample_trilinear_3d") \
.SetCreateFn<UpsampleTrilinear3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \
REGISTER_USER_KERNEL("upsample_trilinear_3d_grad") \
.SetCreateFn<UpsampleTrilinearGrad3DGPUKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value));
REGISTER_UPSAMPTRILINEAR3D_CUDA_KERNEL(float)
REGISTER_UPSAMPTRILINEAR3D_CUDA_KERNEL(double)
} // namespace oneflow
|
b7029832cbc5ff73692c9213aa1d61de1a1b22b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <stdlib.h>
#include <time.h>
#include <papi.h>
#define PAPI_ERROR(n,v) (fprintf(stderr, "%s failed with code %d\n", (n), (v)))
#include "../common/kernels.cu"
__host__
int main (int argc, char * argv[]) {
int retval;
// arguments
int version = atoi(argv[1]);
int nthreads = atoi(argv[2]);
int nblocks = atoi(argv[3]);
int N = nthreads;
// Initialize library
retval = PAPI_library_init(PAPI_VER_CURRENT);
if (retval != PAPI_VER_CURRENT) {
PAPI_ERROR("PAPI_library_init", retval);
}
fprintf(stderr, "PAPI version %4d %6d %7d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION));
int eventcnt = 0;
int tmp_eventcnt = argc - 4;
char ** tmp_argv = argv + 4;
char ** tmp_names = new char*[tmp_eventcnt];
int * tmp_events = new int[tmp_eventcnt];
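// Resolve each event name given on the command line to a PAPI event code; names that fail
// to resolve are reported and skipped.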
for (int i = 0; i < tmp_eventcnt; ++i) {
fprintf(stderr, "%s\n", tmp_argv[i]);
retval = PAPI_event_name_to_code(tmp_argv[i], tmp_events + eventcnt);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_event_name_to_code", retval);
else {
fprintf(stderr, "Event \"%s\" --- Code: %x\n", tmp_argv[i], tmp_names[eventcnt]);
tmp_names[eventcnt++] = tmp_argv[i];
}
}
char ** names = new char*[eventcnt];
int * events = new int[eventcnt];
long long int * values = new long long int[eventcnt];
for (int i = 0 ; i < eventcnt; ++i) {
names[i] = tmp_names[i];
events[i] = tmp_events[i];
}
memset(values, 0, sizeof(long long int) * eventcnt);
delete[] tmp_names;
delete[] tmp_events;
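// Allocate and randomly initialise the host input, then copy it to the device buffer.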
int *host_arr = (int *) malloc(sizeof(int) * N);
int *dev_arr;
srand(time(NULL));
for(int i = 0; i < N; ++i)
host_arr[i] = rand();
hipMalloc(&dev_arr, sizeof(int) * N);
hipMemcpy(dev_arr, &host_arr, sizeof(int) * N, hipMemcpyHostToDevice);
int set = PAPI_NULL;
retval = PAPI_create_eventset(&set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_create_eventset", retval);
retval = PAPI_add_events(set, events, eventcnt);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_add_events", retval);
retval = PAPI_start(set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_start", retval);
if (version == 1)
hipLaunchKernelGGL(( kernel_one), dim3(nthreads), dim3(nblocks) , 0, 0, dev_arr, N);
else
hipLaunchKernelGGL(( kernel_two), dim3(nthreads), dim3(nblocks) , 0, 0, dev_arr, N);
retval = PAPI_stop(set, values);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_stop", retval);
for (int i = 0; i < eventcnt; ++i)
printf("%s\t%x\t%lld\n", names[i], events[i], values[i]);
retval = PAPI_cleanup_eventset(set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_cleanup_eventset", retval);
retval = PAPI_destroy_eventset(&set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_destroy_eventset", retval);
delete[] names;
delete[] events;
delete[] values;
// Shutdown library
PAPI_shutdown();
return 0;
}
| b7029832cbc5ff73692c9213aa1d61de1a1b22b8.cu | #include <cstdio>
#include <stdlib.h>
#include <time.h>
#include <papi.h>
#define PAPI_ERROR(n,v) (fprintf(stderr, "%s failed with code %d\n", (n), (v)))
#include "../common/kernels.cu"
__host__
int main (int argc, char * argv[]) {
int retval;
// arguments
int version = atoi(argv[1]);
int nthreads = atoi(argv[2]);
int nblocks = atoi(argv[3]);
int N = nthreads;
// Initialize library
retval = PAPI_library_init(PAPI_VER_CURRENT);
if (retval != PAPI_VER_CURRENT) {
PAPI_ERROR("PAPI_library_init", retval);
}
fprintf(stderr, "PAPI version %4d %6d %7d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION));
int eventcnt = 0;
int tmp_eventcnt = argc - 4;
char ** tmp_argv = argv + 4;
char ** tmp_names = new char*[tmp_eventcnt];
int * tmp_events = new int[tmp_eventcnt];
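// Resolve each event name given on the command line to a PAPI event code; names that fail
// to resolve are reported and skipped.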
for (int i = 0; i < tmp_eventcnt; ++i) {
fprintf(stderr, "%s\n", tmp_argv[i]);
retval = PAPI_event_name_to_code(tmp_argv[i], tmp_events + eventcnt);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_event_name_to_code", retval);
else {
fprintf(stderr, "Event \"%s\" --- Code: %x\n", tmp_argv[i], tmp_names[eventcnt]);
tmp_names[eventcnt++] = tmp_argv[i];
}
}
char ** names = new char*[eventcnt];
int * events = new int[eventcnt];
long long int * values = new long long int[eventcnt];
for (int i = 0 ; i < eventcnt; ++i) {
names[i] = tmp_names[i];
events[i] = tmp_events[i];
}
memset(values, 0, sizeof(long long int) * eventcnt);
delete[] tmp_names;
delete[] tmp_events;
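// Allocate and randomly initialise the host input, then copy it to the device buffer.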
int *host_arr = (int *) malloc(sizeof(int) * N);
int *dev_arr;
srand(time(NULL));
for(int i = 0; i < N; ++i)
host_arr[i] = rand();
cudaMalloc(&dev_arr, sizeof(int) * N);
cudaMemcpy(dev_arr, &host_arr, sizeof(int) * N, cudaMemcpyHostToDevice);
int set = PAPI_NULL;
retval = PAPI_create_eventset(&set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_create_eventset", retval);
retval = PAPI_add_events(set, events, eventcnt);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_add_events", retval);
retval = PAPI_start(set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_start", retval);
if (version == 1)
kernel_one<<< nthreads, nblocks >>>(dev_arr, N);
else
kernel_two<<< nthreads, nblocks >>>(dev_arr, N);
retval = PAPI_stop(set, values);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_stop", retval);
for (int i = 0; i < eventcnt; ++i)
printf("%s\t%x\t%lld\n", names[i], events[i], values[i]);
retval = PAPI_cleanup_eventset(set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_cleanup_eventset", retval);
retval = PAPI_destroy_eventset(&set);
if (retval != PAPI_OK)
PAPI_ERROR("PAPI_destroy_eventset", retval);
delete[] names;
delete[] events;
delete[] values;
// Shutdown library
PAPI_shutdown();
return 0;
}
|
799bd4b61dbd0506fc0ba50d7146c68cc87a06f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
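// Each kernel below dispatches on the iterator's common dtype and applies the matching
// device math function elementwise through gpu_kernel.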
void acos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "acos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
void asin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "asin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
void atan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "atan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
void sin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sin(a);
});
});
}
void cos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "cos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cos(a);
});
});
}
void sinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
void cosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "cosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
void tanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "tanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
void acosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
void asinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
void atanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
void tan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "tan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tan(a);
});
});
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
}} // namespace at::native
| 799bd4b61dbd0506fc0ba50d7146c68cc87a06f2.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
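// Each kernel below dispatches on the iterator's common dtype and applies the matching
// device math function elementwise through gpu_kernel.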
void acos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "acos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
void asin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "asin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
void atan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "atan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
void sin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sin(a);
});
});
}
void cos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "cos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cos(a);
});
});
}
void sinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
void cosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "cosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
void tanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "tanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
void acosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
void asinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
void atanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
void tan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "tan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tan(a);
});
});
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
}} // namespace at::native
|
98189af42522b7f84c3af93c0024112d487eeaaa.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
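// Tile shapes for the int8 tensor-core (Sm75) kernel: 32x64x64 per threadblock,
// 16x32x64 per warp, 8x8x16 per mma instruction.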
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 98189af42522b7f84c3af93c0024112d487eeaaa.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
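// Tile shapes for the int8 tensor-core (Sm75) kernel: 32x64x64 per threadblock,
// 16x32x64 per warp, 8x8x16 per mma instruction.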
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|