hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
4ee29972cf216b1993e83563f7a3f4115eecc75f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "helper_math.h"
#include "FastDeviceMinMax.h"
#include "Logger.h"
#include "CUDAAssert.h"
__device__ unsigned __bfind(unsigned i) { unsigned b; asm volatile("bfind.u32 %0, %1; " : "=r"(b) : "r"(i)); return b; }
__device__ __inline__ uint sign_extend_s8x4(uint i) { uint v; asm("prmt.b32 %0, %1, 0x0, 0x0000BA98;" : "=r"(v) : "r"(i)); return v; }
__device__ __inline__ uint extract_byte(uint i, uint n) { return (i >> (n * 8)) & 0xFF; }
__device__ const float4* BVHTreeNodes;
__device__ const float4* TriangleWoopCoordinates;
__device__ const int* MappingFromTriangleAddressToIndex;
#define DYNAMIC_FETCH 1
#define TRIANGLE_POSTPONING 1
#define STACK_POP(X) { --stackPtr; if (stackPtr < SM_STACK_SIZE) X = traversalStackSM[threadIdx.x][threadIdx.y][stackPtr]; else X = traversalStack[stackPtr - SM_STACK_SIZE]; }
#define STACK_PUSH(X) { if (stackPtr < SM_STACK_SIZE) traversalStackSM[threadIdx.x][threadIdx.y][stackPtr] = X; else traversalStack[stackPtr - SM_STACK_SIZE] = X; stackPtr++; }
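// Two-level traversal stack: the first SM_STACK_SIZE entries of each thread's stack live in
// shared memory (traversalStackSM); deeper entries spill to the per-thread local array
// traversalStack. STACK_PUSH/STACK_POP pick the backing store from the current stackPtr.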
__global__ void rtTraceCWBVHDynamicFetch(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount,
int* finishedRayCount
)
{
const float ooeps = exp2f(-80.0f);
const int STACK_SIZE = 32;
uint2 traversalStack[STACK_SIZE];
const int SM_STACK_SIZE = 8; // Slightly smaller stack size than the paper (12), as this seems faster on my GTX1080
__shared__ uint2 traversalStackSM[32][2][SM_STACK_SIZE];
int rayidx;
float3 orig, dir;
float tmin, tmax;
float idirx, idiry, idirz;
uint octinv;
uint2 nodeGroup = make_uint2(0);
uint2 triangleGroup = make_uint2(0);
char stackPtr = 0;
int hitAddr = -1;
float2 triangleuv;
__shared__ int nextRayArray[2];
const float4* localBVHTreeNodes = BVHTreeNodes;
const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates;
do
{
int& rayBase = nextRayArray[threadIdx.y];
bool terminated = stackPtr == 0 && nodeGroup.y <= 0x00FFFFFF && triangleGroup.y == 0;
const unsigned int maskTerminated = __ballot_sync(__activemask(), terminated);
const int numTerminated = __popc(maskTerminated);
const int idxTerminated = __popc(maskTerminated & ((1u << threadIdx.x) - 1));
if (terminated)
{
if (idxTerminated == 0)
rayBase = atomicAdd(finishedRayCount, numTerminated);
rayidx = rayBase + idxTerminated;
if (rayidx >= rayCount)
break;
orig = make_float3(rayBuffer[rayidx].origin_tmin);
dir = make_float3(rayBuffer[rayidx].dir_tmax);
tmin = rayBuffer[rayidx].origin_tmin.w;
tmax = rayBuffer[rayidx].dir_tmax.w;
idirx = 1.0f / (fabsf(dir.x) > ooeps ? dir.x : copysignf(ooeps, dir.x)); // inverse ray direction
idiry = 1.0f / (fabsf(dir.y) > ooeps ? dir.y : copysignf(ooeps, dir.y)); // inverse ray direction
idirz = 1.0f / (fabsf(dir.z) > ooeps ? dir.z : copysignf(ooeps, dir.z)); // inverse ray direction
octinv = ((dir.x < 0 ? 1 : 0) << 2) | ((dir.y < 0 ? 1 : 0) << 1) | ((dir.z < 0 ? 1 : 0) << 0);
octinv = 7 - octinv;
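// octinv holds the bitwise complement of the ray's octant (packed sign bits of dir); it is
// XORed into the child slot/bit indices below so that children are visited in roughly
// front-to-back order along the ray.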
nodeGroup = make_uint2(0, 0b10000000000000000000000000000000);
triangleGroup = make_uint2(0);
stackPtr = 0;
hitAddr = -1;
}
#if DYNAMIC_FETCH
int lostLoopIterations = 0;
#endif
do
{
if (nodeGroup.y > 0x00FFFFFF)
{
const unsigned int hits = nodeGroup.y;
const unsigned int imask = nodeGroup.y;
const unsigned int child_bit_index = __bfind(hits);
const unsigned int child_node_base_index = nodeGroup.x;
nodeGroup.y &= ~(1 << child_bit_index);
if (nodeGroup.y > 0x00FFFFFF)
{
STACK_PUSH(nodeGroup);
}
{
const unsigned int slot_index = (child_bit_index - 24) ^ octinv;
const unsigned int octinv4 = octinv * 0x01010101u;
const unsigned int relative_index = __popc(imask & ~(0xFFFFFFFF << slot_index));
const unsigned int child_node_index = child_node_base_index + relative_index;
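// Each compressed wide-BVH node is five float4s (80 bytes): n0 = node origin, per-axis scale
// exponents and the imask byte; n1 = child/triangle base indices plus child meta bytes;
// n2..n4 = quantized 8-bit child bounding-box slabs.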
float4 n0, n1, n2, n3, n4;
n0 = __ldg(localBVHTreeNodes + child_node_index * 5 + 0);
n1 = __ldg(localBVHTreeNodes + child_node_index * 5 + 1);
n2 = __ldg(localBVHTreeNodes + child_node_index * 5 + 2);
n3 = __ldg(localBVHTreeNodes + child_node_index * 5 + 3);
n4 = __ldg(localBVHTreeNodes + child_node_index * 5 + 4);
float3 p = make_float3(n0);
int3 e;
e.x = *((char*)&n0.w + 0);
e.y = *((char*)&n0.w + 1);
e.z = *((char*)&n0.w + 2);
nodeGroup.x = float_as_uint(n1.x);
triangleGroup.x = float_as_uint(n1.y);
triangleGroup.y = 0;
unsigned int hitmask = 0;
const float adjusted_idirx = uint_as_float((e.x + 127) << 23) * idirx;
const float adjusted_idiry = uint_as_float((e.y + 127) << 23) * idiry;
const float adjusted_idirz = uint_as_float((e.z + 127) << 23) * idirz;
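// (e + 127) << 23 assembles the IEEE-754 bit pattern of 2^e, so the quantized 8-bit child
// bounds can be rescaled into ray space with one multiply-add per slab value below.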
const float origx = -(orig.x - p.x) * idirx;
const float origy = -(orig.y - p.y) * idiry;
const float origz = -(orig.z - p.z) * idirz;
{
// First 4
const unsigned int meta4 = float_as_uint(n1.z);
const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010;
const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3);
const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F;
const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707;
// Potential micro-optimization: use PRMT to do the selection here, as described by the paper
uint swizzledLox = (idirx < 0) ? float_as_uint(n3.z) : float_as_uint(n2.x);
uint swizzledHix = (idirx < 0) ? float_as_uint(n2.x) : float_as_uint(n3.z);
uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.x) : float_as_uint(n2.z);
uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.z) : float_as_uint(n4.x);
uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.z) : float_as_uint(n3.x);
uint swizzledHiz = (idirz < 0) ? float_as_uint(n3.x) : float_as_uint(n4.z);
float tminx[4];
float tminy[4];
float tminz[4];
float tmaxx[4];
float tmaxy[4];
float tmaxz[4];
tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx;
tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx;
tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx;
tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx;
tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy;
tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy;
tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy;
tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy;
tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz;
tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz;
tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz;
tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz;
tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx;
tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx;
tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx;
tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx;
tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy;
tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy;
tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy;
tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy;
tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz;
tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz;
tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz;
tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz;
for (int childIndex = 0; childIndex < 4; childIndex++)
{
// Use VMIN, VMAX to compute the slabs
const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin);
const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax);
bool intersected = cmin <= cmax;
// Potential micro-optimization: use VSHL to implement this part, as described by the paper
if (intersected)
{
const unsigned int child_bits = extract_byte(child_bits4, childIndex);
const unsigned int bit_index = extract_byte(bit_index4, childIndex);
hitmask |= child_bits << bit_index;
}
}
}
{
// Second 4
const unsigned int meta4 = float_as_uint(n1.w);
const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010;
const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3);
const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F;
const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707;
// Potential micro-optimization: use PRMT to do the selection here, as described by the paper
uint swizzledLox = (idirx < 0) ? float_as_uint(n3.w) : float_as_uint(n2.y);
uint swizzledHix = (idirx < 0) ? float_as_uint(n2.y) : float_as_uint(n3.w);
uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.y) : float_as_uint(n2.w);
uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.w) : float_as_uint(n4.y);
uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.w) : float_as_uint(n3.y);
uint swizzledHiz = (idirz < 0) ? float_as_uint(n3.y) : float_as_uint(n4.w);
float tminx[4];
float tminy[4];
float tminz[4];
float tmaxx[4];
float tmaxy[4];
float tmaxz[4];
tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx;
tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx;
tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx;
tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx;
tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy;
tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy;
tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy;
tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy;
tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz;
tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz;
tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz;
tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz;
tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx;
tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx;
tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx;
tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx;
tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy;
tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy;
tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy;
tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy;
tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz;
tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz;
tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz;
tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz;
for (int childIndex = 0; childIndex < 4; childIndex++)
{
// Use VMIN, VMAX to compute the slabs
const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin);
const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax);
bool intersected = cmin <= cmax;
// Potential micro-optimization: use VSHL to implement this part, as described by the paper
if (intersected)
{
const unsigned int child_bits = extract_byte(child_bits4, childIndex);
const unsigned int bit_index = extract_byte(bit_index4, childIndex);
hitmask |= child_bits << bit_index;
}
}
}
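// High byte of the hitmask = intersected internal children (kept together with the node's
// imask byte in nodeGroup.y); low 24 bits = intersected leaf triangles, handed to triangleGroup.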
nodeGroup.y = (hitmask & 0xFF000000) | (*((byte*)&n0.w + 3));
triangleGroup.y = hitmask & 0x00FFFFFF;
}
}
else
{
triangleGroup = nodeGroup;
nodeGroup = make_uint2(0);
}
#if TRIANGLE_POSTPONING
const int totalThreads = __popc(__activemask());
#endif
while (triangleGroup.y != 0)
{
#if TRIANGLE_POSTPONING
const float Rt = 0.2;
const int threshold = totalThreads * Rt;
const int numActiveThreads = __popc(__activemask());
if (numActiveThreads < threshold)
{
STACK_PUSH(triangleGroup);
break;
}
#endif
int triangleIndex = __bfind(triangleGroup.y);
int triAddr = triangleGroup.x * 3 + triangleIndex * 3;
float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0);
float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1);
float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2);
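// Woop-style unit-triangle intersection: v00/v11/v22 store a precomputed affine transform per
// triangle, so t comes from the plane row and u/v from the two barycentric rows.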
float Oz = v00.w - orig.x*v00.x - orig.y*v00.y - orig.z*v00.z;
float invDz = 1.0f / (dir.x*v00.x + dir.y*v00.y + dir.z*v00.z);
float t = Oz * invDz;
float Ox = v11.w + orig.x*v11.x + orig.y*v11.y + orig.z*v11.z;
float Dx = dir.x * v11.x + dir.y * v11.y + dir.z * v11.z;
float u = Ox + t * Dx;
float Oy = v22.w + orig.x*v22.x + orig.y*v22.y + orig.z*v22.z;
float Dy = dir.x*v22.x + dir.y*v22.y + dir.z*v22.z;
float v = Oy + t*Dy;
if (t > tmin && t < tmax)
{
if (u >= 0.0f && u <= 1.0f)
{
if (v >= 0.0f && u + v <= 1.0f)
{
triangleuv.x = u;
triangleuv.y = v;
tmax = t;
hitAddr = triAddr;
}
}
}
triangleGroup.y &= ~(1 << triangleIndex);
}
if (nodeGroup.y <= 0x00FFFFFF)
{
if (stackPtr > 0)
{
STACK_POP(nodeGroup);
}
else
{
rayResultBuffer[rayidx].t_triId_u_v = make_float4(tmax, int_as_float(hitAddr), triangleuv.x, triangleuv.y);
break;
}
}
#if DYNAMIC_FETCH
const int Nd = 4;
const int Nw = 16;
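// Dynamic ray fetching: accumulate a warp-utilization cost term each inner iteration and bail
// out of the traversal loop once it reaches Nw, so terminated lanes can fetch fresh rays in
// the outer loop.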
lostLoopIterations += __popc(__activemask()) - Nd;
if (lostLoopIterations >= Nw)
break;
#endif
} while (true);
} while (true);
}
__host__ void rtBindCWBVHData(
const float4* InBVHTreeNodes,
const float4* InTriangleWoopCoordinates,
const int* InMappingFromTriangleAddressToIndex)
{
cudaCheck(hipMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex)));
cudaCheck(hipMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates)));
cudaCheck(hipMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes)));
}
__host__ void rtTraceCWBVH(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount
)
{
float elapsedTime;
hipEvent_t startEvent, stopEvent;
cudaCheck(hipEventCreate(&startEvent));
cudaCheck(hipEventCreate(&stopEvent));
int* cudaFinishedRayCount;
cudaCheck(hipMalloc(&cudaFinishedRayCount, sizeof(int)));
dim3 blockDim(32, 2);
dim3 gridDim(32, 32);
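// Persistent-threads launch: a fixed 32x32 grid of 32x2-thread blocks keeps pulling ray
// batches via atomicAdd on cudaFinishedRayCount until rayCount is exhausted, instead of
// launching one thread per ray.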
hipProfilerStart();
cudaCheck(hipEventRecord(startEvent, 0));
{
hipMemset(cudaFinishedRayCount, 0, sizeof(int));
hipLaunchKernelGGL(( rtTraceCWBVHDynamicFetch) , dim3(gridDim), dim3(blockDim) , 0, 0,
rayBuffer,
rayResultBuffer,
rayCount,
cudaFinishedRayCount
);
}
cudaCheck(hipEventRecord(stopEvent, 0));
cudaCheck(hipEventSynchronize(stopEvent));
cudaCheck(hipEventElapsedTime(&elapsedTime, startEvent, stopEvent));
Log("%.3fMS, %.2fMRays/s (rtTraceCWBVH Dynamic Fetch)", elapsedTime, (float)rayCount / 1000000.0f / (elapsedTime / 1000.0f));
hipProfilerStop();
hipFree(cudaFinishedRayCount);
}
| 4ee29972cf216b1993e83563f7a3f4115eecc75f.cu | #include <cuda_profiler_api.h>
#include "helper_math.h"
#include "FastDeviceMinMax.h"
#include "Logger.h"
#include "CUDAAssert.h"
__device__ unsigned __bfind(unsigned i) { unsigned b; asm volatile("bfind.u32 %0, %1; " : "=r"(b) : "r"(i)); return b; }
__device__ __inline__ uint sign_extend_s8x4(uint i) { uint v; asm("prmt.b32 %0, %1, 0x0, 0x0000BA98;" : "=r"(v) : "r"(i)); return v; }
__device__ __inline__ uint extract_byte(uint i, uint n) { return (i >> (n * 8)) & 0xFF; }
__device__ const float4* BVHTreeNodes;
__device__ const float4* TriangleWoopCoordinates;
__device__ const int* MappingFromTriangleAddressToIndex;
#define DYNAMIC_FETCH 1
#define TRIANGLE_POSTPONING 1
#define STACK_POP(X) { --stackPtr; if (stackPtr < SM_STACK_SIZE) X = traversalStackSM[threadIdx.x][threadIdx.y][stackPtr]; else X = traversalStack[stackPtr - SM_STACK_SIZE]; }
#define STACK_PUSH(X) { if (stackPtr < SM_STACK_SIZE) traversalStackSM[threadIdx.x][threadIdx.y][stackPtr] = X; else traversalStack[stackPtr - SM_STACK_SIZE] = X; stackPtr++; }
__global__ void rtTraceCWBVHDynamicFetch(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount,
int* finishedRayCount
)
{
const float ooeps = exp2f(-80.0f);
const int STACK_SIZE = 32;
uint2 traversalStack[STACK_SIZE];
const int SM_STACK_SIZE = 8; // Slightly smaller stack size than the paper (12), as this seems faster on my GTX1080
__shared__ uint2 traversalStackSM[32][2][SM_STACK_SIZE];
int rayidx;
float3 orig, dir;
float tmin, tmax;
float idirx, idiry, idirz;
uint octinv;
uint2 nodeGroup = make_uint2(0);
uint2 triangleGroup = make_uint2(0);
char stackPtr = 0;
int hitAddr = -1;
float2 triangleuv;
__shared__ int nextRayArray[2];
const float4* localBVHTreeNodes = BVHTreeNodes;
const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates;
do
{
int& rayBase = nextRayArray[threadIdx.y];
bool terminated = stackPtr == 0 && nodeGroup.y <= 0x00FFFFFF && triangleGroup.y == 0;
const unsigned int maskTerminated = __ballot_sync(__activemask(), terminated);
const int numTerminated = __popc(maskTerminated);
const int idxTerminated = __popc(maskTerminated & ((1u << threadIdx.x) - 1));
if (terminated)
{
if (idxTerminated == 0)
rayBase = atomicAdd(finishedRayCount, numTerminated);
rayidx = rayBase + idxTerminated;
if (rayidx >= rayCount)
break;
orig = make_float3(rayBuffer[rayidx].origin_tmin);
dir = make_float3(rayBuffer[rayidx].dir_tmax);
tmin = rayBuffer[rayidx].origin_tmin.w;
tmax = rayBuffer[rayidx].dir_tmax.w;
idirx = 1.0f / (fabsf(dir.x) > ooeps ? dir.x : copysignf(ooeps, dir.x)); // inverse ray direction
idiry = 1.0f / (fabsf(dir.y) > ooeps ? dir.y : copysignf(ooeps, dir.y)); // inverse ray direction
idirz = 1.0f / (fabsf(dir.z) > ooeps ? dir.z : copysignf(ooeps, dir.z)); // inverse ray direction
octinv = ((dir.x < 0 ? 1 : 0) << 2) | ((dir.y < 0 ? 1 : 0) << 1) | ((dir.z < 0 ? 1 : 0) << 0);
octinv = 7 - octinv;
nodeGroup = make_uint2(0, 0b10000000000000000000000000000000);
triangleGroup = make_uint2(0);
stackPtr = 0;
hitAddr = -1;
}
#if DYNAMIC_FETCH
int lostLoopIterations = 0;
#endif
do
{
if (nodeGroup.y > 0x00FFFFFF)
{
const unsigned int hits = nodeGroup.y;
const unsigned int imask = nodeGroup.y;
const unsigned int child_bit_index = __bfind(hits);
const unsigned int child_node_base_index = nodeGroup.x;
nodeGroup.y &= ~(1 << child_bit_index);
if (nodeGroup.y > 0x00FFFFFF)
{
STACK_PUSH(nodeGroup);
}
{
const unsigned int slot_index = (child_bit_index - 24) ^ octinv;
const unsigned int octinv4 = octinv * 0x01010101u;
const unsigned int relative_index = __popc(imask & ~(0xFFFFFFFF << slot_index));
const unsigned int child_node_index = child_node_base_index + relative_index;
float4 n0, n1, n2, n3, n4;
n0 = __ldg(localBVHTreeNodes + child_node_index * 5 + 0);
n1 = __ldg(localBVHTreeNodes + child_node_index * 5 + 1);
n2 = __ldg(localBVHTreeNodes + child_node_index * 5 + 2);
n3 = __ldg(localBVHTreeNodes + child_node_index * 5 + 3);
n4 = __ldg(localBVHTreeNodes + child_node_index * 5 + 4);
float3 p = make_float3(n0);
int3 e;
e.x = *((char*)&n0.w + 0);
e.y = *((char*)&n0.w + 1);
e.z = *((char*)&n0.w + 2);
nodeGroup.x = float_as_uint(n1.x);
triangleGroup.x = float_as_uint(n1.y);
triangleGroup.y = 0;
unsigned int hitmask = 0;
const float adjusted_idirx = uint_as_float((e.x + 127) << 23) * idirx;
const float adjusted_idiry = uint_as_float((e.y + 127) << 23) * idiry;
const float adjusted_idirz = uint_as_float((e.z + 127) << 23) * idirz;
const float origx = -(orig.x - p.x) * idirx;
const float origy = -(orig.y - p.y) * idiry;
const float origz = -(orig.z - p.z) * idirz;
{
// First 4
const unsigned int meta4 = float_as_uint(n1.z);
const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010;
const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3);
const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F;
const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707;
// Potential micro-optimization: use PRMT to do the selection here, as described by the paper
uint swizzledLox = (idirx < 0) ? float_as_uint(n3.z) : float_as_uint(n2.x);
uint swizzledHix = (idirx < 0) ? float_as_uint(n2.x) : float_as_uint(n3.z);
uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.x) : float_as_uint(n2.z);
uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.z) : float_as_uint(n4.x);
uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.z) : float_as_uint(n3.x);
uint swizzledHiz = (idirz < 0) ? float_as_uint(n3.x) : float_as_uint(n4.z);
float tminx[4];
float tminy[4];
float tminz[4];
float tmaxx[4];
float tmaxy[4];
float tmaxz[4];
tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx;
tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx;
tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx;
tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx;
tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy;
tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy;
tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy;
tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy;
tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz;
tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz;
tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz;
tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz;
tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx;
tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx;
tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx;
tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx;
tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy;
tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy;
tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy;
tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy;
tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz;
tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz;
tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz;
tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz;
for (int childIndex = 0; childIndex < 4; childIndex++)
{
// Use VMIN, VMAX to compute the slabs
const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin);
const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax);
bool intersected = cmin <= cmax;
// Potential micro-optimization: use VSHL to implement this part, as described by the paper
if (intersected)
{
const unsigned int child_bits = extract_byte(child_bits4, childIndex);
const unsigned int bit_index = extract_byte(bit_index4, childIndex);
hitmask |= child_bits << bit_index;
}
}
}
{
// Second 4
const unsigned int meta4 = float_as_uint(n1.w);
const unsigned int is_inner4 = (meta4 & (meta4 << 1)) & 0x10101010;
const unsigned int inner_mask4 = sign_extend_s8x4(is_inner4 << 3);
const unsigned int bit_index4 = (meta4 ^ (octinv4 & inner_mask4)) & 0x1F1F1F1F;
const unsigned int child_bits4 = (meta4 >> 5) & 0x07070707;
// Potential micro-optimization: use PRMT to do the selection here, as described by the paper
uint swizzledLox = (idirx < 0) ? float_as_uint(n3.w) : float_as_uint(n2.y);
uint swizzledHix = (idirx < 0) ? float_as_uint(n2.y) : float_as_uint(n3.w);
uint swizzledLoy = (idiry < 0) ? float_as_uint(n4.y) : float_as_uint(n2.w);
uint swizzledHiy = (idiry < 0) ? float_as_uint(n2.w) : float_as_uint(n4.y);
uint swizzledLoz = (idirz < 0) ? float_as_uint(n4.w) : float_as_uint(n3.y);
uint swizzledHiz = (idirz < 0) ? float_as_uint(n3.y) : float_as_uint(n4.w);
float tminx[4];
float tminy[4];
float tminz[4];
float tmaxx[4];
float tmaxy[4];
float tmaxz[4];
tminx[0] = ((swizzledLox >> 0) & 0xFF) * adjusted_idirx + origx;
tminx[1] = ((swizzledLox >> 8) & 0xFF) * adjusted_idirx + origx;
tminx[2] = ((swizzledLox >> 16) & 0xFF) * adjusted_idirx + origx;
tminx[3] = ((swizzledLox >> 24) & 0xFF) * adjusted_idirx + origx;
tminy[0] = ((swizzledLoy >> 0) & 0xFF) * adjusted_idiry + origy;
tminy[1] = ((swizzledLoy >> 8) & 0xFF) * adjusted_idiry + origy;
tminy[2] = ((swizzledLoy >> 16) & 0xFF) * adjusted_idiry + origy;
tminy[3] = ((swizzledLoy >> 24) & 0xFF) * adjusted_idiry + origy;
tminz[0] = ((swizzledLoz >> 0) & 0xFF) * adjusted_idirz + origz;
tminz[1] = ((swizzledLoz >> 8) & 0xFF) * adjusted_idirz + origz;
tminz[2] = ((swizzledLoz >> 16) & 0xFF) * adjusted_idirz + origz;
tminz[3] = ((swizzledLoz >> 24) & 0xFF) * adjusted_idirz + origz;
tmaxx[0] = ((swizzledHix >> 0) & 0xFF) * adjusted_idirx + origx;
tmaxx[1] = ((swizzledHix >> 8) & 0xFF) * adjusted_idirx + origx;
tmaxx[2] = ((swizzledHix >> 16) & 0xFF) * adjusted_idirx + origx;
tmaxx[3] = ((swizzledHix >> 24) & 0xFF) * adjusted_idirx + origx;
tmaxy[0] = ((swizzledHiy >> 0) & 0xFF) * adjusted_idiry + origy;
tmaxy[1] = ((swizzledHiy >> 8) & 0xFF) * adjusted_idiry + origy;
tmaxy[2] = ((swizzledHiy >> 16) & 0xFF) * adjusted_idiry + origy;
tmaxy[3] = ((swizzledHiy >> 24) & 0xFF) * adjusted_idiry + origy;
tmaxz[0] = ((swizzledHiz >> 0) & 0xFF) * adjusted_idirz + origz;
tmaxz[1] = ((swizzledHiz >> 8) & 0xFF) * adjusted_idirz + origz;
tmaxz[2] = ((swizzledHiz >> 16) & 0xFF) * adjusted_idirz + origz;
tmaxz[3] = ((swizzledHiz >> 24) & 0xFF) * adjusted_idirz + origz;
for (int childIndex = 0; childIndex < 4; childIndex++)
{
// Use VMIN, VMAX to compute the slabs
const float cmin = fmaxf(fmax_fmax(tminx[childIndex], tminy[childIndex], tminz[childIndex]), tmin);
const float cmax = fminf(fmin_fmin(tmaxx[childIndex], tmaxy[childIndex], tmaxz[childIndex]), tmax);
bool intersected = cmin <= cmax;
// Potential micro-optimization: use VSHL to implement this part, as described by the paper
if (intersected)
{
const unsigned int child_bits = extract_byte(child_bits4, childIndex);
const unsigned int bit_index = extract_byte(bit_index4, childIndex);
hitmask |= child_bits << bit_index;
}
}
}
nodeGroup.y = (hitmask & 0xFF000000) | (*((byte*)&n0.w + 3));
triangleGroup.y = hitmask & 0x00FFFFFF;
}
}
else
{
triangleGroup = nodeGroup;
nodeGroup = make_uint2(0);
}
#if TRIANGLE_POSTPONING
const int totalThreads = __popc(__activemask());
#endif
while (triangleGroup.y != 0)
{
#if TRIANGLE_POSTPONING
const float Rt = 0.2;
const int threshold = totalThreads * Rt;
const int numActiveThreads = __popc(__activemask());
if (numActiveThreads < threshold)
{
STACK_PUSH(triangleGroup);
break;
}
#endif
int triangleIndex = __bfind(triangleGroup.y);
int triAddr = triangleGroup.x * 3 + triangleIndex * 3;
float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0);
float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1);
float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2);
float Oz = v00.w - orig.x*v00.x - orig.y*v00.y - orig.z*v00.z;
float invDz = 1.0f / (dir.x*v00.x + dir.y*v00.y + dir.z*v00.z);
float t = Oz * invDz;
float Ox = v11.w + orig.x*v11.x + orig.y*v11.y + orig.z*v11.z;
float Dx = dir.x * v11.x + dir.y * v11.y + dir.z * v11.z;
float u = Ox + t * Dx;
float Oy = v22.w + orig.x*v22.x + orig.y*v22.y + orig.z*v22.z;
float Dy = dir.x*v22.x + dir.y*v22.y + dir.z*v22.z;
float v = Oy + t*Dy;
if (t > tmin && t < tmax)
{
if (u >= 0.0f && u <= 1.0f)
{
if (v >= 0.0f && u + v <= 1.0f)
{
triangleuv.x = u;
triangleuv.y = v;
tmax = t;
hitAddr = triAddr;
}
}
}
triangleGroup.y &= ~(1 << triangleIndex);
}
if (nodeGroup.y <= 0x00FFFFFF)
{
if (stackPtr > 0)
{
STACK_POP(nodeGroup);
}
else
{
rayResultBuffer[rayidx].t_triId_u_v = make_float4(tmax, int_as_float(hitAddr), triangleuv.x, triangleuv.y);
break;
}
}
#if DYNAMIC_FETCH
const int Nd = 4;
const int Nw = 16;
lostLoopIterations += __popc(__activemask()) - Nd;
if (lostLoopIterations >= Nw)
break;
#endif
} while (true);
} while (true);
}
__host__ void rtBindCWBVHData(
const float4* InBVHTreeNodes,
const float4* InTriangleWoopCoordinates,
const int* InMappingFromTriangleAddressToIndex)
{
cudaCheck(cudaMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex)));
cudaCheck(cudaMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates)));
cudaCheck(cudaMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes)));
}
__host__ void rtTraceCWBVH(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount
)
{
float elapsedTime;
cudaEvent_t startEvent, stopEvent;
cudaCheck(cudaEventCreate(&startEvent));
cudaCheck(cudaEventCreate(&stopEvent));
int* cudaFinishedRayCount;
cudaCheck(cudaMalloc(&cudaFinishedRayCount, sizeof(int)));
dim3 blockDim(32, 2);
dim3 gridDim(32, 32);
cudaProfilerStart();
cudaCheck(cudaEventRecord(startEvent, 0));
{
cudaMemset(cudaFinishedRayCount, 0, sizeof(int));
rtTraceCWBVHDynamicFetch <<< gridDim, blockDim >>> (
rayBuffer,
rayResultBuffer,
rayCount,
cudaFinishedRayCount
);
}
cudaCheck(cudaEventRecord(stopEvent, 0));
cudaCheck(cudaEventSynchronize(stopEvent));
cudaCheck(cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent));
Log("%.3fMS, %.2fMRays/s (rtTraceCWBVH Dynamic Fetch)", elapsedTime, (float)rayCount / 1000000.0f / (elapsedTime / 1000.0f));
cudaProfilerStop();
cudaFree(cudaFinishedRayCount);
}
|
83688efe392dabaeee56263deed48340501e9810.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu.h"
#include "helper.h"
#include <hip/hip_runtime.h>
__global__ void update_list(const Params *params, Particle *particles, int *linked_cells, int *linked_particles) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) {
int cell_index = calc_cell_index((particles[id].x0), (particles[id].x1), (particles[id].x2), params);
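// Head insertion into the cell's linked list: atomicExch returns the previous head index,
// which becomes this particle's "next" link in linked_particles.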
linked_particles[id] = atomicExch(&(linked_cells[cell_index]), id);
particles[id].on_gpu = true;
}
}
__global__ void update_pos(const Params *params, Particle *particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) update_pos(id, params, particles);
}
__global__ void calc_velocity(const Params *params, Particle *particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) calc_velocity(id, params, particles);
}
__global__ void calc_force(const Params *params, Particle *particles, const int *linked_cells, const int *linked_particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) calc_force(id, params, particles, linked_cells, linked_particles);
}
__global__ void set_list(int *list, int length, int value) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) list[id] = value;
}
//#### Hybrid methods ####
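// Hybrid CPU/GPU split along cell index 0: particles in cells at or beyond cell_border are
// owned by the GPU, the rest by the CPU; the layer just inside the border (and cell 0,
// presumably for periodic boundaries) is also kept in the GPU's linked-cell lists so border
// interactions can be evaluated.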
__global__ void update_list(const Params *params, Particle *particles, int *linked_cells, int *linked_particles, const int cell_border, int *active_particles, int *cntr) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
//TODO: assert active_particles initialized with -1
if (id < params->num_part) {
int cell_id0 = (particles[id].x0 - params->x0_min) / params->length0;
if (cell_id0 >= cell_border) {
particles[id].on_gpu = true;
active_particles[atomicAdd(cntr, 1)] = id;
} else {
particles[id].on_gpu = false;
}
if (((cell_id0 >= cell_border - 1) && (cell_id0 <= params->cells0 - 1)) || (cell_id0 == 0)) {
int cell_index = calc_cell_index((particles[id].x0), (particles[id].x1), (particles[id].x2), params);
//TODO
if (cell_index < 0) printf("cellid0 %d part %d\n", cell_id0, id);
linked_particles[id] = atomicExch(&(linked_cells[cell_index]), id);
}
}
}
__global__ void filter_particles(const Params *params, Particle *particles, const int cell_border, Particle *filtered_particles, int *cntr, const bool only_border) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
//TODO: assert filtered_particles initialized with ?? or correct size
//TODO: assert cntr 0
if (id < params->num_part) {
int cell_id0 = (particles[id].x0 - params->x0_min) / params->length0;
// ALL: 0 to cells0 - 1
// GPU: cell_border to cells0 - 1
// CPU: 0 to cell_border - 1
bool predicate = (only_border) ? (particles[id].on_gpu) && !((cell_id0 > cell_border) && (cell_id0 < params->cells0 - 1)) : particles[id].on_gpu;
if (predicate) {
filtered_particles[atomicAdd(cntr, 1)] = particles[id];
}
}
}
__global__ void update_pos(const Params *params, Particle *particles, const int *active_particles) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part){
id = active_particles[id];
if (id != -1)
update_pos(id, params, particles);
}
}
__global__ void calc_velocity(const Params *params, Particle *particles, const int *active_particles) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part){
id = active_particles[id];
if (id != -1)
calc_velocity(id, params, particles);
}
}
__global__ void calc_force(const Params *params, Particle *particles, const int *linked_cells, const int *linked_particles, const int *active_particles) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part){
id = active_particles[id];
if (id != -1)
calc_force(id, params, particles, linked_cells, linked_particles);
}
}
__global__ void replace_particles(Particle *particles, Particle *filtered_particles, const int cntr) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < cntr){
int id = filtered_particles[j].id;
particles[id] = filtered_particles[j];
}
}
| 83688efe392dabaeee56263deed48340501e9810.cu | #include "gpu.h"
#include "helper.h"
#include <cuda_runtime.h>
__global__ void update_list(const Params *params, Particle *particles, int *linked_cells, int *linked_particles) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) {
int cell_index = calc_cell_index((particles[id].x0), (particles[id].x1), (particles[id].x2), params);
linked_particles[id] = atomicExch(&(linked_cells[cell_index]), id);
particles[id].on_gpu = true;
}
}
__global__ void update_pos(const Params *params, Particle *particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) update_pos(id, params, particles);
}
__global__ void calc_velocity(const Params *params, Particle *particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) calc_velocity(id, params, particles);
}
__global__ void calc_force(const Params *params, Particle *particles, const int *linked_cells, const int *linked_particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part) calc_force(id, params, particles, linked_cells, linked_particles);
}
__global__ void set_list(int *list, int length, int value) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) list[id] = value;
}
//#### Hybrid methods ####
__global__ void update_list(const Params *params, Particle *particles, int *linked_cells, int *linked_particles, const int cell_border, int *active_particles, int *cntr) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
//TODO: assert active_particles initialized with -1
if (id < params->num_part) {
int cell_id0 = (particles[id].x0 - params->x0_min) / params->length0;
if (cell_id0 >= cell_border) {
particles[id].on_gpu = true;
active_particles[atomicAdd(cntr, 1)] = id;
} else {
particles[id].on_gpu = false;
}
if (((cell_id0 >= cell_border - 1) && (cell_id0 <= params->cells0 - 1)) || (cell_id0 == 0)) {
int cell_index = calc_cell_index((particles[id].x0), (particles[id].x1), (particles[id].x2), params);
//TODO
if (cell_index < 0) printf("cellid0 %d part %d\n", cell_id0, id);
linked_particles[id] = atomicExch(&(linked_cells[cell_index]), id);
}
}
}
__global__ void filter_particles(const Params *params, Particle *particles, const int cell_border, Particle *filtered_particles, int *cntr, const bool only_border) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
//TODO: assert filtered_particles initialized with ?? or correct size
//TODO: assert cntr 0
if (id < params->num_part) {
int cell_id0 = (particles[id].x0 - params->x0_min) / params->length0;
// ALL: 0 to cells0 - 1
// GPU: cell_border to cells0 - 1
// CPU: 0 to cell_border - 1
bool predicate = (only_border) ? (particles[id].on_gpu) && !((cell_id0 > cell_border) && (cell_id0 < params->cells0 - 1)) : particles[id].on_gpu;
if (predicate) {
filtered_particles[atomicAdd(cntr, 1)] = particles[id];
}
}
}
__global__ void update_pos(const Params *params, Particle *particles, const int *active_particles) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part){
id = active_particles[id];
if (id != -1)
update_pos(id, params, particles);
}
}
__global__ void calc_velocity(const Params *params, Particle *particles, const int *active_particles) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part){
id = active_particles[id];
if (id != -1)
calc_velocity(id, params, particles);
}
}
__global__ void calc_force(const Params *params, Particle *particles, const int *linked_cells, const int *linked_particles, const int *active_particles) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < params->num_part){
id = active_particles[id];
if (id != -1)
calc_force(id, params, particles, linked_cells, linked_particles);
}
}
__global__ void replace_particles(Particle *particles, Particle *filtered_particles, const int cntr) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < cntr){
int id = filtered_particles[j].id;
particles[id] = filtered_particles[j];
}
}
|
806e07f1e5710d227bfa56af64c1efb88c30b28c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_plugin_helper.h"
#include "interpolate.h"
#include "plugin_factory.h"
#include "windvector.cuh"
#include <ogr_spatialref.h>
#define HIMAN_AUXILIARY_INCLUDE
#include "cache.h"
#undef HIMAN_AUXILIARY_INCLUDE
static std::map<size_t, double*> longitudeCache;
static std::mutex cacheMutex;
/*
* Calculate results. At this point it is assumed that U and V are in correct form.
*/
__global__ void Calculate(const float* __restrict__ d_u, const float* __restrict__ d_v, float* __restrict__ d_speed,
float* __restrict__ d_dir, himan::plugin::HPWindVectorTargetType targetType, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
const float U = d_u[idx];
const float V = d_v[idx];
d_speed[idx] = __fsqrt_rn(U * U + V * V);
if (targetType != himan::plugin::kGust)
{
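// Convention note (assumption): wind direction is reported meteorologically, i.e. the
// direction the flow comes from, hence the 180-degree offset; sea and ice drift use the
// oceanographic "toward" convention, hence offset 0.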
int offset = 180;
if (targetType == himan::plugin::kSea || targetType == himan::plugin::kIce)
{
offset = 0;
}
float dir = himan::constants::kRad * atan2(U, V) + offset;
// modulo operator is supposedly slow on cuda ?
/*
* quote:
*
* Integer division and modulo operation are costly: tens of instructions on devices of
* compute capability 1.x, below 20 instructions on devices of compute capability 2.x and
* higher.
*/
// reduce the angle
while (dir > 360)
{
dir -= 360;
}
// force it to be the positive remainder, so that 0 <= dir < 360
while (dir < 0)
{
dir += 360;
}
d_dir[idx] = round(dir);
}
}
}
void himan::plugin::windvector_cuda::RunCuda(std::shared_ptr<const plugin_configuration> conf,
std::shared_ptr<info<float>> myTargetInfo, const param& UParam,
const param& VParam, HPWindVectorTargetType itsTargetType)
{
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
// Allocate device arrays
float* d_u = 0;
float* d_v = 0;
float* d_speed = 0;
float* d_dir = 0;
// Allocate memory on device
const size_t N = myTargetInfo->SizeLocations();
const size_t memsize = N * sizeof(float);
// Fetch U & V, unpack to device, do not copy to host
auto UInfo =
cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), UParam, myTargetInfo->ForecastType());
auto VInfo =
cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), VParam, myTargetInfo->ForecastType());
if (!UInfo || !VInfo)
{
return;
}
CUDA_CHECK(hipMalloc((void**)&d_u, memsize));
CUDA_CHECK(hipMalloc((void**)&d_v, memsize));
CUDA_CHECK(hipMalloc((void**)&d_speed, memsize));
if (itsTargetType != kGust)
{
CUDA_CHECK(hipMalloc((void**)&d_dir, memsize));
}
cuda::Unpack(UInfo, stream, d_u);
cuda::Unpack(VInfo, stream, d_v);
// Rotate components; data already at device memory
if (UInfo->Grid()->UVRelativeToGrid())
{
double* d_lon = windvector_cuda::CacheLongitudeCoordinates(UInfo->Grid().get(), stream);
latitude_longitude_grid dummy(kBottomLeft, point(), point(), 0, 0, earth_shape<double>());
himan::interpolate::RotateVectorComponentsGPU(UInfo->Grid().get(), &dummy, UInfo->Data(), VInfo->Data(), stream,
d_u, d_v, d_lon);
CUDA_CHECK(hipStreamSynchronize(stream));
UInfo->Grid()->UVRelativeToGrid(false);
VInfo->Grid()->UVRelativeToGrid(false);
}
// Copy to host
CUDA_CHECK(hipMemcpyAsync(UInfo->Data().ValuesAsPOD(), d_u, memsize, hipMemcpyDeviceToHost, stream));
CUDA_CHECK(hipMemcpyAsync(VInfo->Data().ValuesAsPOD(), d_v, memsize, hipMemcpyDeviceToHost, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
// And finally insert to cache
if (conf->UseCacheForReads())
{
auto c = GET_PLUGIN(cache);
c->Insert(UInfo);
c->Insert(VInfo);
}
if (myTargetInfo->Level().Type() == kHybrid)
{
const size_t paramIndex = myTargetInfo->Index<param>();
for (myTargetInfo->Reset<param>(); myTargetInfo->Next<param>();)
{
myTargetInfo->Set<level>(UInfo->Level());
}
myTargetInfo->Index<param>(paramIndex);
}
// dims
const int blockSize = 256;
const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
hipLaunchKernelGGL(( Calculate), dim3(gridSize), dim3(blockSize), 0, stream, d_u, d_v, d_speed, d_dir, itsTargetType, N);
// block until the stream has completed
CUDA_CHECK(hipStreamSynchronize(stream));
// check if kernel execution generated an error
CUDA_CHECK_ERROR_MSG("Kernel invocation");
myTargetInfo->Index<param>(0);
cuda::ReleaseInfo(myTargetInfo, d_speed, stream);
if (itsTargetType != kGust)
{
myTargetInfo->Index<param>(1);
cuda::ReleaseInfo(myTargetInfo, d_dir, stream);
}
CUDA_CHECK(hipStreamSynchronize(stream));
// Free device memory
CUDA_CHECK(hipFree(d_u));
CUDA_CHECK(hipFree(d_v));
CUDA_CHECK(hipFree(d_speed));
if (d_dir)
{
CUDA_CHECK(hipFree(d_dir));
}
if (itsTargetType == kGust)
{
// Check aggregation period from source file
const auto agg = UInfo->Param().Aggregation();
if (agg.Type() != kUnknownAggregationType)
{
auto& par = myTargetInfo->Param();
par.Aggregation(agg);
}
}
CUDA_CHECK(hipStreamDestroy(stream));
}
double* himan::plugin::windvector_cuda::CacheLongitudeCoordinates(const himan::grid* g, hipStream_t& stream)
{
const size_t hash = g->Hash();
himan::logger log("windvector_cuda");
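// Double-checked locking keyed on the grid hash: only the first caller for a given grid
// builds and uploads the longitude array; later callers reuse the cached device pointer.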
if (longitudeCache.find(hash) == longitudeCache.end())
{
std::lock_guard<std::mutex> lock(cacheMutex);
if (longitudeCache.find(hash) == longitudeCache.end())
{
double* d_lon = nullptr;
std::vector<double> lon(g->Size());
CUDA_CHECK(hipMalloc((void**)&d_lon, g->Size() * sizeof(double)));
for (size_t i = 0; i < g->Size(); i++)
{
lon[i] = g->LatLon(i).X();
}
CUDA_CHECK(hipMemcpyAsync(d_lon, lon.data(), g->Size() * sizeof(double), hipMemcpyHostToDevice, stream));
longitudeCache.emplace(hash, d_lon);
log.Trace("Add longitude cache for " + std::to_string(hash));
CUDA_CHECK(hipStreamSynchronize(stream));
}
else
{
log.Trace("Found longitude cache for " + std::to_string(hash));
}
}
else
{
log.Trace("Found longitude cache for " + std::to_string(hash));
}
return longitudeCache[hash];
}
void himan::plugin::windvector_cuda::FreeLongitudeCache()
{
himan::logger log("windvector_cuda");
std::lock_guard<std::mutex> lock(cacheMutex);
for (auto& p : longitudeCache)
{
if (p.second == nullptr)
{
continue;
}
CUDA_CHECK(hipFree(p.second));
log.Trace("Cleared longitude cache for " + std::to_string(p.first));
}
longitudeCache.clear();
}
| 806e07f1e5710d227bfa56af64c1efb88c30b28c.cu | #include "cuda_plugin_helper.h"
#include "interpolate.h"
#include "plugin_factory.h"
#include "windvector.cuh"
#include <ogr_spatialref.h>
#define HIMAN_AUXILIARY_INCLUDE
#include "cache.h"
#undef HIMAN_AUXILIARY_INCLUDE
static std::map<size_t, double*> longitudeCache;
static std::mutex cacheMutex;
/*
* Calculate results. At this point it is assumed that U and V are in correct form.
*/
__global__ void Calculate(const float* __restrict__ d_u, const float* __restrict__ d_v, float* __restrict__ d_speed,
float* __restrict__ d_dir, himan::plugin::HPWindVectorTargetType targetType, size_t N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
const float U = d_u[idx];
const float V = d_v[idx];
d_speed[idx] = __fsqrt_rn(U * U + V * V);
if (targetType != himan::plugin::kGust)
{
int offset = 180;
if (targetType == himan::plugin::kSea || targetType == himan::plugin::kIce)
{
offset = 0;
}
float dir = himan::constants::kRad * atan2(U, V) + offset;
// modulo operator is supposedly slow on cuda ?
/*
* quote:
*
* Integer division and modulo operation are costly: tens of instructions on devices of
* compute capability 1.x, below 20 instructions on devices of compute capability 2.x and
* higher.
*/
// reduce the angle
while (dir > 360)
{
dir -= 360;
}
// force it to be the positive remainder, so that 0 <= dir < 360
while (dir < 0)
{
dir += 360;
}
d_dir[idx] = round(dir);
}
}
}
void himan::plugin::windvector_cuda::RunCuda(std::shared_ptr<const plugin_configuration> conf,
std::shared_ptr<info<float>> myTargetInfo, const param& UParam,
const param& VParam, HPWindVectorTargetType itsTargetType)
{
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
// Allocate device arrays
float* d_u = 0;
float* d_v = 0;
float* d_speed = 0;
float* d_dir = 0;
// Allocate memory on device
const size_t N = myTargetInfo->SizeLocations();
const size_t memsize = N * sizeof(float);
// Fetch U & V, unpack to device, do not copy to host
auto UInfo =
cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), UParam, myTargetInfo->ForecastType());
auto VInfo =
cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), VParam, myTargetInfo->ForecastType());
if (!UInfo || !VInfo)
{
return;
}
CUDA_CHECK(cudaMalloc((void**)&d_u, memsize));
CUDA_CHECK(cudaMalloc((void**)&d_v, memsize));
CUDA_CHECK(cudaMalloc((void**)&d_speed, memsize));
if (itsTargetType != kGust)
{
CUDA_CHECK(cudaMalloc((void**)&d_dir, memsize));
}
cuda::Unpack(UInfo, stream, d_u);
cuda::Unpack(VInfo, stream, d_v);
// Rotate components; data already at device memory
if (UInfo->Grid()->UVRelativeToGrid())
{
double* d_lon = windvector_cuda::CacheLongitudeCoordinates(UInfo->Grid().get(), stream);
latitude_longitude_grid dummy(kBottomLeft, point(), point(), 0, 0, earth_shape<double>());
himan::interpolate::RotateVectorComponentsGPU(UInfo->Grid().get(), &dummy, UInfo->Data(), VInfo->Data(), stream,
d_u, d_v, d_lon);
CUDA_CHECK(cudaStreamSynchronize(stream));
UInfo->Grid()->UVRelativeToGrid(false);
VInfo->Grid()->UVRelativeToGrid(false);
}
// Copy to host
CUDA_CHECK(cudaMemcpyAsync(UInfo->Data().ValuesAsPOD(), d_u, memsize, cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(cudaMemcpyAsync(VInfo->Data().ValuesAsPOD(), d_v, memsize, cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
// And finally insert to cache
if (conf->UseCacheForReads())
{
auto c = GET_PLUGIN(cache);
c->Insert(UInfo);
c->Insert(VInfo);
}
if (myTargetInfo->Level().Type() == kHybrid)
{
const size_t paramIndex = myTargetInfo->Index<param>();
for (myTargetInfo->Reset<param>(); myTargetInfo->Next<param>();)
{
myTargetInfo->Set<level>(UInfo->Level());
}
myTargetInfo->Index<param>(paramIndex);
}
// dims
const int blockSize = 256;
const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
Calculate<<<gridSize, blockSize, 0, stream>>>(d_u, d_v, d_speed, d_dir, itsTargetType, N);
// block until the stream has completed
CUDA_CHECK(cudaStreamSynchronize(stream));
// check if kernel execution generated an error
CUDA_CHECK_ERROR_MSG("Kernel invocation");
myTargetInfo->Index<param>(0);
cuda::ReleaseInfo(myTargetInfo, d_speed, stream);
if (itsTargetType != kGust)
{
myTargetInfo->Index<param>(1);
cuda::ReleaseInfo(myTargetInfo, d_dir, stream);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
// Free device memory
CUDA_CHECK(cudaFree(d_u));
CUDA_CHECK(cudaFree(d_v));
CUDA_CHECK(cudaFree(d_speed));
if (d_dir)
{
CUDA_CHECK(cudaFree(d_dir));
}
if (itsTargetType == kGust)
{
// Check aggregation period from source file
const auto agg = UInfo->Param().Aggregation();
if (agg.Type() != kUnknownAggregationType)
{
auto& par = myTargetInfo->Param();
par.Aggregation(agg);
}
}
CUDA_CHECK(cudaStreamDestroy(stream));
}
double* himan::plugin::windvector_cuda::CacheLongitudeCoordinates(const himan::grid* g, cudaStream_t& stream)
{
const size_t hash = g->Hash();
himan::logger log("windvector_cuda");
if (longitudeCache.find(hash) == longitudeCache.end())
{
std::lock_guard<std::mutex> lock(cacheMutex);
if (longitudeCache.find(hash) == longitudeCache.end())
{
double* d_lon = nullptr;
std::vector<double> lon(g->Size());
CUDA_CHECK(cudaMalloc((void**)&d_lon, g->Size() * sizeof(double)));
for (size_t i = 0; i < g->Size(); i++)
{
lon[i] = g->LatLon(i).X();
}
CUDA_CHECK(cudaMemcpyAsync(d_lon, lon.data(), g->Size() * sizeof(double), cudaMemcpyHostToDevice, stream));
longitudeCache.emplace(hash, d_lon);
log.Trace("Add longitude cache for " + std::to_string(hash));
CUDA_CHECK(cudaStreamSynchronize(stream));
}
else
{
log.Trace("Found longitude cache for " + std::to_string(hash));
}
}
else
{
log.Trace("Found longitude cache for " + std::to_string(hash));
}
return longitudeCache[hash];
}
void himan::plugin::windvector_cuda::FreeLongitudeCache()
{
himan::logger log("windvector_cuda");
std::lock_guard<std::mutex> lock(cacheMutex);
for (auto& p : longitudeCache)
{
if (p.second == nullptr)
{
continue;
}
CUDA_CHECK(cudaFree(p.second));
log.Trace("Cleared longitude cache for " + std::to_string(p.first));
}
longitudeCache.clear();
}
|
2607aa419795166049c865334dba5ac2b983516a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <exception>
#include <string>
#define MEASURE_EXEC_TIME
#include <stream_compaction/efficient.h>
#include "radix_sort.h"
namespace std
{
class InvalidArgument : public exception
{
public:
virtual const char *what() const throw()
{
return "One or more invalid arguments detected";
}
};
}
namespace ParallelRadixSort
{
template <class T>
__global__ void kernClassify(uint32_t n, T mask,
uint32_t * __restrict__ notbools, uint32_t * __restrict__ bools, const T * __restrict__ idata)
{
uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n) return;
uint32_t t = static_cast<uint32_t>((idata[tid] & mask) == 0);
notbools[tid] = t;
bools[tid] = t ^ 0x1;
}
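// Split step: scanning the concatenated [bit==0 flags | bit==1 flags] array (2n entries) in
// one pass gives every element a stable destination index, zeros packed before ones.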
template <class T>
__global__ void kernScatter(uint32_t n,
uint32_t * __restrict__ nobools, uint32_t * __restrict__ noindices, uint32_t * __restrict__ yesindices,
T * __restrict__ odata, const T * __restrict__ idata)
{
uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n) return;
uint32_t isBitZero = nobools[tid];
uint32_t idx;
if (isBitZero) idx = noindices[tid]; else idx = yesindices[tid];
odata[idx] = idata[tid];
}
#ifdef MEASURE_EXEC_TIME
template <class T>
float sort(int n, T *odata, const T *idata, T bitMask, bool lsb)
#else
template <class T>
void sort(int n, T *odata, const T *idata, T bitMask, bool lsb)
#endif
{
if (n <= 0 || !odata || !idata)
{
throw std::InvalidArgument();
}
int segSize = StreamCompaction::Efficient4::computeSegmentSize(2 * n);
const size_t kDevArraySizeInByte = StreamCompaction::Efficient4::computeActualMemSize(2 * n);
T *idata_dev = 0;
T *odata_dev = 0;
uint32_t *noyes_bools_dev = 0;
uint32_t *indices_dev = 0;
hipMalloc(&idata_dev, n * sizeof(T));
hipMalloc(&odata_dev, n * sizeof(T));
hipMalloc(&noyes_bools_dev, kDevArraySizeInByte);
hipMalloc(&indices_dev, kDevArraySizeInByte);
hipMemcpy(idata_dev, idata, n * sizeof(T), hipMemcpyHostToDevice);
const int threadsPerBlock = 256;
int numBlocks = (n + threadsPerBlock - 1) / threadsPerBlock;
int numBits = 8 * sizeof(T);
T mask = lsb ? 1 : (1 << (numBits - 1));
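// One bit per pass, starting from the LSB or the MSB depending on `lsb`: classify, scan,
// scatter, then ping-pong idata_dev/odata_dev before moving to the next bit selected by bitMask.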
#ifdef MEASURE_EXEC_TIME
float execTime = 0.f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for (int i = 0; i < numBits; ++i)
{
if (!(bitMask & mask)) continue; // do not consider this bit
hipLaunchKernelGGL(( kernClassify), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, n, mask, noyes_bools_dev, noyes_bools_dev + n, idata_dev);
StreamCompaction::Efficient4::scanHelper(2 * n, indices_dev, noyes_bools_dev);
hipLaunchKernelGGL(( kernScatter), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, n, noyes_bools_dev, indices_dev, indices_dev + n, odata_dev, idata_dev);
if (lsb) mask <<= 1; else mask >>= 1;
T *tmp = odata_dev;
odata_dev = idata_dev;
idata_dev = tmp;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&execTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
#else
for (int i = 0; i < numBits; ++i)
{
if (!(bitMask & mask)) continue; // do not consider this bit
kernClassify << <numBlocks, threadsPerBlock >> >(n, mask, noyes_bools_dev, noyes_bools_dev + n, idata_dev);
StreamCompaction::Efficient::scanHelper(segSize, 2 * n, indices_dev, noyes_bools_dev);
kernScatter << <numBlocks, threadsPerBlock >> >(n, noyes_bools_dev, indices_dev, indices_dev + n, odata_dev, idata_dev);
if (lsb) mask <<= 1; else mask >>= 1;
T *tmp = odata_dev;
odata_dev = idata_dev;
idata_dev = tmp;
}
#endif
hipMemcpy(odata, idata_dev, n * sizeof(T), hipMemcpyDeviceToHost);
hipFree(idata_dev);
hipFree(odata_dev);
hipFree(noyes_bools_dev);
hipFree(indices_dev);
#ifdef MEASURE_EXEC_TIME
return execTime;
#endif
}
// Since template definition is not visible to users (main.obj in this case),
// we need to explicitly tell the compiler to generate all the template implementations
// that will be used later
#ifdef MEASURE_EXEC_TIME
template float sort<uint32_t>(int n, uint32_t *odata, const uint32_t *idata, uint32_t bitMask, bool lsb);
#else
template void sort<uint32_t>(int n, uint32_t *odata, const uint32_t *idata, uint32_t bitMask, bool lsb);
#endif
#ifdef MEASURE_EXEC_TIME
template <class T>
float thrustSort(int n, T *odata, const T *idata)
{
if (n <= 0 || !odata || !idata)
{
throw std::InvalidArgument();
}
T *iodata_dev = 0;
hipMalloc(&iodata_dev, n * sizeof(T));
hipMemcpy(iodata_dev, idata, n * sizeof(T), hipMemcpyHostToDevice);
thrust::device_ptr<T> thrust_iodata(iodata_dev);
float execTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
thrust::stable_sort(thrust_iodata, thrust_iodata + n);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&execTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(odata, iodata_dev, n * sizeof(T), hipMemcpyDeviceToHost);
hipFree(iodata_dev);
return execTime;
}
template float thrustSort(int n, uint32_t *odata, const uint32_t *idata);
#endif
} | 2607aa419795166049c865334dba5ac2b983516a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <exception>
#include <string>
#define MEASURE_EXEC_TIME
#include <stream_compaction/efficient.h>
#include "radix_sort.h"
namespace std
{
class InvalidArgument : public exception
{
public:
virtual const char *what() const throw()
{
return "One or more invalid arguments detected";
}
};
}
namespace ParallelRadixSort
{
template <class T>
__global__ void kernClassify(uint32_t n, T mask,
uint32_t * __restrict__ notbools, uint32_t * __restrict__ bools, const T * __restrict__ idata)
{
uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n) return;
uint32_t t = static_cast<uint32_t>((idata[tid] & mask) == 0);
notbools[tid] = t;
bools[tid] = t ^ 0x1;
}
template <class T>
__global__ void kernScatter(uint32_t n,
uint32_t * __restrict__ nobools, uint32_t * __restrict__ noindices, uint32_t * __restrict__ yesindices,
T * __restrict__ odata, const T * __restrict__ idata)
{
uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n) return;
uint32_t isBitZero = nobools[tid];
uint32_t idx;
if (isBitZero) idx = noindices[tid]; else idx = yesindices[tid];
odata[idx] = idata[tid];
}
#ifdef MEASURE_EXEC_TIME
template <class T>
float sort(int n, T *odata, const T *idata, T bitMask, bool lsb)
#else
template <class T>
void sort(int n, T *odata, const T *idata, T bitMask, bool lsb)
#endif
{
if (n <= 0 || !odata || !idata)
{
throw std::InvalidArgument();
}
int segSize = StreamCompaction::Efficient4::computeSegmentSize(2 * n);
const size_t kDevArraySizeInByte = StreamCompaction::Efficient4::computeActualMemSize(2 * n);
T *idata_dev = 0;
T *odata_dev = 0;
uint32_t *noyes_bools_dev = 0;
uint32_t *indices_dev = 0;
cudaMalloc(&idata_dev, n * sizeof(T));
cudaMalloc(&odata_dev, n * sizeof(T));
cudaMalloc(&noyes_bools_dev, kDevArraySizeInByte);
cudaMalloc(&indices_dev, kDevArraySizeInByte);
cudaMemcpy(idata_dev, idata, n * sizeof(T), cudaMemcpyHostToDevice);
const int threadsPerBlock = 256;
int numBlocks = (n + threadsPerBlock - 1) / threadsPerBlock;
int numBits = 8 * sizeof(T);
T mask = lsb ? 1 : (1 << (numBits - 1));
#ifdef MEASURE_EXEC_TIME
float execTime = 0.f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for (int i = 0; i < numBits; ++i)
{
if (!(bitMask & mask)) continue; // do not consider this bit
kernClassify<<<numBlocks, threadsPerBlock>>>(n, mask, noyes_bools_dev, noyes_bools_dev + n, idata_dev);
StreamCompaction::Efficient4::scanHelper(2 * n, indices_dev, noyes_bools_dev);
kernScatter<<<numBlocks, threadsPerBlock>>>(n, noyes_bools_dev, indices_dev, indices_dev + n, odata_dev, idata_dev);
if (lsb) mask <<= 1; else mask >>= 1;
T *tmp = odata_dev;
odata_dev = idata_dev;
idata_dev = tmp;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&execTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
#else
for (int i = 0; i < numBits; ++i)
{
if (!(bitMask & mask)) continue; // do not consider this bit
kernClassify<<<numBlocks, threadsPerBlock>>>(n, mask, noyes_bools_dev, noyes_bools_dev + n, idata_dev);
StreamCompaction::Efficient::scanHelper(segSize, 2 * n, indices_dev, noyes_bools_dev);
kernScatter<<<numBlocks, threadsPerBlock>>>(n, noyes_bools_dev, indices_dev, indices_dev + n, odata_dev, idata_dev);
if (lsb) mask <<= 1; else mask >>= 1;
T *tmp = odata_dev;
odata_dev = idata_dev;
idata_dev = tmp;
}
#endif
cudaMemcpy(odata, idata_dev, n * sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(idata_dev);
cudaFree(odata_dev);
cudaFree(noyes_bools_dev);
cudaFree(indices_dev);
#ifdef MEASURE_EXEC_TIME
return execTime;
#endif
}
// Since the template definitions are not visible to users (main.obj in this case),
// we need to explicitly instantiate every template specialization
// that will be used later
#ifdef MEASURE_EXEC_TIME
template float sort<uint32_t>(int n, uint32_t *odata, const uint32_t *idata, uint32_t bitMask, bool lsb);
#else
template void sort<uint32_t>(int n, uint32_t *odata, const uint32_t *idata, uint32_t bitMask, bool lsb);
#endif
#ifdef MEASURE_EXEC_TIME
template <class T>
float thrustSort(int n, T *odata, const T *idata)
{
if (n <= 0 || !odata || !idata)
{
throw std::InvalidArgument();
}
T *iodata_dev = 0;
cudaMalloc(&iodata_dev, n * sizeof(T));
cudaMemcpy(iodata_dev, idata, n * sizeof(T), cudaMemcpyHostToDevice);
thrust::device_ptr<T> thrust_iodata(iodata_dev);
float execTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
thrust::stable_sort(thrust_iodata, thrust_iodata + n);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&execTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(odata, iodata_dev, n * sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(iodata_dev);
return execTime;
}
template float thrustSort(int n, uint32_t *odata, const uint32_t *idata);
#endif
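// Illustrative usage sketch (not part of the original source): compares the
// custom radix sort above against thrust::stable_sort on the same host keys.
// The key values and the helper name exampleCompareSorts are assumptions made
// purely for illustration.
#ifdef MEASURE_EXEC_TIME
inline void exampleCompareSorts()
{
const int n = 4;
uint32_t keys[n] = { 42u, 7u, 19u, 3u };
uint32_t byRadix[n];
uint32_t byThrust[n];
// Consider every bit (mask = all ones), scanning from the least significant bit.
float radixMs = sort<uint32_t>(n, byRadix, keys, 0xFFFFFFFFu, true);
float thrustMs = thrustSort<uint32_t>(n, byThrust, keys);
(void)radixMs;
(void)thrustMs;
}
#endif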
} |
7513acc90249d6a17f5a25895762e84ae06865aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlag2c.cu, mixed zc -> ds, Mon Jun 25 18:24:10 2018
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlat2s and zlaset.
*/
__global__
void dlag2s_kernel(
int m, int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAG2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
hipLaunchKernelGGL(( dlag2s_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, A, lda, SA, ldsa, rmax );
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
| 7513acc90249d6a17f5a25895762e84ae06865aa.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlag2c.cu, mixed zc -> ds, Mon Jun 25 18:24:10 2018
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlat2s and zlaset.
*/
__global__
void dlag2s_kernel(
int m, int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp), MAGMA_D_IMAG(tmp) );
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAG2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
dlag2s_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, A, lda, SA, ldsa, rmax );
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
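// Illustrative sketch (an assumption, not part of MAGMA): how a caller might
// convert a device-resident m-by-n double matrix dA into the single-precision
// matrix dSA. It assumes dA and dSA are already allocated on the device and
// that `queue` is a valid magma_queue_t created by the caller.
inline void example_call_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dSA, magma_int_t lddsa,
magma_queue_t queue )
{
magma_int_t info = 0;
magmablas_dlag2s( m, n, dA, ldda, dSA, lddsa, queue, &info );
if (info == 1) {
// some |A(i,j)| exceeded the single-precision overflow threshold,
// so the content of dSA is unspecified
}
}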
|
901624b24c66bdee86bbecf6766bc620da1d17d8.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 901624b24c66bdee86bbecf6766bc620da1d17d8.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
1f24f2144548d32dfa1d809b47fb7d2b0b92f865.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "transformerKernels.h"
/**
@file
Implements the CUDA kernel functions and their launchers
required by the transformer model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: select_beam_rough_topk
one block for one beam. Based on the previous log sequence probability and the
current step's logits, compute the log sequence probability of the sequence
ending with every token in the vocab, then select the rough top-K candidates.
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, beam_size, vocab_size], cur step logit
logit_bias: [vocab_size], logit bias
seq_probs: [batch_size, beam_size], prefix sequence log probability
seq_score: [batch_size, beam_size], prefix sequence score
alive_seq: [batch_size, beam_size, max_step], prefix sequence id
can_idx: [batch_size, beam_size, vocab_size], topk candidate's index
can_score: [batch_size, beam_size, vocab_size], topk candidate's score
num_beam_can: [1 + batch_size * beam_size].
the first element saves the number of top-k candidates of the whole batch,
the remaining batch_size * beam_size elements save the number of top-k candidates
of each beam
vocab_size: the vocab size of decoder
max_step: max decode step
length_norm: length penalty value for the current step
cur_step: current step
diverse_lambda: lambda for diverse beam search
*/
template <typename T, int beam_size>
__global__ void select_beam_rough_topk(
const T* logits, const T* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, float diverse_lambda, int end_id) {
if (cur_step != 0 && alive_seq[blockIdx.x * max_step + cur_step] == end_id) {
// this is a finished beam
if (threadIdx.x == 0) {
num_beam_can[blockIdx.x + 1] = 1; // generate one candidate
int pos = atomicAdd(num_beam_can, 1); // get a candidate pos
if (diverse_lambda == 0) {
can_score[pos] =
seq_score[blockIdx.x]; // this beam's score will not be change
} else {
// add the beam id offset in score to sort in each beam
int batch_id = blockIdx.x / beam_size;
can_score[pos] = seq_score[blockIdx.x] +
(blockIdx.x - batch_id) * min_log_probability;
}
can_idx[pos] = end_id + (blockIdx.x % beam_size) * vocab_size; // EOS
}
return;
}
/* step1: compute each thread's max_logit and sum_exp_logit, store in
* rough_top_kth_logit, sum_exp_logit */
const int block_start = blockIdx.x * vocab_size;
const int left_idx = block_start + threadIdx.x;
const int right_idx = (blockIdx.x + 1) * vocab_size;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
float sum_exp_logit = 0;
for (int i = left_idx; i < right_idx; i += blockDim.x) {
float lgt = (float)logits[i] + (float)__ldg(&logit_bias[i - block_start]);
rough_top_kth_logit = fmaxf(rough_top_kth_logit, lgt);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
for (int i = left_idx; i < right_idx; i += blockDim.x) {
float lgt =
fmaxf((float)(logits[i]) + (float)__ldg(&logit_bias[i - block_start]) -
s_max_logit,
logit_thresh_min);
sum_exp_logit += expf(lgt);
}
/*
step2: compute rough top-kth-logits and sum_exp_logit among the whole beam,
saved into s_topk and
s_log_prob_base
*/
__shared__ float
s_log_prob_base; // prefix sequence log prob - log_sum_exp_logit
__shared__ float s_topk; // rough top k-th value of logits
__shared__ int num_cur_beam_can; // candidate number for this beam
sum_exp_logit = blockReduceSum(sum_exp_logit);
rough_top_kth_logit = blockRoughTopK<float, beam_size>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_log_prob_base = seq_probs[blockIdx.x] - logf(sum_exp_logit) - s_max_logit;
s_topk = rough_top_kth_logit;
num_cur_beam_can = 0;
}
/*
step3 : select the candidate token with logits bigger than s_topk,
compute the seq probability ended with them,
save the probability, token_index, selected token number.
*/
int idx = left_idx;
int batch_id = blockIdx.x / beam_size;
int batch_start_pos = batch_id * beam_size * vocab_size;
// int unk_vocab_id = vocab_size - 3; // last three element: unk, start, eos
__shared__ int l_n; // current iteration candidate number
for (int iter = 0; iter < (vocab_size + blockDim.x - 1) / blockDim.x;
iter++) {
// zero the counter
if (threadIdx.x == 0) l_n = 0;
__syncthreads();
float lgt = CUDA_FLOAT_INF_NEG - 1.f; // min s_topk is CUDA_FLOAT_INF_NEG
int pos;
int vocab_id = idx - block_start;
// if ((vocab_id < vocab_size) && (vocab_id != unk_vocab_id)) {
if (vocab_id < vocab_size) {
lgt = (float)(logits[idx]) + (float)__ldg(&logit_bias[vocab_id]);
if (lgt >= s_topk)
// pos: relative pos inside this iteration
pos = atomicAdd(&l_n, 1);
}
__syncthreads();
// leader increments the global counter
if (threadIdx.x == 0) {
atomicAdd(&num_cur_beam_can, l_n);
l_n = atomicAdd(num_beam_can, l_n);
}
__syncthreads();
// threads with true predicates write their elements
if ((lgt >= s_topk)) {
pos += l_n; // increment local pos by global counter
if (diverse_lambda == 0) {
can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
min_log_probability + 1.f) +
batch_id * min_log_probability;
} else {
can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
min_log_probability + 1.f) +
blockIdx.x * min_log_probability;
}
can_idx[pos] = idx - batch_start_pos;
}
__syncthreads();
idx += blockDim.x;
}
if (threadIdx.x == 0) {
num_beam_can[blockIdx.x + 1] = num_cur_beam_can;
}
}
template <typename T>
void select_beam_rough_topk_launcher(
const T* logits, const T* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, int step_token_num,
int max_thread_per_block, hipStream_t stream, int beam_size,
float diverse_lambda, int end_id) {
if (beam_size == 1)
hipLaunchKernelGGL(( select_beam_rough_topk<T, 1>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 2)
hipLaunchKernelGGL(( select_beam_rough_topk<T, 2>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 4)
hipLaunchKernelGGL(( select_beam_rough_topk<T, 4>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 8)
hipLaunchKernelGGL(( select_beam_rough_topk<T, 8>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 16)
hipLaunchKernelGGL(( select_beam_rough_topk<T, 16>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 32)
hipLaunchKernelGGL(( select_beam_rough_topk<T, 32>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
}
template void select_beam_rough_topk_launcher<float>(
const float* logits, const float* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, int step_token_num,
int max_thread_per_block, hipStream_t stream, int beam_size,
float diverse_lambda, int end_id);
template void select_beam_rough_topk_launcher<__half>(
const __half* logits, const __half* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, int step_token_num,
int max_thread_per_block, hipStream_t stream, int beam_size,
float diverse_lambda, int end_id);
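// Illustrative sketch (an assumption, not lightseq's actual decoder code): how one
// decode step might invoke the rough top-k selection for an fp32 model. Every
// pointer and size below is a placeholder supplied by the caller; length_norm is
// the precomputed length-penalty factor for this step, and 1024 is assumed as the
// usual per-block thread limit.
inline void example_select_rough_topk(
const float* d_logits, const float* d_logit_bias, const float* d_seq_probs,
const float* d_seq_score, const int* d_alive_seq, int* d_can_idx,
float* d_can_score, int* d_num_beam_can, int batch_size, int beam_size,
int vocab_size, int max_step, float length_norm, int cur_step,
float diverse_lambda, int end_id, hipStream_t stream) {
int step_token_num = batch_size * beam_size;  // one block per alive beam
int max_thread_per_block = 1024;
select_beam_rough_topk_launcher<float>(
d_logits, d_logit_bias, d_seq_probs, d_seq_score, d_alive_seq, d_can_idx,
d_can_score, d_num_beam_can, vocab_size, max_step, length_norm, cur_step,
step_token_num, max_thread_per_block, stream, beam_size, diverse_lambda,
end_id);
}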
/**
@brief: ker_diverse_beam_search
Add a different diversity score to can_score for the candidates within each beam
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
can_score: [batch_size * beam_size * candidates_size] candidates_size is
dynamic
can_ids: [batch_size * beam_size * candidates_size]
num_beam_can: [1 + batch_size * beam_size]
*/
__global__ void ker_diverse_beam_search(float* can_score, int* can_ids,
int* num_beam_can, int beam_size,
float diverse_lambda, int vocab_size) {
int total_candidates = num_beam_can[0];
num_beam_can += 1;
int can_pos = num_beam_can[blockIdx.x];
int batch_id = blockIdx.x / beam_size;
int beam_score_left_idx = can_pos + threadIdx.x;
int beam_score_right_idx = blockIdx.x == (gridDim.x - 1)
? total_candidates
: num_beam_can[blockIdx.x + 1];
for (int idx = beam_score_left_idx; idx < beam_score_right_idx;
idx += blockDim.x) {
atomicAdd(can_score + idx, batch_id * min_log_probability -
min_log_probability * blockIdx.x -
diverse_lambda * (idx - can_pos + 1));
int ori_can_idx = can_ids[idx]; // can_beam_id * vocab_size + vocab_id
int can_beam_id = ori_can_idx / vocab_size;
int can_vocab_id = ori_can_idx % vocab_size;
can_ids[idx] =
(can_beam_id + (idx - can_pos) * beam_size) * vocab_size + can_vocab_id;
}
}
void ker_diverse_beam_search_launcher(float* can_score, int* can_ids,
int* num_beam_can, int step_token_num,
int max_thread_per_block,
hipStream_t stream, int beam_size,
float diverse_lambda, int vocab_size) {
hipLaunchKernelGGL(( ker_diverse_beam_search), dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
can_score, can_ids, num_beam_can, beam_size, diverse_lambda, vocab_size);
}
/**
@brief: ker_bias_relu
add bias, then apply the ReLU activation
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
input: [batch_size * batch_seq_len, feature_dim]
bias: [feature_dim]
feature_dim: the dim of input feature
*/
template <typename T>
__global__ void ker_bias_relu(T* input, const T* bias, int feature_dim) {
int offset = blockIdx.x * feature_dim;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
input[cur_offset] = max(input[cur_offset] + __ldg(&bias[idx]), (T)0.f);
}
}
template <>
__global__ void ker_bias_relu<__half>(__half* input, const __half* bias,
int feature_dim) {
int offset = blockIdx.x * feature_dim;
half2* pinput = (half2*)input;
const half2* pbias = (const half2*)bias;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
float2 f2_inp = __half22float2(pinput[cur_offset]);
float2 f2_bias = __half22float2(__ldg(&pbias[idx]));
f2_inp.x = fmaxf(f2_inp.x + f2_bias.x, 0.f);
f2_inp.y = fmaxf(f2_inp.y + f2_bias.y, 0.f);
pinput[cur_offset] = __float22half2_rn(f2_inp);
}
}
template <typename T>
void ker_bias_relu_launcher(int batch_token_num, int block_dim,
hipStream_t stream, T* input, const T* bias,
int feature_dim) {
hipLaunchKernelGGL(( ker_bias_relu<T>)
, dim3(batch_token_num), dim3(block_dim), 0, stream, input, bias, feature_dim);
}
template <>
void ker_bias_relu_launcher<__half>(int batch_token_num, int block_dim,
hipStream_t stream, __half* input,
const __half* bias, int feature_dim) {
hipLaunchKernelGGL(( ker_bias_relu<__half>)
, dim3(batch_token_num), dim3(block_dim), 0, stream, input, bias, feature_dim / 2);
}
template void ker_bias_relu_launcher<float>(int batch_token_num, int block_dim,
hipStream_t stream, float* input,
const float* bias, int feature_dim);
template void ker_bias_relu_launcher<__half>(int batch_token_num, int block_dim,
hipStream_t stream, __half* input,
const __half* bias,
int feature_dim);
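// Illustrative sketch (an assumption): applying the fused bias + ReLU to an FFN
// inner activation of shape [batch_token_num, inner_dim]. The pointer names are
// placeholders; the block size is capped at the assumed 1024-thread limit.
inline void example_bias_relu(float* d_ffn_inner, const float* d_ffn_bias,
int batch_token_num, int inner_dim,
hipStream_t stream) {
int block_dim = inner_dim < 1024 ? inner_dim : 1024;
ker_bias_relu_launcher<float>(batch_token_num, block_dim, stream, d_ffn_inner,
d_ffn_bias, inner_dim);
}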
/**
@brief: ker_norm_layer
layer normalization
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
matrix: [batch_size, batch_seq_len, hidden_size]
scale: [hidden_size]
bias: [hidden_size]
*/
template <typename T>
__global__ void ker_norm_layer(T* matrix, const T* scale, const T* bias,
int hidden_size) {
uint block_start = blockIdx.x * hidden_size;
uint start = block_start + threadIdx.x;
uint end = block_start + hidden_size;
float val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
val += matrix[i];
}
// step 0. compute mean
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / float(hidden_size);
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float tmp = matrix[i] - s_mean;
val += tmp * tmp;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0)
s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
val = matrix[i] - s_mean;
matrix[i] = val * s_var * __ldg(&scale[i - block_start]) +
__ldg(&bias[i - block_start]);
}
}
template <>
__global__ void ker_norm_layer<__half>(__half* matrix, const __half* scale,
const __half* bias,
int half_hidden_size) {
uint block_start = blockIdx.x * half_hidden_size;
uint start = block_start + threadIdx.x;
uint end = blockIdx.x * half_hidden_size + half_hidden_size;
half2* pmatrix = (half2*)matrix;
const half2* pscale = (const half2*)scale;
const half2* pbias = (const half2*)bias;
float mean_dim = float(half_hidden_size) * 2.f;
float val = 0.0;
// step 0. compute mean
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pmatrix[i]);
val += local_f2.x + local_f2.y;
}
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / mean_dim;
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pmatrix[i]);
float tmpx = local_f2.x - s_mean;
float tmpy = local_f2.y - s_mean;
val += tmpx * tmpx + tmpy * tmpy;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / mean_dim + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
float2 scale_val = __half22float2(__ldg(&pscale[i - block_start]));
float2 bias_val = __half22float2(__ldg(&pbias[i - block_start]));
float2 local_f2 = safe_half2_to_float2(pmatrix[i]);
local_f2.x = (local_f2.x - s_mean) * s_var * scale_val.x + bias_val.x;
local_f2.y = (local_f2.y - s_mean) * s_var * scale_val.y + bias_val.y;
pmatrix[i] = __float22half2_rn(local_f2);
}
}
template <typename T>
void ker_norm_layer_launcher(int token_num, int hidden_size,
hipStream_t stream, T* matrix, const T* scale,
const T* bias, int max_thread_per_block) {
hipLaunchKernelGGL(( ker_norm_layer<T>), dim3(token_num), dim3(max_thread_per_block), 0, stream,
matrix, scale, bias, hidden_size);
}
template <>
void ker_norm_layer_launcher<__half>(int token_num, int hidden_size,
hipStream_t stream, __half* matrix,
const __half* scale, const __half* bias,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_norm_layer<__half>), dim3(token_num), dim3(max_thread_per_block), 0, stream,
matrix, scale, bias, hidden_size / 2);
}
template void ker_norm_layer_launcher<float>(int token_num, int hidden_size,
hipStream_t stream, float* matrix,
const float* scale,
const float* bias,
int max_thread_per_block);
template void ker_norm_layer_launcher<__half>(
int token_num, int hidden_size, hipStream_t stream, __half* matrix,
const __half* scale, const __half* bias, int max_thread_per_block);
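// Illustrative sketch (an assumption): normalizing token_num rows in place. Per
// row the kernel computes y_i = (x_i - mean) * rsqrt(var + epsilon) * scale_i +
// bias_i. Pointers are placeholders; for the fp16 path hidden_size must be even
// because the kernel processes half2 elements.
inline void example_norm_layer(float* d_matrix, const float* d_scale,
const float* d_bias, int token_num,
int hidden_size, hipStream_t stream) {
int max_thread_per_block = hidden_size < 1024 ? hidden_size : 1024;
ker_norm_layer_launcher<float>(token_num, hidden_size, stream, d_matrix,
d_scale, d_bias, max_thread_per_block);
}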
/**
@brief: ker_norm_layer_resual
layer normalization, and add a residual_bias to the input
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
matrix: [batch_size, batch_seq_len, hidden_size]
scale: [hidden_size]
bias: [hidden_size]
residual_bias: [hidden_size]
*/
template <typename T>
__global__ void ker_norm_layer_resual(T* input, T* output, const T* scale,
const T* bias, const T* residual_bias,
const int hidden_size, bool is_post_ln) {
uint block_start = blockIdx.x * hidden_size;
uint start = block_start + threadIdx.x;
uint end = block_start + hidden_size;
float val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
val += input[i];
}
// step 0. compute mean
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / float(hidden_size);
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float tmp = input[i] - s_mean;
val += tmp * tmp;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0)
s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
val = input[i] - s_mean;
output[i] = val * s_var * __ldg(&scale[i - block_start]) +
__ldg(&bias[i - block_start]);
if (is_post_ln) {
input[i] = output[i] + __ldg(&residual_bias[i - block_start]);
} else {
input[i] += __ldg(&residual_bias[i - block_start]);
}
}
}
template <>
__global__ void ker_norm_layer_resual<__half>(
__half* input, __half* output, const __half* scale, const __half* bias,
const __half* residual_bias, const int half_hidden_size, bool is_post_ln) {
uint block_start = blockIdx.x * half_hidden_size;
uint start = block_start + threadIdx.x;
uint end = blockIdx.x * half_hidden_size + half_hidden_size;
half2* pinput = (half2*)input;
half2* poutput = (half2*)output;
const half2* pscale = (const half2*)scale;
const half2* pbias = (const half2*)bias;
const half2* presidual_bias = (const half2*)residual_bias;
float mean_dim = float(half_hidden_size) * 2.f;
float val = 0.0;
// step 0. compute mean
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pinput[i]);
val += local_f2.x + local_f2.y;
}
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / mean_dim;
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pinput[i]);
float tmpx = local_f2.x - s_mean;
float tmpy = local_f2.y - s_mean;
val += tmpx * tmpx + tmpy * tmpy;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / mean_dim + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
float2 scale_val = __half22float2(__ldg(&pscale[i - block_start]));
float2 bias_val = __half22float2(__ldg(&pbias[i - block_start]));
float2 local_f2 = safe_half2_to_float2(pinput[i]);
local_f2.x = (local_f2.x - s_mean) * s_var * scale_val.x + bias_val.x;
local_f2.y = (local_f2.y - s_mean) * s_var * scale_val.y + bias_val.y;
poutput[i] = __float22half2_rn(local_f2);
if (!is_post_ln) {
local_f2 = safe_half2_to_float2(pinput[i]);
}
float2 residual_bias_val =
__half22float2(__ldg(&presidual_bias[i - block_start]));
float2 new_input_f2;
new_input_f2.x = local_f2.x + residual_bias_val.x;
new_input_f2.y = local_f2.y + residual_bias_val.y;
pinput[i] = __float22half2_rn(new_input_f2);
}
}
template <typename T>
void ker_norm_layer_resual_launcher(int token_num, int hidden_size,
hipStream_t stream, T* input, T* output,
const T* scale, const T* bias,
const T* residual_bias,
const int max_thread_per_block,
bool is_post_ln) {
hipLaunchKernelGGL(( ker_norm_layer_resual<T>), dim3(token_num), dim3(max_thread_per_block), 0, stream,
input, output, scale, bias, residual_bias, hidden_size, is_post_ln);
}
template <>
void ker_norm_layer_resual_launcher<__half>(int token_num, int hidden_size,
hipStream_t stream, __half* input,
__half* output, const __half* scale,
const __half* bias,
const __half* residual_bias,
const int max_thread_per_block,
bool is_post_ln) {
hipLaunchKernelGGL(( ker_norm_layer_resual<__half>), dim3(token_num), dim3(max_thread_per_block), 0, stream,
input, output, scale, bias, residual_bias, hidden_size / 2, is_post_ln);
}
template void ker_norm_layer_resual_launcher<float>(
int token_num, int hidden_size, hipStream_t stream, float* input,
float* output, const float* scale, const float* bias,
const float* residual_bias, const int max_thread_per_block,
bool is_post_ln);
template void ker_norm_layer_resual_launcher<__half>(
int token_num, int hidden_size, hipStream_t stream, __half* input,
__half* output, const __half* scale, const __half* bias,
const __half* residual_bias, const int max_thread_per_block,
bool is_post_ln);
/**
@brief: ker_enc_embedding
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, batch_seq_len]
output: result, [batch_size, batch_seq_len, hidden_size]
padding_mask: record the padding token, [batch_size, batch_seq_len]
padding_id, the padding token id
*/
template <typename T>
__global__ void ker_enc_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* padding_mask, int padding_id,
const int hidden_size) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int start = target_pos * hidden_size + threadIdx.x;
int end = (target_pos + 1) * hidden_size;
int tid = token_id[target_pos];
if (tid == padding_id) {
// for padding id
if (threadIdx.x == 0) padding_mask[target_pos] = 1;
for (uint i = start; i < end; i += blockDim.x) {
// output[target_pos * blockDim.x + threadIdx.x] = 0.f;
output[i] = 0.f;
}
return;
}
if (threadIdx.x == 0) {
padding_mask[target_pos] = 0;
}
for (uint i = start; i < end; i += blockDim.x) {
int offset = i - target_pos * hidden_size;
output[i] = token_emb[tid * hidden_size + offset] +
pos_emb[blockIdx.y * hidden_size + offset];
}
}
template <>
__global__ void ker_enc_embedding<__half>(const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* padding_mask, int padding_id,
const int half_hidden_size) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int start = target_pos * half_hidden_size + threadIdx.x;
int end = (target_pos + 1) * half_hidden_size;
int tid = token_id[target_pos];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id
if (threadIdx.x == 0) padding_mask[target_pos] = 1;
for (uint i = start; i < end; i += blockDim.x) {
output_h[i] = __float2half2_rn(0.f);
}
return;
}
if (threadIdx.x == 0) {
padding_mask[target_pos] = 0;
}
for (uint i = start; i < end; i += blockDim.x) {
int offset = i - target_pos * half_hidden_size;
float2 te = __half22float2(
((const half2*)token_emb)[tid * half_hidden_size + offset]);
float2 pe = __half22float2(
((const half2*)pos_emb)[blockIdx.y * half_hidden_size + offset]);
te.x += pe.x;
te.y += pe.y;
output_h[i] = __float22half2_rn(te);
}
}
template <typename T>
void ker_enc_embedding_launcher(int batch_size, int batch_seq_len,
int hidden_size, hipStream_t stream,
const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* padding_mask, int padding_id,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_enc_embedding<T>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(max_thread_per_block), 0, stream,
token_emb, pos_emb, token_id, output, padding_mask, padding_id,
hidden_size);
}
template <>
void ker_enc_embedding_launcher<__half>(int batch_size, int batch_seq_len,
int hidden_size, hipStream_t stream,
const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* padding_mask, int padding_id,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_enc_embedding<__half>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(max_thread_per_block), 0, stream,
token_emb, pos_emb, token_id, output, padding_mask, padding_id,
hidden_size / 2);
}
template void ker_enc_embedding_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int* padding_mask, int padding_id, int max_thread_per_block);
template void ker_enc_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* padding_mask, int padding_id,
int max_thread_per_block);
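// Illustrative sketch (an assumption): running the source embedding lookup for one
// batch. d_token_emb is [vocab_size, hidden_size] and d_pos_emb is
// [max_step, hidden_size]; all pointers are placeholders provided by the caller.
inline void example_enc_embedding(const float* d_token_emb,
const float* d_pos_emb, const int* d_token_id,
float* d_enc_input, int* d_padding_mask,
int batch_size, int batch_seq_len,
int hidden_size, int padding_id,
hipStream_t stream) {
int max_thread_per_block = hidden_size < 1024 ? hidden_size : 1024;
ker_enc_embedding_launcher<float>(batch_size, batch_seq_len, hidden_size,
stream, d_token_emb, d_pos_emb, d_token_id,
d_enc_input, d_padding_mask, padding_id,
max_thread_per_block);
}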
/**
@brief: ker_dec_embedding
for decoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
token_emb: [hidden_size, vocab_size], note that its layout differs from the encoder's
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, beam_size, max_step]
output: result, [batch_size, beam_size, hidden_size]
step: current step
max_step: max decoder steps
vocab_size: vocabulary size
*/
template <typename T>
__global__ void ker_dec_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output, int step,
int max_step, int vocab_size,
int hidden_size) {
for (uint offset = threadIdx.x; offset < hidden_size; offset += blockDim.x) {
int token_idx = token_id[blockIdx.x * max_step + step];
output[blockIdx.x * hidden_size + offset] =
token_emb[offset * vocab_size + token_idx] +
pos_emb[step * hidden_size + offset];
}
}
template <typename T>
void ker_dec_embedding_launcher(int step_token_num, int hidden_size,
hipStream_t stream, const T* token_emb,
const T* pos_emb, const int* token_id,
T* output, int step, int max_step,
int vocab_size, int max_thread_per_block) {
hipLaunchKernelGGL(( ker_dec_embedding<T>), dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
token_emb, pos_emb, token_id, output, step, max_step, vocab_size,
hidden_size);
}
template void ker_dec_embedding_launcher<float>(
int step_token_num, int hidden_size, hipStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int step, int max_step, int vocab_size,
int max_thread_per_block);
template void ker_dec_embedding_launcher<__half>(
int step_token_num, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int step, int max_step, int vocab_size,
int max_thread_per_block);
/**
@brief: ker_arrange_encself_qkv
split and reshape ori_qkv matrix into new_q, new_k, new_v during encoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = 3
blockDim.x = max_thread_per_block
@param
ori_qkv: [batch_size, batch_seq_len, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_qkv: [3, batch_size, head_num, batch_seq_len, dim_per_head]
max_batch_dim: max_batch_size * max_seq_len * hidden_size
batch_seq_len: the sequence length of the current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_encself_qkv(const T* ori_qkv, const T* qkv_bias,
T* new_qkv, int max_batch_dim,
int batch_seq_len, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int qkv_offset = max_batch_dim * blockIdx.y;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
new_qkv[qkv_offset + target_id] =
ori_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i] +
__ldg(&qkv_bias[blockIdx.y * hidden_size + i]);
}
}
template <>
__global__ void ker_arrange_encself_qkv<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num) {
int hidden_size = dim_per_head * head_num;
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int qkv_offset = max_batch_dim * blockIdx.y;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
const half2* p_ori_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
half2* p_new_qkv = (half2*)new_qkv;
p_new_qkv[qkv_offset + target_id] = __hadd2(
p_ori_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i],
__ldg(&p_bias[blockIdx.y * hidden_size + i]));
}
}
template <typename T>
void ker_arrange_encself_qkv_launcher(int batch_token_num, int hidden_size,
hipStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_qkv,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_encself_qkv<T>)
, dim3(dim3(batch_token_num, 3)), dim3(max_thread_per_block), 0, stream,
ori_qkv, qkv_bias, new_qkv, max_batch_dim, batch_seq_len,
dim_per_head, head_num);
}
template <>
void ker_arrange_encself_qkv_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_encself_qkv<__half>)
, dim3(dim3(batch_token_num, 3)), dim3(max_thread_per_block), 0, stream,
ori_qkv, qkv_bias, new_qkv, max_batch_dim / 2, batch_seq_len,
dim_per_head / 2, head_num);
}
template void ker_arrange_encself_qkv_launcher<float>(
int batch_token_num, int hidden_size, hipStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num,
int max_thread_per_block);
template void ker_arrange_encself_qkv_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num,
int max_thread_per_block);
/**
@brief: ker_arrange_decself_qkv
split and reshape ori_qkv matrix into new_q, new_k, new_v during decoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * beam_size
gridDim.y = 3
blockDim.x = max_thread_per_block
@param
ori_qkv: [batch_size, beam_size, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_q: new query. [batch_size, beam_size, hidden_size]
new_k: new key. [batch_size, beam_size, head_num, max_step, dim_per_head]
new_v: new value. [batch_size, beam_size, head_num, max_step, dim_per_head]
head_num: head number in multi-head attention
dim_per_head: dim of one head in multi-head attention
max_step: max decode step
step_id: current step id
*/
template <typename T>
__global__ void ker_arrange_decself_qkv(const T* ori_qkv, const T* qkv_bias,
T* new_q, T* new_k, T* new_v,
int head_num, int dim_per_head,
int max_step, int step_id) {
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
// blockdim is equal to hidden_size
T val = ori_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i] +
__ldg(&qkv_bias[blockIdx.y * hidden_size + i]);
int seq_id =
blockIdx.x; // obviously, seq_id = batch_id * beam_size + beam_id
if (blockIdx.y == 0) {
// for query
new_q[seq_id * hidden_size + i] = val;
return;
}
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int target_id = targetid_4dim(seq_id, head_id, step_id, dim_id, head_num,
max_step, dim_per_head);
if (blockIdx.y == 1) {
// for key
new_k[target_id] = val;
} else {
// for value
new_v[target_id] = val;
}
}
}
template <>
__global__ void ker_arrange_decself_qkv<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* new_v, int head_num, int dim_per_head, int max_step, int step_id) {
int half_hidden_size = dim_per_head * head_num;
const half2* p_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
half2 val = __hadd2(
p_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * half_hidden_size + i],
__ldg(&p_bias[blockIdx.y * half_hidden_size + i]));
// obviously, seq_id = batch_id * beam_size + beam_id
int seq_id = blockIdx.x;
if (blockIdx.y == 0) {
// for query
((half2*)new_q)[seq_id * half_hidden_size + i] = val;
return;
}
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int target_id = targetid_4dim(seq_id, head_id, step_id, dim_id, head_num,
max_step, dim_per_head);
if (blockIdx.y == 1) {
// for key
((half2*)new_k)[target_id] = val;
} else {
// for value
((half2*)new_v)[target_id] = val;
}
}
}
template <typename T>
void ker_arrange_decself_qkv_launcher(int step_token_num, int hidden_size,
hipStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_q, T* new_k,
T* new_v, int head_num, int dim_per_head,
int max_step, int step_id,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_decself_qkv<T>)
, dim3(dim3(step_token_num, 3)), dim3(max_thread_per_block), 0, stream,
ori_qkv, qkv_bias, new_q, new_k, new_v, head_num, dim_per_head,
max_step, step_id);
}
template <>
void ker_arrange_decself_qkv_launcher<__half>(
int step_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* new_v, int head_num, int dim_per_head, int max_step, int step_id,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_decself_qkv<__half>)
, dim3(dim3(step_token_num, 3)), dim3(max_thread_per_block), 0, stream,
ori_qkv, qkv_bias, new_q, new_k, new_v, head_num, dim_per_head / 2,
max_step, step_id);
}
template void ker_arrange_decself_qkv_launcher<float>(
int step_token_num, int hidden_size, hipStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_q, float* new_k,
float* new_v, int head_num, int dim_per_head, int max_step, int step_id,
int max_thread_per_block);
template void ker_arrange_decself_qkv_launcher<__half>(
int step_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* new_v, int head_num, int dim_per_head, int max_step, int step_id,
int max_thread_per_block);
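// Illustrative sketch (an assumption, not lightseq's decoder code): splitting the
// fused QKV projection of a single decode step into the query buffer and the
// per-step key/value caches. Pointers are placeholders; hidden_size equals
// head_num * dim_per_head.
inline void example_arrange_decself_qkv(const float* d_ori_qkv,
const float* d_qkv_bias, float* d_new_q,
float* d_k_cache, float* d_v_cache,
int batch_size, int beam_size,
int head_num, int dim_per_head,
int max_step, int step_id,
hipStream_t stream) {
int step_token_num = batch_size * beam_size;
int hidden_size = head_num * dim_per_head;
int max_thread_per_block = hidden_size < 1024 ? hidden_size : 1024;
ker_arrange_decself_qkv_launcher<float>(
step_token_num, hidden_size, stream, d_ori_qkv, d_qkv_bias, d_new_q,
d_k_cache, d_v_cache, head_num, dim_per_head, max_step, step_id,
max_thread_per_block);
}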
/**
@brief: ker_arrange_encdec_kv
split and reshape ori_kv matrix into new_k, new_v before enc-dec attention
it will be called once on the encoder output
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = dec_layer_num * 2
blockDim.x = max_thread_per_block
@param
ori_kv: [batch_size, batch_seq_len, dec_layer_num, 2, hidden_size]
kv_bias: [dec_layer_num, 2, hidden_size]
new_k: [batch_size, head_num, batch_seq_len, dim_per_head] per layer,
with an offset in offset_per_layer between layers.
new_v: [batch_size, head_num, batch_seq_len, dim_per_head] per layer,
with an offset in offset_per_layer between layers.
offset_per_layer: max_batch_size * max_step * hidden_size
batch_seq_len: sequence length of current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_encdec_kv(const T* ori_kv, const T* kv_bias,
T* new_k, T* new_v, int offset_per_layer,
int batch_seq_len, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
T val = ori_kv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i] +
__ldg(&kv_bias[blockIdx.y * hidden_size + i]);
int seq_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int layer_id = blockIdx.y >> 1;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int layer_offset = layer_id * offset_per_layer;
int target_id = targetid_4dim(seq_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head) +
layer_offset;
if (blockIdx.y & 1) {
// for value
new_v[target_id] = val;
} else {
// for key
new_k[target_id] = val;
}
}
}
template <>
__global__ void ker_arrange_encdec_kv<__half>(
const __half* ori_kv, const __half* kv_bias, __half* new_k, __half* new_v,
int offset_per_layer, int batch_seq_len, int dim_per_head, int head_num) {
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
const half2* p_ori_kv = (const half2*)ori_kv;
const half2* p_kv_bias = (const half2*)kv_bias;
half2 val = __hadd2(
p_ori_kv[(blockIdx.x * gridDim.y + blockIdx.y) * half_hidden_size + i],
__ldg(&p_kv_bias[blockIdx.y * half_hidden_size + i]));
int seq_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int layer_id = blockIdx.y >> 1;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int layer_offset = layer_id * offset_per_layer;
int target_id = targetid_4dim(seq_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head) +
layer_offset;
if (blockIdx.y & 1) {
// for value
((half2*)new_v)[target_id] = val;
} else {
// for key
((half2*)new_k)[target_id] = val;
}
}
}
template <typename T>
void ker_arrange_encdec_kv_launcher(int batch_token_num, int dec_layer_num,
int hidden_size, hipStream_t stream,
const T* ori_kv, const T* kv_bias, T* new_k,
T* new_v, int offset_per_layer,
int batch_seq_len, int dim_per_head,
int head_num, int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_encdec_kv<T>)
, dim3(dim3(batch_token_num, dec_layer_num * 2)), dim3(max_thread_per_block), 0,
stream, ori_kv, kv_bias, new_k, new_v, offset_per_layer,
batch_seq_len, dim_per_head, head_num);
}
template <>
void ker_arrange_encdec_kv_launcher<__half>(
int batch_token_num, int dec_layer_num, int hidden_size,
hipStream_t stream, const __half* ori_kv, const __half* kv_bias,
__half* new_k, __half* new_v, int offset_per_layer, int batch_seq_len,
int dim_per_head, int head_num, int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_encdec_kv<__half>)
, dim3(dim3(batch_token_num, dec_layer_num * 2)), dim3(max_thread_per_block), 0,
stream, ori_kv, kv_bias, new_k, new_v, offset_per_layer / 2,
batch_seq_len, dim_per_head / 2, head_num);
}
template void ker_arrange_encdec_kv_launcher<float>(
int batch_token_num, int dec_layer_num, int hidden_size,
hipStream_t stream, const float* ori_kv, const float* kv_bias,
float* new_k, float* new_v, int offset_per_layer, int batch_seq_len,
int dim_per_head, int head_num, int max_thread_per_block);
template void ker_arrange_encdec_kv_launcher<__half>(
int batch_token_num, int dec_layer_num, int hidden_size,
hipStream_t stream, const __half* ori_kv, const __half* kv_bias,
__half* new_k, __half* new_v, int offset_per_layer, int batch_seq_len,
int dim_per_head, int head_num, int max_thread_per_block);
/**
@brief: ker_arrange_encdec_q
reshape ori_q into new_q and add bias
during enc-dec attention
ori_q is the result of gemm
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
ori_q: [batch_size, beam_size, hidden_size]
q_bias: [hidden_size]
new_q: [batch_size, head_num, beam_size, dim_per_head]
beam_size: beam size of beam search
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_encdec_q(const T* ori_q, const T* q_bias, T* new_q,
int beam_size, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
T val = ori_q[blockIdx.x * hidden_size + i] + __ldg(&q_bias[i]);
int batch_id = blockIdx.x / beam_size;
int beam_id = blockIdx.x % beam_size;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
new_q[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num, beam_size,
dim_per_head)] = val;
}
}
template <>
__global__ void ker_arrange_encdec_q<__half>(const __half* ori_q,
const __half* q_bias,
__half* new_q, int beam_size,
int dim_per_head, int head_num) {
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
const half2* p_q = (const half2*)ori_q;
const half2* p_bias = (const half2*)q_bias;
half2 val =
__hadd2(p_q[blockIdx.x * half_hidden_size + i], __ldg(&p_bias[i]));
int batch_id = blockIdx.x / beam_size;
int beam_id = blockIdx.x % beam_size;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
((half2*)new_q)[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num,
beam_size, dim_per_head)] = val;
}
}
template <typename T>
void ker_arrange_encdec_q_launcher(int step_token_num, int hidden_size,
hipStream_t stream, const T* ori_q,
const T* q_bias, T* new_q, int beam_size,
int dim_per_head, int head_num,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_encdec_q<T>), dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
ori_q, q_bias, new_q, beam_size, dim_per_head, head_num);
}
template <>
void ker_arrange_encdec_q_launcher<__half>(
int step_token_num, int hidden_size, hipStream_t stream,
const __half* ori_q, const __half* q_bias, __half* new_q, int beam_size,
int dim_per_head, int head_num, int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_encdec_q<__half>)
, dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
ori_q, q_bias, new_q, beam_size, dim_per_head / 2, head_num);
}
template void ker_arrange_encdec_q_launcher<float>(
int step_token_num, int hidden_size, hipStream_t stream,
const float* ori_q, const float* q_bias, float* new_q, int beam_size,
int dim_per_head, int head_num, int max_thread_per_block);
template void ker_arrange_encdec_q_launcher<__half>(
int step_token_num, int hidden_size, hipStream_t stream,
const __half* ori_q, const __half* q_bias, __half* new_q, int beam_size,
int dim_per_head, int head_num, int max_thread_per_block);
/**
@brief: ker_correlation_softmax_encself
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * batch_seq_len
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, batch_seq_len, batch_seq_len]
src_padding_mask: [batch_size, batch_seq_len],
indicating which token is a padding token.
*/
template <typename T>
__global__ void ker_correlation_softmax_encself(T* correlation,
const int* src_padding_mask) {
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (src_padding_mask[blockIdx.x * blockDim.x + blockIdx.y % blockDim.x]) {
correlation[idx] = (T) 0.f;
return;
}
int mask = src_padding_mask[blockIdx.x * blockDim.x + threadIdx.x];
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_correlation_softmax_encself_launcher(int batch_size, int batch_seq_len,
int head_num, hipStream_t stream,
T* correlation,
const int* src_padding_mask) {
hipLaunchKernelGGL(( ker_correlation_softmax_encself<T>)
, dim3(dim3(batch_size, head_num * batch_seq_len)), dim3(batch_seq_len), 0,
stream, correlation, src_padding_mask);
}
template void ker_correlation_softmax_encself_launcher<float>(
int batch_size, int batch_seq_len, int head_num, hipStream_t stream,
float* correlation, const int* src_padding_mask);
template void ker_correlation_softmax_encself_launcher<__half>(
int batch_size, int batch_seq_len, int head_num, hipStream_t stream,
__half* correlation, const int* src_padding_mask);
/**
@brief: ker_correlation_softmax_decself
query-key correlation softmax for decoder self attention
@thread
gridDim.x = batch_size * beam_size * head_num
blockDim.x = cur_step + 1
@param
correlation: [batch_size, beam_size, head_num, cur_step + 1]
*/
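/* In equation form, the kernel below computes a plain (unmasked) softmax over
 * the cur_step + 1 positions of each (batch, beam, head) row:
 *   correlation_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j)
 * using one block-wide max reduction and one sum reduction. */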
template <typename T>
__global__ void ker_correlation_softmax_decself(T* correlation) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax(val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / ssum);
}
template <typename T>
void ker_correlation_softmax_decself_launcher(int batch_head_num, int step_num,
hipStream_t stream,
T* correlation) {
hipLaunchKernelGGL(( ker_correlation_softmax_decself), dim3(batch_head_num), dim3(step_num), 0, stream,
correlation);
}
template void ker_correlation_softmax_decself_launcher<float>(
int batch_head_num, int step_num, hipStream_t stream, float* correlation);
template void ker_correlation_softmax_decself_launcher<__half>(
int batch_head_num, int step_num, hipStream_t stream, __half* correlation);
/**
@brief: ker_correlation_softmax_encdec
query-key correlation softmax for encoder-decoder attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * beam_size
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, beam_size, batch_seq_len]
src_padding_mask: [batch_size, batch_seq_len]
indicating which token is a padding token.
*/
template <typename T>
__global__ void ker_correlation_softmax_encdec(T* correlation,
const int* src_padding_mask) {
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
int mask = src_padding_mask[blockIdx.x * blockDim.x + threadIdx.x];
float val = (float)correlation[idx];
float max_val = blockReduceMax(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_correlation_softmax_encdec_launcher(
int batch_size, int head_num_per_seq, int batch_seq_len,
hipStream_t stream, T* correlation, const int* src_padding_mask) {
hipLaunchKernelGGL(( ker_correlation_softmax_encdec<T>)
, dim3(dim3(batch_size, head_num_per_seq)), dim3(batch_seq_len), 0, stream,
correlation, src_padding_mask);
}
template void ker_correlation_softmax_encdec_launcher<float>(
int batch_size, int head_num_per_seq, int batch_seq_len,
hipStream_t stream, float* correlation, const int* src_padding_mask);
template void ker_correlation_softmax_encdec_launcher<__half>(
int batch_size, int head_num_per_seq, int batch_seq_len,
hipStream_t stream, __half* correlation, const int* src_padding_mask);
/**
@brief: ker_arrange_atten_output
reshape Scaled Dot-Product Attention output.
It will be used by both the encoder and the decoder.
token_num = batch_seq_len, for encoder
= beam_size, for decoder
@thread
gridDim.x = batch_size * ${token_num}
blockDim.x = max_thread_per_block
@param
ori_q: [batch_size, head_num, ${token_num}, dim_per_head]
new_q: [batch_size, ${token_num}, hidden_size]
beam_size: beam_size for the decoder; batch_seq_len for the encoder
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_atten_output(const T* ori_q, T* new_q,
int beam_size, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
int batch_id = blockIdx.x / beam_size;
// note, for encoder, beam_id is token_id; for decoder, beam_id is beam_id
int beam_id = blockIdx.x % beam_size;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
new_q[blockIdx.x * hidden_size + i] = ori_q[targetid_4dim(
batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)];
}
}
template <>
__global__ void ker_arrange_atten_output<__half>(const __half* ori_q,
__half* new_q, int beam_size,
int dim_per_head,
int head_num) {
int batch_id = blockIdx.x / beam_size;
// note, for encoder, beam_id is token_id; for decoder, beam_id is beam_id
int beam_id = blockIdx.x % beam_size;
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
const half2* p_ori_q = (const half2*)ori_q;
half2* p_new_q = (half2*)new_q;
p_new_q[blockIdx.x * half_hidden_size + i] = p_ori_q[targetid_4dim(
batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)];
}
}
template <typename T>
void ker_arrange_atten_output_launcher(int batch_token_num, int hidden_size,
hipStream_t stream, const T* ori_q,
T* new_q, int beam_size,
int dim_per_head, int head_num,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_atten_output<T>)
, dim3(batch_token_num), dim3(max_thread_per_block), 0, stream,
ori_q, new_q, beam_size, dim_per_head, head_num);
}
template <>
void ker_arrange_atten_output_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_q, __half* new_q, int beam_size, int dim_per_head,
int head_num, int max_thread_per_block) {
hipLaunchKernelGGL(( ker_arrange_atten_output<__half>)
, dim3(batch_token_num), dim3(max_thread_per_block), 0, stream,
ori_q, new_q, beam_size, dim_per_head / 2, head_num);
}
template void ker_arrange_atten_output_launcher<float>(
int batch_token_num, int hidden_size, hipStream_t stream,
const float* ori_q, float* new_q, int beam_size, int dim_per_head,
int head_num, int max_thread_per_block);
template void ker_arrange_atten_output_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_q, __half* new_q, int beam_size, int dim_per_head,
int head_num, int max_thread_per_block);
/**
@brief: ker_refresh_result
refresh alive_seq, seq_probs, seq_score, num_finish_beam based on
sorted candidate
@thread
gridDim.x = batch_size
gridDim.y = beam_size
blockDim.x = max_step
@param
can_idx: [variable length], determined by the rough candidate number
can_score: [variable length], determined by the rough candidate number
num_can_per_beam: [batch_size * beam_size]
save exclusive_scan_sum of the beam candidate number array
e.g. [0,2,5,1] -> [0, 0, 2, 7]
old_alive_seq: [batch_size, beam_size, max_step]
new_alive_seq: [batch_size, beam_size, max_step]
seq_probs: [batch_size, beam_size]
seq_score: [batch_size, beam_size]
    applying length penalty to seq_probs gives seq_score
num_finish_beam: records the number of currently finished beams.
    it will be used to decide whether to stop early during beam_search
vocab_size: target vocabulary size
cur_step: current step
length_norm: length penalty norm value
*/
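/* Note on diverse beam search handling below: when diverse_lambda != 0,
 * ker_diverse_beam_search has re-encoded the candidate beam id as
 * can_beam_id + rank_id * beam_size, so e.g. with beam_size = 4 an encoded
 * beam id of 9 decodes to rank_id = 9 / 4 = 2 and original beam 9 % 4 = 1,
 * which is exactly what the rank_id / can_beam_id computation below undoes. */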
__global__ void ker_refresh_result(const int* can_idx, const float* can_score,
const int* num_can_per_beam,
const int* old_alive_seq, int* new_alive_seq,
float* seq_probs, float* seq_score,
int* num_finish_beam, int vocab_size,
int cur_step, float length_norm,
float diverse_lambda, int end_id) {
// step1 update alive_seq
int can_pos = num_can_per_beam[blockIdx.x * gridDim.y] + blockIdx.y;
int ori_can_idx = can_idx[can_pos]; // can_beam_id * vocab_size + vocab_id
int can_beam_id = ori_can_idx / vocab_size;
int can_vocab_id = ori_can_idx % vocab_size;
int rank_id;
if (diverse_lambda != 0) {
rank_id = can_beam_id / gridDim.y; // rank in each beam
can_beam_id %= gridDim.y;
}
int thread_vocab_id;
if (threadIdx.x > cur_step + 1) {
thread_vocab_id = end_id;
} else if (threadIdx.x == cur_step + 1) {
// add current step generate vocabulary id
thread_vocab_id = can_vocab_id;
} else {
// threadIdx.x <= cur_step
thread_vocab_id = old_alive_seq[targetid_3dim(
blockIdx.x, can_beam_id, threadIdx.x, gridDim.y, blockDim.x)];
}
new_alive_seq[targetid_3dim(blockIdx.x, blockIdx.y, threadIdx.x, gridDim.y,
blockDim.x)] = thread_vocab_id;
// step2 update seq_probs if alive seq when not eos
if (cur_step == 0 || can_vocab_id != end_id) {
// alive seq
if (threadIdx.x == 0) {
if (diverse_lambda == 0) {
seq_probs[blockIdx.x * gridDim.y + blockIdx.y] =
(can_score[can_pos] - blockIdx.x * min_log_probability) /
length_norm; // recover it
} else {
seq_probs[blockIdx.x * gridDim.y + blockIdx.y] =
(can_score[can_pos] - blockIdx.x * min_log_probability +
diverse_lambda * (rank_id + 1)) /
length_norm;
}
}
return;
}
// step3 update seq_score, num_finish_beam if finish seq
if (threadIdx.x == 0) {
atomicAdd(num_finish_beam, 1);
}
int seq_last_id = old_alive_seq[targetid_3dim(
blockIdx.x, can_beam_id, cur_step, gridDim.y, blockDim.x)];
// update finished seq score
if (threadIdx.x == 0) {
// note, with batch offset value, to sort between batch element
if (diverse_lambda == 0) {
seq_score[blockIdx.x * gridDim.y + blockIdx.y] = can_score[can_pos];
} else {
seq_score[blockIdx.x * gridDim.y + blockIdx.y] =
can_score[can_pos] + diverse_lambda * (rank_id + 1);
}
}
}
/**
@brief: ker_refresh_cache
supply current step's projected k,v to K, V cache
@thread
gridDim.x = decoder_layer_num * (cur_step + 1)
gridDim.y = batch_size * beam_size * 2
blockDim.x = max_thread_per_block
@param
num_can_per_beam: [batch_size, beam_size]
can_idx: [variable length], determined by the rough candidate number
self_k_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
self_v_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
new_self_k_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
new_self_v_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
self_k_bgeem_offset = max_batch_size * max_step * hidden_size * beam_size
beam_size : beam size for beam_search
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
vocab_size: the vocab size of decoder
cur_step: current step
max_step: max decode step
*/
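/* Index decoding sketch for the kernel below: blockIdx.y packs (beam, K/V) as
 * beam_id_global * 2 + kv_id, so e.g. with beam_size = 3 and blockIdx.y = 7 we
 * get kv_id = 1 (value cache), beam_id_global = 3, hence batch_id = 1 and
 * beam_id = 0. */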
template <typename T>
__global__ void ker_refresh_cache(const int* num_can_per_beam,
const int* can_idx, const T* self_k_bgeem,
const T* self_v_bgeem, T* new_self_k_bgeem,
T* new_self_v_bgeem, int self_k_bgeem_offset,
int beam_size, int dim_per_head, int head_num,
int vocab_size, int cur_step, int max_step,
bool diverse, int end_id) {
int layer_id = blockIdx.x / (cur_step + 1);
int step_id = blockIdx.x % (cur_step + 1);
int kv_id = blockIdx.y & 1;
int beam_id_global = blockIdx.y >> 1;
int batch_id = beam_id_global / beam_size;
int beam_id = beam_id_global % beam_size;
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int can_pos = num_can_per_beam[batch_id * beam_size] + beam_id;
int can_beam_id =
can_idx[can_pos] / vocab_size; // can_beam_id * vocab_size + vocab_id
if (diverse) can_beam_id %= beam_size;
if (can_idx[can_pos] % vocab_size == end_id) {
return;
}
int base_pos = targetid_5dim(batch_id, 0, head_id, step_id, dim_id,
beam_size, head_num, max_step, dim_per_head) +
layer_id * self_k_bgeem_offset;
int beam_offset = hidden_size * max_step;
int ori_id = base_pos + beam_offset * can_beam_id;
int new_id = base_pos + beam_offset * beam_id;
if (kv_id == 0) {
// for key
new_self_k_bgeem[new_id] = self_k_bgeem[ori_id];
} else {
// for value
new_self_v_bgeem[new_id] = self_v_bgeem[ori_id];
}
}
}
template <>
__global__ void ker_refresh_cache<__half>(
const int* num_can_per_beam, const int* can_idx, const __half* self_k_bgeem,
const __half* self_v_bgeem, __half* new_self_k_bgeem,
__half* new_self_v_bgeem, int self_k_bgeem_offset, int beam_size,
int dim_per_head, int head_num, int vocab_size, int cur_step, int max_step,
bool diverse, int end_id) {
int layer_id = blockIdx.x / (cur_step + 1);
int step_id = blockIdx.x % (cur_step + 1);
int kv_id = blockIdx.y & 1;
int beam_id_global = blockIdx.y >> 1;
int batch_id = beam_id_global / beam_size;
int beam_id = beam_id_global % beam_size;
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int can_pos = num_can_per_beam[batch_id * beam_size] + beam_id;
int can_beam_id =
can_idx[can_pos] / vocab_size; // can_beam_id * vocab_size + vocab_id
if (diverse) can_beam_id %= beam_size;
if (cur_step != 0 && can_idx[can_pos] % vocab_size == end_id) {
return;
}
int base_pos = targetid_5dim(batch_id, 0, head_id, step_id, dim_id,
beam_size, head_num, max_step, dim_per_head) +
layer_id * self_k_bgeem_offset;
int beam_offset = half_hidden_size * max_step;
int ori_id = base_pos + beam_offset * can_beam_id;
int new_id = base_pos + beam_offset * beam_id;
if (kv_id == 0) {
// for key
((half2*)new_self_k_bgeem)[new_id] = ((half2*)self_k_bgeem)[ori_id];
} else {
// for value
((half2*)new_self_v_bgeem)[new_id] = ((half2*)self_v_bgeem)[ori_id];
}
}
}
template <typename T>
void ker_refresh_cache_launcher(
int grid_dim_x, int grid_dim_y, int block_dim, hipStream_t stream,
const int* num_can_per_beam, const int* can_idx, const T* self_k_bgeem,
const T* self_v_bgeem, T* new_self_k_bgeem, T* new_self_v_bgeem,
int self_k_bgeem_offset, int beam_size, int dim_per_head, int head_num,
int vocab_size, int cur_step, int max_step, bool diverse, int end_id) {
hipLaunchKernelGGL(( ker_refresh_cache<T>), dim3(dim3(grid_dim_x, grid_dim_y)), dim3(block_dim), 0, stream,
num_can_per_beam, can_idx, self_k_bgeem, self_v_bgeem, new_self_k_bgeem,
new_self_v_bgeem, self_k_bgeem_offset, beam_size, dim_per_head, head_num,
vocab_size, cur_step, max_step, diverse, end_id);
}
template <>
void ker_refresh_cache_launcher<__half>(
int grid_dim_x, int grid_dim_y, int block_dim, hipStream_t stream,
const int* num_can_per_beam, const int* can_idx, const __half* self_k_bgeem,
const __half* self_v_bgeem, __half* new_self_k_bgeem,
__half* new_self_v_bgeem, int self_k_bgeem_offset, int beam_size,
int dim_per_head, int head_num, int vocab_size, int cur_step, int max_step,
bool diverse, int end_id) {
hipLaunchKernelGGL(( ker_refresh_cache<__half>)
, dim3(dim3(grid_dim_x, grid_dim_y)), dim3(block_dim / 2), 0, stream,
num_can_per_beam, can_idx, self_k_bgeem, self_v_bgeem,
new_self_k_bgeem, new_self_v_bgeem, self_k_bgeem_offset / 2,
beam_size, dim_per_head / 2, head_num, vocab_size, cur_step, max_step,
diverse, end_id);
}
template void ker_refresh_cache_launcher<float>(
int grid_dim_x, int grid_dim_y, int block_dim, hipStream_t stream,
const int* num_can_per_beam, const int* can_idx, const float* self_k_bgeem,
const float* self_v_bgeem, float* new_self_k_bgeem, float* new_self_v_bgeem,
int self_k_bgeem_offset, int beam_size, int dim_per_head, int head_num,
int vocab_size, int cur_step, int max_step, bool diverse, int end_id);
template void ker_refresh_cache_launcher<__half>(
int grid_dim_x, int grid_dim_y, int block_dim, hipStream_t stream,
const int* num_can_per_beam, const int* can_idx, const __half* self_k_bgeem,
const __half* self_v_bgeem, __half* new_self_k_bgeem,
__half* new_self_v_bgeem, int self_k_bgeem_offset, int beam_size,
int dim_per_head, int head_num, int vocab_size, int cur_step, int max_step,
bool diverse, int end_id);
/**
@brief: ker_write_trg_tokenid_pos_penalty
write result from alive seq to output, for length_penalty >= 0,
    or for length_penalty < 0 when decoding has reached max_decode_step;
    simply output beam 0 as the final result
@thread
gridDim.x = batch_size
blockDim.x = cur_step + 1
@param
alive_seq: [batch_size, beam_size, max_step], <start> is the first token in
each beam
output: [batch_size, cur_step + 1], no <start>, and at least one <eos> at the
    end of the sequence
*/
__global__ void ker_write_trg_tokenid_pos_penalty(const int* alive_seq,
float* seq_score, int* output,
int max_step, int beam_size) {
int target_id =
targetid_3dim(blockIdx.x, 0, threadIdx.x + 1, beam_size, max_step);
output[blockIdx.x * blockDim.x + threadIdx.x] = alive_seq[target_id];
if (threadIdx.x == 0) {
seq_score[blockIdx.x] =
seq_score[blockIdx.x * beam_size] - blockIdx.x * min_log_probability;
}
}
/**
@brief: ker_write_trg_tokenid_neg_penalty
write result from alive seq to output,
    for length_penalty < 0 when all beams have reached their eos;
    compute each beam's score and select the top beam
@thread
gridDim.x = batch_size
blockDim.x = cur_step + 1
@param
alive_seq: [batch_size, beam_size, max_step], <start> is the first token in
each beam
seq_score: [batch_size, beam_size]; when length_penalty < 0, seq_score is also
    the sum of log probs
output: [batch_size, cur_step + 1], no <start>, and at least one <eos> at the
    end of the sequence
*/
__global__ void ker_write_trg_tokenid_neg_penalty(const int* alive_seq,
const float* seq_score,
int* output, int max_step,
int beam_size, int vocab_size,
int end_id) {
__shared__ float seq_final_score;
__shared__ int res_beam_id;
if (threadIdx.x == 0) {
seq_final_score = CUDA_FLOAT_INF_NEG;
res_beam_id = 0;
}
for (int beam_id = 0; beam_id < beam_size; beam_id++) {
int target_id = targetid_3dim(blockIdx.x, beam_id, threadIdx.x + 1,
beam_size, max_step);
int seq_len =
blockReduceSum(int(alive_seq[target_id] != end_id)); // compute seq len
if (threadIdx.x == 0) {
float cur_beam_score = seq_score[blockIdx.x * beam_size + beam_id] -
blockIdx.x * min_log_probability; // recover prob
cur_beam_score /= (float(seq_len) + epsilon);
if (cur_beam_score > seq_final_score) {
seq_final_score = cur_beam_score;
res_beam_id = beam_id;
}
}
__syncthreads();
}
int target_id = targetid_3dim(blockIdx.x, res_beam_id, threadIdx.x + 1,
beam_size, max_step);
output[blockIdx.x * blockDim.x + threadIdx.x] = alive_seq[target_id];
// output[blockIdx.x * blockDim.x + threadIdx.x] =
// int(seq_final_score[threadIdx.x]);
}
/**
@brief: ker_write_topk_result
write result from alive seq to output, recover seq_score,
    for length_penalty > 0
@thread
gridDim.x = batch_size * beam_size
blockDim.x = cur_step + 1
@param
alive_seq: [batch_size, beam_size, max_step], <start> is the first token in
each beam
seq_score: [batch_size, beam_size]
seq_probs: [batch_size, beam_size]
output: [batch_size, cur_step + 1], no <start>, and at least one <eos> at the
    end of the sequence
*/
__global__ void ker_write_topk_result(const int* alive_seq, float* seq_score,
int* res_seq, int vocab_size,
int max_step, int beam_size, int end_id) {
res_seq[blockIdx.x * blockDim.x + threadIdx.x] =
alive_seq[blockIdx.x * max_step + threadIdx.x + 1];
if (threadIdx.x == 0) {
seq_score[blockIdx.x] -= (blockIdx.x / beam_size) * min_log_probability;
res_seq[blockIdx.x * blockDim.x + blockDim.x - 1] = end_id;
}
}
/**
@brief: ker_topk_sample
quick rough topk sampling from logits
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T, int k>
__global__ void ker_topk_sample(const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const int max_step,
const int batch_seq_len, int logits_seq_len,
int* unfinished, hiprandState_t* curandstate,
int eos_id) {
int last_token_idx_in_batch = blockIdx.x * max_step + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (batch_seq_len > 1 && old_input_ids[last_token_idx_in_batch] == eos_id) {
if (threadIdx.x == 0) {
old_input_ids[last_token_idx_in_batch + 1] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit and rough Kth logit over the whole vocab
*/
__shared__ float s_max_logit, s_topk_logit;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
rough_top_kth_logit = fmaxf(
rough_top_kth_logit,
(float)(logits[idx]) +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]));
}
float max_logit = blockReduceMax(rough_top_kth_logit);
rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_topk_logit = rough_top_kth_logit;
s_max_logit = max_logit;
}
__syncthreads();
__shared__ int s_tid;
if (k != 1) {
    /* step2: each thread holds one logit larger than the Kth logit, then
     * sample from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
// int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit =
(float)logits[idx] +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]);
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
// if (logit >= s_topk_logit) test_num++;
if (logit >= s_topk_logit && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
// test_num = blockReduceSum(test_num);
// __shared__ int s_test_num;
// if (threadIdx.x == 0) {
// s_test_num = test_num;
// if (s_test_num != 1) printf("sample from top %d\n", s_test_num);
// // printf("sample from top %s", test_num);
// }
// __syncthreads();
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
typedef hipcub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = hiprand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
} else {
s_tid = vocab_size;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit =
(float)logits[idx] +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]);
if (logit == s_max_logit) {
s_tid = idx - left_logit_idx + threadIdx.x;
}
}
__syncthreads();
}
  /* if new sampled tid is not EOS, set unfinished to TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 write back new sampled ids */
if (threadIdx.x == 0) {
old_input_ids[last_token_idx_in_batch + 1] = s_tid;
}
}
template <typename T>
void ker_topk_sample_launcher(int batch_size, int batch_seq_len,
const int max_step, int logits_seq_len,
int max_thread_per_block, hipStream_t stream,
const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const int k,
int* unfinished, hiprandState_t* curandstate,
int eos_id) {
if (k == 1)
hipLaunchKernelGGL(( ker_topk_sample<T, 1>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 2)
hipLaunchKernelGGL(( ker_topk_sample<T, 2>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 4)
hipLaunchKernelGGL(( ker_topk_sample<T, 4>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 8)
hipLaunchKernelGGL(( ker_topk_sample<T, 8>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 16)
hipLaunchKernelGGL(( ker_topk_sample<T, 16>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 32)
hipLaunchKernelGGL(( ker_topk_sample<T, 32>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else {
throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]");
}
}
template void ker_topk_sample_launcher<float>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const float* logits,
const float* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id);
template void ker_topk_sample_launcher<__half>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const __half* logits,
const __half* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id);
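// Illustrative host-side sketch (not part of the original code) of how the
// top-k sampling launcher above could be driven; the device buffer names,
// their allocation and the stream are assumptions made only for this example:
//
//   hiprandState_t* d_states;
//   hipMalloc((void**)&d_states, batch_size * sizeof(hiprandState_t));
//   // one state per batch element, indexed by blockIdx.x in ker_curand_setup
//   hipLaunchKernelGGL(ker_curand_setup, dim3(batch_size), dim3(1), 0, stream,
//                      d_states);
//   ker_topk_sample_launcher<float>(batch_size, batch_seq_len, max_step,
//                                   logits_seq_len, max_thread_per_block,
//                                   stream, d_logits, d_logit_bias,
//                                   d_input_ids, d_new_input_ids, vocab_size,
//                                   /*k=*/8, d_unfinished, d_states, eos_id);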
/**
@brief: ker_topp_sample
quick rough topp sampling from logits
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T>
__global__ void ker_topp_sample(const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const int max_step,
const int batch_seq_len, int logits_seq_len,
int* unfinished, float p,
hiprandState_t* curandstate, int eos_id) {
int token_idx_in_batch = blockIdx.x * max_step + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (batch_seq_len > 1 && old_input_ids[token_idx_in_batch] == eos_id) {
if (threadIdx.x == 0) {
old_input_ids[token_idx_in_batch + 1] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/* step1. find max logit in each thread and sample from these probs with
* nucleus sampling */
__shared__ float s_max_logit;
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
    max_logit = fmaxf(
        max_logit,
        (float)logits[idx] +
            (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]));
}
float max_logit_array[1];
max_logit_array[0] = max_logit;
  typedef hipcub::BlockRadixSort<float, 1024, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage sort_temp_storage;
BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array);
float presum_max_logit_exp;
max_logit = max_logit_array[0];
float block_max_logit = blockReduceMax(max_logit);
if (threadIdx.x == 0) {
s_max_logit = block_max_logit;
}
__syncthreads();
float biased_logit_exp =
expf(fmaxf(max_logit - s_max_logit, logit_thresh_min));
typedef hipcub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage presum_temp_storage;
BlockScan(presum_temp_storage)
.InclusiveSum(biased_logit_exp, presum_max_logit_exp);
float topp_exp_threshold;
if (threadIdx.x == blockDim.x - 1) {
topp_exp_threshold = p * presum_max_logit_exp;
}
__shared__ float s_presum_logit_exp_threshold;
if (presum_max_logit_exp > topp_exp_threshold) {
presum_max_logit_exp = CUDA_FLOAT_INF_NEG;
}
float logit_exp_threshold = blockReduceMax(presum_max_logit_exp);
if (threadIdx.x == 0) {
s_presum_logit_exp_threshold = logit_exp_threshold;
}
__syncthreads();
__shared__ float s_logit_threshold;
if (presum_max_logit_exp == s_presum_logit_exp_threshold) {
s_logit_threshold = max_logit;
}
__syncthreads();
  /* step2: each thread holds one logit larger than the nucleus threshold
   * logit, then sample from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx] +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]);
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_logit_threshold) test_num++;
if (logit >= s_logit_threshold && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = hiprand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
  /* if new sampled tid is not EOS, set unfinished to TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 write back new sampled ids */
if (threadIdx.x == 0) {
old_input_ids[token_idx_in_batch + 1] = s_tid;
}
}
template <typename T>
void ker_topp_sample_launcher(int batch_size, int batch_seq_len,
const int max_step, int logits_seq_len,
int max_thread_per_block, hipStream_t stream,
const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const float p,
int* unfinished, hiprandState_t* curandstate,
int eos_id) {
hipLaunchKernelGGL(( ker_topp_sample<T>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id);
}
template void ker_topp_sample_launcher<float>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const float* logits,
const float* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id);
template void ker_topp_sample_launcher<__half>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const __half* logits,
const __half* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id);
/**
@brief: ker_bias_gelu
add bias, activated by gelu
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
input: [batch_size * batch_seq_len, feature_dim]
bias: [feature_dim]
feature_dim: the dim of input feature
*/
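/* For reference, gelu() is defined elsewhere (presumably common.h); it is
 * assumed here to be the usual tanh approximation
 *   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))). */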
template <typename T>
__global__ void ker_bias_gelu(T* input, const T* bias, int feature_dim) {
int offset = blockIdx.x * feature_dim;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
input[cur_offset] = gelu<float>(input[cur_offset] + __ldg(&bias[idx]));
}
}
/* fp16 version */
template <>
__global__ void ker_bias_gelu<__half>(__half* input, const __half* bias,
int feature_dim) {
int offset = blockIdx.x * feature_dim;
half2* pinput = (half2*)input;
const half2* pbias = (const half2*)bias;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
pinput[cur_offset] =
gelu<half2>(__hadd2(pinput[cur_offset], __ldg(&pbias[idx])));
}
}
template <typename T>
void ker_bias_gelu_launcher(int batch_token_num, int block_dim,
hipStream_t stream, T* input, const T* bias,
int feature_dim) {
hipLaunchKernelGGL(( ker_bias_gelu<T>)
, dim3(batch_token_num), dim3(block_dim), 0, stream, input, bias, feature_dim);
}
template <>
void ker_bias_gelu_launcher<__half>(int batch_token_num, int block_dim,
hipStream_t stream, __half* input,
const __half* bias, int feature_dim) {
hipLaunchKernelGGL(( ker_bias_gelu<__half>)
, dim3(batch_token_num), dim3(block_dim), 0, stream, input, bias, feature_dim / 2);
}
template void ker_bias_gelu_launcher<float>(int batch_token_num, int block_dim,
hipStream_t stream, float* input,
const float* bias, int feature_dim);
template void ker_bias_gelu_launcher<__half>(int batch_token_num, int block_dim,
hipStream_t stream, __half* input,
const __half* bias,
int feature_dim);
__global__ void ker_curand_setup(hiprandState_t* state) {
  /* Each thread gets the same seed, a different sequence
     number, no offset */
hiprand_init(clock(), blockIdx.x, 0, &state[blockIdx.x]);
}
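/* ker_curand_setup is presumably launched with gridDim.x = batch_size and a
 * single thread per block, since the state array is indexed by blockIdx.x only
 * (see hiprand_uniform(curandstate + blockIdx.x) in the sampling kernels
 * above). */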
} // namespace cuda
} // namespace lightseq
| 1f24f2144548d32dfa1d809b47fb7d2b0b92f865.cu | #include "common.h"
#include "transformerKernels.h"
/**
@file
Implemented the cuda kernel function and its launcher
that required by transformer model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: select_beam_rough_topk
one block for one beam; compute the log seq probability ending with every token
in the vocab, based on the previous log seq probability and the current step's
logits, then select the rough topK candidates.
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, beam_size, vocab_size], cur step logit
logit_bias: [vocab_size], logit bias
seq_probs: [batch_size, beam_size], prefix sequence log probability
seq_score: [batch_size, beam_size], prefix sequence score
alive_seq: [batch_size, beam_size, max_step], prefix sequence id
can_idx: [batch_size, beam_size, vocab_size], topk candidate's index
can_score: [batch_size, beam_size, vocab_size], topk candidate's score
num_beam_can: [1 + batch_size * beam_size].
the first element saves the total number of topk candidates in the whole batch
    the remaining batch_size * beam_size elements save the number of topk
    candidates of each beam
vocab_size: the vocab size of decoder
max_step: max decode step
length_norm: length penalty value for the current step
cur_step: current step
diverse_lambda: lambda for diverse beam search
*/
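/* Score computed below, written out for clarity (derived from the code): for a
 * candidate token t generated from beam b, with biased logits l_v = logit_v +
 * bias_v,
 *   log p(t) = l_t - max_v l_v - log(sum_v exp(l_v - max_v l_v))
 *   can_score = max((seq_probs[b] + log p(t)) * length_norm,
 *                   min_log_probability + 1)
 *               + batch_id * min_log_probability
 * (blockIdx.x * min_log_probability when diverse_lambda != 0); the
 * min_log_probability offset keeps candidates of different batch elements (or
 * beams) apart when they are later sorted together. */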
template <typename T, int beam_size>
__global__ void select_beam_rough_topk(
const T* logits, const T* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, float diverse_lambda, int end_id) {
if (cur_step != 0 && alive_seq[blockIdx.x * max_step + cur_step] == end_id) {
// this is a finished beam
if (threadIdx.x == 0) {
num_beam_can[blockIdx.x + 1] = 1; // generate one candidate
int pos = atomicAdd(num_beam_can, 1); // get a candidate pos
if (diverse_lambda == 0) {
can_score[pos] =
seq_score[blockIdx.x]; // this beam's score will not be change
} else {
// add the beam id offset in score to sort in each beam
int batch_id = blockIdx.x / beam_size;
can_score[pos] = seq_score[blockIdx.x] +
(blockIdx.x - batch_id) * min_log_probability;
}
can_idx[pos] = end_id + (blockIdx.x % beam_size) * vocab_size; // EOS
}
return;
}
/* step1: compute each thread's max_logit and sum_exp_logit, store in
* rough_top_kth_logit, sum_exp_logit */
const int block_start = blockIdx.x * vocab_size;
const int left_idx = block_start + threadIdx.x;
const int right_idx = (blockIdx.x + 1) * vocab_size;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
float sum_exp_logit = 0;
for (int i = left_idx; i < right_idx; i += blockDim.x) {
float lgt = (float)logits[i] + (float)__ldg(&logit_bias[i - block_start]);
rough_top_kth_logit = fmaxf(rough_top_kth_logit, lgt);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
for (int i = left_idx; i < right_idx; i += blockDim.x) {
float lgt =
fmaxf((float)(logits[i]) + (float)__ldg(&logit_bias[i - block_start]) -
s_max_logit,
logit_thresh_min);
sum_exp_logit += expf(lgt);
}
/*
step2: compute rough top-kth-logits and sum_exp_logit among the whole beam,
saved into s_topk and
s_log_prob_base
*/
__shared__ float
s_log_prob_base; // prefix sequence log prob - log_sum_exp_logit
__shared__ float s_topk; // rough top k-th value of logits
__shared__ int num_cur_beam_can; // candidate number for this beam
sum_exp_logit = blockReduceSum(sum_exp_logit);
rough_top_kth_logit = blockRoughTopK<float, beam_size>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_log_prob_base = seq_probs[blockIdx.x] - logf(sum_exp_logit) - s_max_logit;
s_topk = rough_top_kth_logit;
num_cur_beam_can = 0;
}
/*
step3 : select the candidate token with logits bigger than s_topk,
compute the seq probability ended with them,
save the probability, token_index, selected token number.
*/
int idx = left_idx;
int batch_id = blockIdx.x / beam_size;
int batch_start_pos = batch_id * beam_size * vocab_size;
// int unk_vocab_id = vocab_size - 3; // last three element: unk, start, eos
__shared__ int l_n; // current iteration candidate number
for (int iter = 0; iter < (vocab_size + blockDim.x - 1) / blockDim.x;
iter++) {
// zero the counter
if (threadIdx.x == 0) l_n = 0;
__syncthreads();
float lgt = CUDA_FLOAT_INF_NEG - 1.f; // min s_topk is CUDA_FLOAT_INF_NEG
int pos;
int vocab_id = idx - block_start;
// if ((vocab_id < vocab_size) && (vocab_id != unk_vocab_id)) {
if (vocab_id < vocab_size) {
lgt = (float)(logits[idx]) + (float)__ldg(&logit_bias[vocab_id]);
if (lgt >= s_topk)
// pos: relative pos inside this iteration
pos = atomicAdd(&l_n, 1);
}
__syncthreads();
// leader increments the global counter
if (threadIdx.x == 0) {
atomicAdd(&num_cur_beam_can, l_n);
l_n = atomicAdd(num_beam_can, l_n);
}
__syncthreads();
// threads with true predicates write their elements
if ((lgt >= s_topk)) {
pos += l_n; // increment local pos by global counter
if (diverse_lambda == 0) {
can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
min_log_probability + 1.f) +
batch_id * min_log_probability;
} else {
can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
min_log_probability + 1.f) +
blockIdx.x * min_log_probability;
}
can_idx[pos] = idx - batch_start_pos;
}
__syncthreads();
idx += blockDim.x;
}
if (threadIdx.x == 0) {
num_beam_can[blockIdx.x + 1] = num_cur_beam_can;
}
}
template <typename T>
void select_beam_rough_topk_launcher(
const T* logits, const T* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, int step_token_num,
int max_thread_per_block, cudaStream_t stream, int beam_size,
float diverse_lambda, int end_id) {
if (beam_size == 1)
select_beam_rough_topk<T, 1>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 2)
select_beam_rough_topk<T, 2>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 4)
select_beam_rough_topk<T, 4>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 8)
select_beam_rough_topk<T, 8>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 16)
select_beam_rough_topk<T, 16>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
if (beam_size == 32)
select_beam_rough_topk<T, 32>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
logits, logit_bias, seq_probs, seq_score, alive_seq, can_idx,
can_score, num_beam_can, vocab_size, max_step, length_norm,
cur_step, diverse_lambda, end_id);
}
template void select_beam_rough_topk_launcher<float>(
const float* logits, const float* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, int step_token_num,
int max_thread_per_block, cudaStream_t stream, int beam_size,
float diverse_lambda, int end_id);
template void select_beam_rough_topk_launcher<__half>(
const __half* logits, const __half* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step, int step_token_num,
int max_thread_per_block, cudaStream_t stream, int beam_size,
float diverse_lambda, int end_id);
/**
@brief: ker_diverse_beam_search
Add different diverse score to can_score in each beam
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
can_score: [batch_size * beam_size * candidates_size] candidates_size is
dynamic
can_ids: [batch_size * beam_size * candidates_size]
num_beam_can: [1 + batch_size * beam_size]
*/
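/* Example of the re-encoding performed below: with beam_size = 4, the rank-2
 * candidate (idx - can_pos == 2) coming from original beam 1 is rewritten as
 * can_ids = (1 + 2 * 4) * vocab_size + vocab_id, i.e. an effective beam id of
 * 9 that ker_refresh_result later decodes back into rank 2 of beam 1. */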
__global__ void ker_diverse_beam_search(float* can_score, int* can_ids,
int* num_beam_can, int beam_size,
float diverse_lambda, int vocab_size) {
int total_candidates = num_beam_can[0];
num_beam_can += 1;
int can_pos = num_beam_can[blockIdx.x];
int batch_id = blockIdx.x / beam_size;
int beam_score_left_idx = can_pos + threadIdx.x;
int beam_score_right_idx = blockIdx.x == (gridDim.x - 1)
? total_candidates
: num_beam_can[blockIdx.x + 1];
for (int idx = beam_score_left_idx; idx < beam_score_right_idx;
idx += blockDim.x) {
atomicAdd(can_score + idx, batch_id * min_log_probability -
min_log_probability * blockIdx.x -
diverse_lambda * (idx - can_pos + 1));
int ori_can_idx = can_ids[idx]; // can_beam_id * vocab_size + vocab_id
int can_beam_id = ori_can_idx / vocab_size;
int can_vocab_id = ori_can_idx % vocab_size;
can_ids[idx] =
(can_beam_id + (idx - can_pos) * beam_size) * vocab_size + can_vocab_id;
}
}
void ker_diverse_beam_search_launcher(float* can_score, int* can_ids,
int* num_beam_can, int step_token_num,
int max_thread_per_block,
cudaStream_t stream, int beam_size,
float diverse_lambda, int vocab_size) {
ker_diverse_beam_search<<<step_token_num, max_thread_per_block, 0, stream>>>(
can_score, can_ids, num_beam_can, beam_size, diverse_lambda, vocab_size);
}
/**
@brief: ker_bias_relu
add bias, activated by relu
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
input: [batch_size * batch_seq_len, feature_dim]
bias: [feature_dim]
feature_dim: the dim of input feature
*/
template <typename T>
__global__ void ker_bias_relu(T* input, const T* bias, int feature_dim) {
int offset = blockIdx.x * feature_dim;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
input[cur_offset] = max(input[cur_offset] + __ldg(&bias[idx]), (T)0.f);
}
}
template <>
__global__ void ker_bias_relu<__half>(__half* input, const __half* bias,
int feature_dim) {
int offset = blockIdx.x * feature_dim;
half2* pinput = (half2*)input;
const half2* pbias = (const half2*)bias;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
float2 f2_inp = __half22float2(pinput[cur_offset]);
float2 f2_bias = __half22float2(__ldg(&pbias[idx]));
f2_inp.x = fmaxf(f2_inp.x + f2_bias.x, 0.f);
f2_inp.y = fmaxf(f2_inp.y + f2_bias.y, 0.f);
pinput[cur_offset] = __float22half2_rn(f2_inp);
}
}
template <typename T>
void ker_bias_relu_launcher(int batch_token_num, int block_dim,
cudaStream_t stream, T* input, const T* bias,
int feature_dim) {
ker_bias_relu<T>
<<<batch_token_num, block_dim, 0, stream>>>(input, bias, feature_dim);
}
template <>
void ker_bias_relu_launcher<__half>(int batch_token_num, int block_dim,
cudaStream_t stream, __half* input,
const __half* bias, int feature_dim) {
ker_bias_relu<__half>
<<<batch_token_num, block_dim, 0, stream>>>(input, bias, feature_dim / 2);
}
template void ker_bias_relu_launcher<float>(int batch_token_num, int block_dim,
cudaStream_t stream, float* input,
const float* bias, int feature_dim);
template void ker_bias_relu_launcher<__half>(int batch_token_num, int block_dim,
cudaStream_t stream, __half* input,
const __half* bias,
int feature_dim);
/**
@brief: ker_norm_layer
layer normalization
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
matrix: [batch_size, batch_seq_len, hidden_size]
scale: [hidden_size]
bias: [hidden_size]
*/
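/* In equation form, the kernel below computes, per token, over the hidden dim:
 *   y_i = (x_i - mean(x)) * rsqrt(var(x) + epsilon) * scale_i + bias_i */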
template <typename T>
__global__ void ker_norm_layer(T* matrix, const T* scale, const T* bias,
int hidden_size) {
uint block_start = blockIdx.x * hidden_size;
uint start = block_start + threadIdx.x;
uint end = block_start + hidden_size;
float val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
val += matrix[i];
}
// step 0. compute mean
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / float(hidden_size);
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float tmp = matrix[i] - s_mean;
val += tmp * tmp;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0)
s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
val = matrix[i] - s_mean;
matrix[i] = val * s_var * __ldg(&scale[i - block_start]) +
__ldg(&bias[i - block_start]);
}
}
template <>
__global__ void ker_norm_layer<__half>(__half* matrix, const __half* scale,
const __half* bias,
int half_hidden_size) {
uint block_start = blockIdx.x * half_hidden_size;
uint start = block_start + threadIdx.x;
uint end = blockIdx.x * half_hidden_size + half_hidden_size;
half2* pmatrix = (half2*)matrix;
const half2* pscale = (const half2*)scale;
const half2* pbias = (const half2*)bias;
float mean_dim = float(half_hidden_size) * 2.f;
float val = 0.0;
// step 0. compute mean
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pmatrix[i]);
val += local_f2.x + local_f2.y;
}
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / mean_dim;
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pmatrix[i]);
float tmpx = local_f2.x - s_mean;
float tmpy = local_f2.y - s_mean;
val += tmpx * tmpx + tmpy * tmpy;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / mean_dim + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
float2 scale_val = __half22float2(__ldg(&pscale[i - block_start]));
float2 bias_val = __half22float2(__ldg(&pbias[i - block_start]));
float2 local_f2 = safe_half2_to_float2(pmatrix[i]);
local_f2.x = (local_f2.x - s_mean) * s_var * scale_val.x + bias_val.x;
local_f2.y = (local_f2.y - s_mean) * s_var * scale_val.y + bias_val.y;
pmatrix[i] = __float22half2_rn(local_f2);
}
}
template <typename T>
void ker_norm_layer_launcher(int token_num, int hidden_size,
cudaStream_t stream, T* matrix, const T* scale,
const T* bias, int max_thread_per_block) {
ker_norm_layer<T><<<token_num, max_thread_per_block, 0, stream>>>(
matrix, scale, bias, hidden_size);
}
template <>
void ker_norm_layer_launcher<__half>(int token_num, int hidden_size,
cudaStream_t stream, __half* matrix,
const __half* scale, const __half* bias,
int max_thread_per_block) {
ker_norm_layer<__half><<<token_num, max_thread_per_block, 0, stream>>>(
matrix, scale, bias, hidden_size / 2);
}
template void ker_norm_layer_launcher<float>(int token_num, int hidden_size,
cudaStream_t stream, float* matrix,
const float* scale,
const float* bias,
int max_thread_per_block);
template void ker_norm_layer_launcher<__half>(
int token_num, int hidden_size, cudaStream_t stream, __half* matrix,
const __half* scale, const __half* bias, int max_thread_per_block);
/**
@brief: ker_norm_layer_resual
layer normalization, and add a residual_bias to the input
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
matrix: [batch_size, batch_seq_len, hidden_size]
scale: [hidden_size]
bias: [hidden_size]
residual_bias: [hidden_size]
*/
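/* Residual handling in the kernel below: `output` always receives the
 * normalized activations, while `input` is updated in place to carry the
 * residual stream forward: output + residual_bias when is_post_ln is true,
 * input + residual_bias otherwise (pre-LN). */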
template <typename T>
__global__ void ker_norm_layer_resual(T* input, T* output, const T* scale,
const T* bias, const T* residual_bias,
const int hidden_size, bool is_post_ln) {
uint block_start = blockIdx.x * hidden_size;
uint start = block_start + threadIdx.x;
uint end = block_start + hidden_size;
float val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
val += input[i];
}
// step 0. compute mean
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / float(hidden_size);
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float tmp = input[i] - s_mean;
val += tmp * tmp;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0)
s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
val = input[i] - s_mean;
output[i] = val * s_var * __ldg(&scale[i - block_start]) +
__ldg(&bias[i - block_start]);
if (is_post_ln) {
input[i] = output[i] + __ldg(&residual_bias[i - block_start]);
} else {
input[i] += __ldg(&residual_bias[i - block_start]);
}
}
}
template <>
__global__ void ker_norm_layer_resual<__half>(
__half* input, __half* output, const __half* scale, const __half* bias,
const __half* residual_bias, const int half_hidden_size, bool is_post_ln) {
uint block_start = blockIdx.x * half_hidden_size;
uint start = block_start + threadIdx.x;
uint end = blockIdx.x * half_hidden_size + half_hidden_size;
half2* pinput = (half2*)input;
half2* poutput = (half2*)output;
const half2* pscale = (const half2*)scale;
const half2* pbias = (const half2*)bias;
const half2* presidual_bias = (const half2*)residual_bias;
float mean_dim = float(half_hidden_size) * 2.f;
float val = 0.0;
// step 0. compute mean
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pinput[i]);
val += local_f2.x + local_f2.y;
}
__shared__ float s_mean;
float reduce_res = blockReduceSum<float>(val);
if (threadIdx.x == 0) s_mean = reduce_res / mean_dim;
__syncthreads();
// step 1. compute variance
val = 0.0;
for (uint i = start; i < end; i += blockDim.x) {
float2 local_f2 = safe_half2_to_float2(pinput[i]);
float tmpx = local_f2.x - s_mean;
float tmpy = local_f2.y - s_mean;
val += tmpx * tmpx + tmpy * tmpy;
}
__shared__ float s_var;
reduce_res = blockReduceSum(val);
if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / mean_dim + epsilon);
__syncthreads();
// step 2. layer norm
for (uint i = start; i < end; i += blockDim.x) {
float2 scale_val = __half22float2(__ldg(&pscale[i - block_start]));
float2 bias_val = __half22float2(__ldg(&pbias[i - block_start]));
float2 local_f2 = safe_half2_to_float2(pinput[i]);
local_f2.x = (local_f2.x - s_mean) * s_var * scale_val.x + bias_val.x;
local_f2.y = (local_f2.y - s_mean) * s_var * scale_val.y + bias_val.y;
poutput[i] = __float22half2_rn(local_f2);
if (!is_post_ln) {
local_f2 = safe_half2_to_float2(pinput[i]);
}
float2 residual_bias_val =
__half22float2(__ldg(&presidual_bias[i - block_start]));
float2 new_input_f2;
new_input_f2.x = local_f2.x + residual_bias_val.x;
new_input_f2.y = local_f2.y + residual_bias_val.y;
pinput[i] = __float22half2_rn(new_input_f2);
}
}
template <typename T>
void ker_norm_layer_resual_launcher(int token_num, int hidden_size,
cudaStream_t stream, T* input, T* output,
const T* scale, const T* bias,
const T* residual_bias,
const int max_thread_per_block,
bool is_post_ln) {
ker_norm_layer_resual<T><<<token_num, max_thread_per_block, 0, stream>>>(
input, output, scale, bias, residual_bias, hidden_size, is_post_ln);
}
template <>
void ker_norm_layer_resual_launcher<__half>(int token_num, int hidden_size,
cudaStream_t stream, __half* input,
__half* output, const __half* scale,
const __half* bias,
const __half* residual_bias,
const int max_thread_per_block,
bool is_post_ln) {
ker_norm_layer_resual<__half><<<token_num, max_thread_per_block, 0, stream>>>(
input, output, scale, bias, residual_bias, hidden_size / 2, is_post_ln);
}
template void ker_norm_layer_resual_launcher<float>(
int token_num, int hidden_size, cudaStream_t stream, float* input,
float* output, const float* scale, const float* bias,
const float* residual_bias, const int max_thread_per_block,
bool is_post_ln);
template void ker_norm_layer_resual_launcher<__half>(
int token_num, int hidden_size, cudaStream_t stream, __half* input,
__half* output, const __half* scale, const __half* bias,
const __half* residual_bias, const int max_thread_per_block,
bool is_post_ln);
/**
@brief: ker_enc_embedding
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, batch_seq_len]
output: result, [batch_size, batch_seq_len, hidden_size]
padding_mask: records which tokens are padding, [batch_size, batch_seq_len]
padding_id: the padding token id
*/
template <typename T>
__global__ void ker_enc_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* padding_mask, int padding_id,
const int hidden_size) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int start = target_pos * hidden_size + threadIdx.x;
int end = (target_pos + 1) * hidden_size;
int tid = token_id[target_pos];
if (tid == padding_id) {
// for padding id
if (threadIdx.x == 0) padding_mask[target_pos] = 1;
for (uint i = start; i < end; i += blockDim.x) {
// output[target_pos * blockDim.x + threadIdx.x] = 0.f;
output[i] = 0.f;
}
return;
}
if (threadIdx.x == 0) {
padding_mask[target_pos] = 0;
}
for (uint i = start; i < end; i += blockDim.x) {
int offset = i - target_pos * hidden_size;
output[i] = token_emb[tid * hidden_size + offset] +
pos_emb[blockIdx.y * hidden_size + offset];
}
}
template <>
__global__ void ker_enc_embedding<__half>(const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* padding_mask, int padding_id,
const int half_hidden_size) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int start = target_pos * half_hidden_size + threadIdx.x;
int end = (target_pos + 1) * half_hidden_size;
int tid = token_id[target_pos];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id
if (threadIdx.x == 0) padding_mask[target_pos] = 1;
for (uint i = start; i < end; i += blockDim.x) {
output_h[i] = __float2half2_rn(0.f);
}
return;
}
if (threadIdx.x == 0) {
padding_mask[target_pos] = 0;
}
for (uint i = start; i < end; i += blockDim.x) {
int offset = i - target_pos * half_hidden_size;
float2 te = __half22float2(
((const half2*)token_emb)[tid * half_hidden_size + offset]);
float2 pe = __half22float2(
((const half2*)pos_emb)[blockIdx.y * half_hidden_size + offset]);
te.x += pe.x;
te.y += pe.y;
output_h[i] = __float22half2_rn(te);
}
}
template <typename T>
void ker_enc_embedding_launcher(int batch_size, int batch_seq_len,
int hidden_size, cudaStream_t stream,
const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* padding_mask, int padding_id,
int max_thread_per_block) {
ker_enc_embedding<T>
<<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(
token_emb, pos_emb, token_id, output, padding_mask, padding_id,
hidden_size);
}
template <>
void ker_enc_embedding_launcher<__half>(int batch_size, int batch_seq_len,
int hidden_size, cudaStream_t stream,
const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* padding_mask, int padding_id,
int max_thread_per_block) {
ker_enc_embedding<__half>
<<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(
token_emb, pos_emb, token_id, output, padding_mask, padding_id,
hidden_size / 2);
}
template void ker_enc_embedding_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int* padding_mask, int padding_id, int max_thread_per_block);
template void ker_enc_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* padding_mask, int padding_id,
int max_thread_per_block);
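/* Usage sketch (illustrative only): one plausible way to call the float launcher
   above. All sizes are hypothetical and the device pointers are assumed to be
   allocated and filled elsewhere; one block handles one (batch, position) pair. */
static void example_enc_embedding(const float* d_token_emb, const float* d_pos_emb,
                                  const int* d_token_id, float* d_output,
                                  int* d_padding_mask, cudaStream_t stream) {
  int batch_size = 8, batch_seq_len = 32, hidden_size = 512;  // hypothetical
  int padding_id = 0, max_thread_per_block = 1024;
  ker_enc_embedding_launcher<float>(batch_size, batch_seq_len, hidden_size, stream,
                                    d_token_emb, d_pos_emb, d_token_id, d_output,
                                    d_padding_mask, padding_id,
                                    max_thread_per_block);
}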
/**
@brief: ker_dec_embedding
for decoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
token_emb: [hidden_size, vocab_size], note that it is transposed compared with the encoder
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, beam_size, max_step]
output: result, [batch_size, beam_size, hidden_size]
step: current step
max_step: max decoder steps
vocab_size: vocabulary size
*/
template <typename T>
__global__ void ker_dec_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output, int step,
int max_step, int vocab_size,
int hidden_size) {
for (uint offset = threadIdx.x; offset < hidden_size; offset += blockDim.x) {
int token_idx = token_id[blockIdx.x * max_step + step];
output[blockIdx.x * hidden_size + offset] =
token_emb[offset * vocab_size + token_idx] +
pos_emb[step * hidden_size + offset];
}
}
template <typename T>
void ker_dec_embedding_launcher(int step_token_num, int hidden_size,
cudaStream_t stream, const T* token_emb,
const T* pos_emb, const int* token_id,
T* output, int step, int max_step,
int vocab_size, int max_thread_per_block) {
ker_dec_embedding<T><<<step_token_num, max_thread_per_block, 0, stream>>>(
token_emb, pos_emb, token_id, output, step, max_step, vocab_size,
hidden_size);
}
template void ker_dec_embedding_launcher<float>(
int step_token_num, int hidden_size, cudaStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int step, int max_step, int vocab_size,
int max_thread_per_block);
template void ker_dec_embedding_launcher<__half>(
int step_token_num, int hidden_size, cudaStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int step, int max_step, int vocab_size,
int max_thread_per_block);
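/*
  Worked index example for the transposed decoder embedding above (hypothetical
  numbers): with vocab_size = 32000, hidden_size = 512, step = 3, token_idx = 7
  and hidden offset = 5, the kernel reads token_emb[5 * 32000 + 7] = token_emb[160007]
  (row 5 of the [hidden_size, vocab_size] matrix) and pos_emb[3 * 512 + 5] =
  pos_emb[1541], and writes their sum to output[blockIdx.x * 512 + 5].
*/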
/**
@brief: ker_arrange_encself_qkv
split and reshape ori_qkv matrix into new_q, new_k, new_v during encoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = 3
blockDim.x = max_thread_per_block
@param
ori_qkv: [batch_size, batch_seq_len, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_qkv: [3, batch_size, head_num, batch_seq_len, dim_per_head]
max_batch_dim: max_batch_size * max_seq_len * hidden_size
batch_seq_len: the sequence length of the current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_encself_qkv(const T* ori_qkv, const T* qkv_bias,
T* new_qkv, int max_batch_dim,
int batch_seq_len, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int qkv_offset = max_batch_dim * blockIdx.y;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
new_qkv[qkv_offset + target_id] =
ori_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i] +
__ldg(&qkv_bias[blockIdx.y * hidden_size + i]);
}
}
template <>
__global__ void ker_arrange_encself_qkv<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num) {
int hidden_size = dim_per_head * head_num;
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int qkv_offset = max_batch_dim * blockIdx.y;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
const half2* p_ori_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
half2* p_new_qkv = (half2*)new_qkv;
p_new_qkv[qkv_offset + target_id] = __hadd2(
p_ori_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i],
__ldg(&p_bias[blockIdx.y * hidden_size + i]));
}
}
template <typename T>
void ker_arrange_encself_qkv_launcher(int batch_token_num, int hidden_size,
cudaStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_qkv,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num,
int max_thread_per_block) {
ker_arrange_encself_qkv<T>
<<<dim3(batch_token_num, 3), max_thread_per_block, 0, stream>>>(
ori_qkv, qkv_bias, new_qkv, max_batch_dim, batch_seq_len,
dim_per_head, head_num);
}
template <>
void ker_arrange_encself_qkv_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num,
int max_thread_per_block) {
ker_arrange_encself_qkv<__half>
<<<dim3(batch_token_num, 3), max_thread_per_block, 0, stream>>>(
ori_qkv, qkv_bias, new_qkv, max_batch_dim / 2, batch_seq_len,
dim_per_head / 2, head_num);
}
template void ker_arrange_encself_qkv_launcher<float>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num,
int max_thread_per_block);
template void ker_arrange_encself_qkv_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_qkv,
int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num,
int max_thread_per_block);
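/* Usage sketch (illustrative only) for the float launcher above: batch_token_num
   is batch_size * batch_seq_len and max_batch_dim is
   max_batch_size * max_seq_len * hidden_size, as documented. All sizes are
   hypothetical; device pointers are assumed to be allocated elsewhere. */
static void example_arrange_encself_qkv(const float* d_ori_qkv,
                                        const float* d_qkv_bias, float* d_new_qkv,
                                        cudaStream_t stream) {
  int batch_size = 8, batch_seq_len = 32, head_num = 8, dim_per_head = 64;
  int hidden_size = head_num * dim_per_head;  // 512
  int max_batch_size = 8, max_seq_len = 256;
  int max_batch_dim = max_batch_size * max_seq_len * hidden_size;
  ker_arrange_encself_qkv_launcher<float>(batch_size * batch_seq_len, hidden_size,
                                          stream, d_ori_qkv, d_qkv_bias, d_new_qkv,
                                          max_batch_dim, batch_seq_len,
                                          dim_per_head, head_num,
                                          /*max_thread_per_block=*/1024);
}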
/**
@brief: ker_arrange_decself_qkv
split and reshape ori_qkv matrix into new_q, new_k, new_v during decoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * beam_size
gridDim.y = 3
blockDim.x = max_thread_per_block
@param
ori_qkv: [batch_size, beam_size, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_q: new query. [batch_size, beam_size, hidden_size]
new_k: new key. [batch_size, beam_size, head_num, max_step, dim_per_head]
new_v: new value. [batch_size, beam_size, head_num, max_step, dim_per_head]
head_num: head number in multi-head attention
dim_per_head: dim of one head in multi-head attention
max_step: max decode step
step_id: current step id
*/
template <typename T>
__global__ void ker_arrange_decself_qkv(const T* ori_qkv, const T* qkv_bias,
T* new_q, T* new_k, T* new_v,
int head_num, int dim_per_head,
int max_step, int step_id) {
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
    // blockDim.x is assumed to be no smaller than hidden_size
T val = ori_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i] +
__ldg(&qkv_bias[blockIdx.y * hidden_size + i]);
    int seq_id = blockIdx.x;  // seq_id = batch_id * beam_size + beam_id
if (blockIdx.y == 0) {
// for query
new_q[seq_id * hidden_size + i] = val;
return;
}
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int target_id = targetid_4dim(seq_id, head_id, step_id, dim_id, head_num,
max_step, dim_per_head);
if (blockIdx.y == 1) {
// for key
new_k[target_id] = val;
} else {
// for value
new_v[target_id] = val;
}
}
}
template <>
__global__ void ker_arrange_decself_qkv<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* new_v, int head_num, int dim_per_head, int max_step, int step_id) {
int half_hidden_size = dim_per_head * head_num;
const half2* p_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
half2 val = __hadd2(
p_qkv[(blockIdx.x * gridDim.y + blockIdx.y) * half_hidden_size + i],
__ldg(&p_bias[blockIdx.y * half_hidden_size + i]));
    // seq_id = batch_id * beam_size + beam_id
int seq_id = blockIdx.x;
if (blockIdx.y == 0) {
// for query
((half2*)new_q)[seq_id * half_hidden_size + i] = val;
return;
}
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int target_id = targetid_4dim(seq_id, head_id, step_id, dim_id, head_num,
max_step, dim_per_head);
if (blockIdx.y == 1) {
// for key
((half2*)new_k)[target_id] = val;
} else {
// for value
((half2*)new_v)[target_id] = val;
}
}
}
template <typename T>
void ker_arrange_decself_qkv_launcher(int step_token_num, int hidden_size,
cudaStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_q, T* new_k,
T* new_v, int head_num, int dim_per_head,
int max_step, int step_id,
int max_thread_per_block) {
ker_arrange_decself_qkv<T>
<<<dim3(step_token_num, 3), max_thread_per_block, 0, stream>>>(
ori_qkv, qkv_bias, new_q, new_k, new_v, head_num, dim_per_head,
max_step, step_id);
}
template <>
void ker_arrange_decself_qkv_launcher<__half>(
int step_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* new_v, int head_num, int dim_per_head, int max_step, int step_id,
int max_thread_per_block) {
ker_arrange_decself_qkv<__half>
<<<dim3(step_token_num, 3), max_thread_per_block, 0, stream>>>(
ori_qkv, qkv_bias, new_q, new_k, new_v, head_num, dim_per_head / 2,
max_step, step_id);
}
template void ker_arrange_decself_qkv_launcher<float>(
int step_token_num, int hidden_size, cudaStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_q, float* new_k,
float* new_v, int head_num, int dim_per_head, int max_step, int step_id,
int max_thread_per_block);
template void ker_arrange_decself_qkv_launcher<__half>(
int step_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* new_v, int head_num, int dim_per_head, int max_step, int step_id,
int max_thread_per_block);
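/*
  Worked index example for the decoder self-attention cache above (illustrative;
  assumes targetid_4dim is the usual row-major flatten over
  [batch_size * beam_size, head_num, max_step, dim_per_head], which matches the
  documented new_k / new_v shapes). With head_num = 8, max_step = 256,
  dim_per_head = 64, seq_id = 3, head_id = 2, step_id = 5, dim_id = 7:
      target_id = ((3 * 8 + 2) * 256 + 5) * 64 + 7 = 426311
  so the current step's key lands in slot step_id = 5 of beam 3's cache.
*/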
/**
@brief: ker_arrange_encdec_kv
split and reshape ori_kv matrix into new_k, new_v before enc-dec attention
it will be called once on the encoder output
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = dec_layer_num * 2
blockDim.x = max_thread_per_block
@param
ori_kv: [batch_size, batch_seq_len, dec_layer_num, 2, hidden_size]
kv_bias: [dec_layer_num, 2, hidden_size]
new_k: [batch_size, head_num, batch_seq_len, dim_per_head] per layer,
with an offset in offset_per_layer between layers.
new_v: [batch_size, head_num, batch_seq_len, dim_per_head] per layer,
with an offset in offset_per_layer between layers.
offset_per_layer: max_batch_size * max_step * hidden_size
batch_seq_len: sequence length of current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_encdec_kv(const T* ori_kv, const T* kv_bias,
T* new_k, T* new_v, int offset_per_layer,
int batch_seq_len, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
T val = ori_kv[(blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i] +
__ldg(&kv_bias[blockIdx.y * hidden_size + i]);
int seq_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int layer_id = blockIdx.y >> 1;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int layer_offset = layer_id * offset_per_layer;
int target_id = targetid_4dim(seq_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head) +
layer_offset;
if (blockIdx.y & 1) {
// for value
new_v[target_id] = val;
} else {
// for key
new_k[target_id] = val;
}
}
}
template <>
__global__ void ker_arrange_encdec_kv<__half>(
const __half* ori_kv, const __half* kv_bias, __half* new_k, __half* new_v,
int offset_per_layer, int batch_seq_len, int dim_per_head, int head_num) {
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
const half2* p_ori_kv = (const half2*)ori_kv;
const half2* p_kv_bias = (const half2*)kv_bias;
half2 val = __hadd2(
p_ori_kv[(blockIdx.x * gridDim.y + blockIdx.y) * half_hidden_size + i],
__ldg(&p_kv_bias[blockIdx.y * half_hidden_size + i]));
int seq_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int layer_id = blockIdx.y >> 1;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int layer_offset = layer_id * offset_per_layer;
int target_id = targetid_4dim(seq_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head) +
layer_offset;
if (blockIdx.y & 1) {
// for value
((half2*)new_v)[target_id] = val;
} else {
// for key
((half2*)new_k)[target_id] = val;
}
}
}
template <typename T>
void ker_arrange_encdec_kv_launcher(int batch_token_num, int dec_layer_num,
int hidden_size, cudaStream_t stream,
const T* ori_kv, const T* kv_bias, T* new_k,
T* new_v, int offset_per_layer,
int batch_seq_len, int dim_per_head,
int head_num, int max_thread_per_block) {
ker_arrange_encdec_kv<T>
<<<dim3(batch_token_num, dec_layer_num * 2), max_thread_per_block, 0,
stream>>>(ori_kv, kv_bias, new_k, new_v, offset_per_layer,
batch_seq_len, dim_per_head, head_num);
}
template <>
void ker_arrange_encdec_kv_launcher<__half>(
int batch_token_num, int dec_layer_num, int hidden_size,
cudaStream_t stream, const __half* ori_kv, const __half* kv_bias,
__half* new_k, __half* new_v, int offset_per_layer, int batch_seq_len,
int dim_per_head, int head_num, int max_thread_per_block) {
ker_arrange_encdec_kv<__half>
<<<dim3(batch_token_num, dec_layer_num * 2), max_thread_per_block, 0,
stream>>>(ori_kv, kv_bias, new_k, new_v, offset_per_layer / 2,
batch_seq_len, dim_per_head / 2, head_num);
}
template void ker_arrange_encdec_kv_launcher<float>(
int batch_token_num, int dec_layer_num, int hidden_size,
cudaStream_t stream, const float* ori_kv, const float* kv_bias,
float* new_k, float* new_v, int offset_per_layer, int batch_seq_len,
int dim_per_head, int head_num, int max_thread_per_block);
template void ker_arrange_encdec_kv_launcher<__half>(
int batch_token_num, int dec_layer_num, int hidden_size,
cudaStream_t stream, const __half* ori_kv, const __half* kv_bias,
__half* new_k, __half* new_v, int offset_per_layer, int batch_seq_len,
int dim_per_head, int head_num, int max_thread_per_block);
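/* Usage sketch (illustrative only) for the float launcher above: batch_token_num
   is batch_size * batch_seq_len and offset_per_layer is
   max_batch_size * max_step * hidden_size, as documented. Hypothetical sizes;
   device pointers are assumed to be allocated elsewhere. */
static void example_arrange_encdec_kv(const float* d_ori_kv, const float* d_kv_bias,
                                      float* d_new_k, float* d_new_v,
                                      cudaStream_t stream) {
  int batch_size = 8, batch_seq_len = 32, dec_layer_num = 6;
  int head_num = 8, dim_per_head = 64, hidden_size = head_num * dim_per_head;
  int max_batch_size = 8, max_step = 256;
  int offset_per_layer = max_batch_size * max_step * hidden_size;
  ker_arrange_encdec_kv_launcher<float>(batch_size * batch_seq_len, dec_layer_num,
                                        hidden_size, stream, d_ori_kv, d_kv_bias,
                                        d_new_k, d_new_v, offset_per_layer,
                                        batch_seq_len, dim_per_head, head_num,
                                        /*max_thread_per_block=*/1024);
}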
/**
@brief: ker_arrange_encdec_q
reshape ori_q into new_q and add bias
during enc-dec attention
ori_q is the result of gemm
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
ori_q: [batch_size, beam_size, hidden_size]
q_bias: [hidden_size]
new_q: [batch_size, head_num, beam_size, dim_per_head]
beam_size: beam size of beam search
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_encdec_q(const T* ori_q, const T* q_bias, T* new_q,
int beam_size, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
T val = ori_q[blockIdx.x * hidden_size + i] + __ldg(&q_bias[i]);
int batch_id = blockIdx.x / beam_size;
int beam_id = blockIdx.x % beam_size;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
new_q[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num, beam_size,
dim_per_head)] = val;
}
}
template <>
__global__ void ker_arrange_encdec_q<__half>(const __half* ori_q,
const __half* q_bias,
__half* new_q, int beam_size,
int dim_per_head, int head_num) {
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
const half2* p_q = (const half2*)ori_q;
const half2* p_bias = (const half2*)q_bias;
half2 val =
__hadd2(p_q[blockIdx.x * half_hidden_size + i], __ldg(&p_bias[i]));
int batch_id = blockIdx.x / beam_size;
int beam_id = blockIdx.x % beam_size;
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
((half2*)new_q)[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num,
beam_size, dim_per_head)] = val;
}
}
template <typename T>
void ker_arrange_encdec_q_launcher(int step_token_num, int hidden_size,
cudaStream_t stream, const T* ori_q,
const T* q_bias, T* new_q, int beam_size,
int dim_per_head, int head_num,
int max_thread_per_block) {
ker_arrange_encdec_q<T><<<step_token_num, max_thread_per_block, 0, stream>>>(
ori_q, q_bias, new_q, beam_size, dim_per_head, head_num);
}
template <>
void ker_arrange_encdec_q_launcher<__half>(
int step_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_q, const __half* q_bias, __half* new_q, int beam_size,
int dim_per_head, int head_num, int max_thread_per_block) {
ker_arrange_encdec_q<__half>
<<<step_token_num, max_thread_per_block, 0, stream>>>(
ori_q, q_bias, new_q, beam_size, dim_per_head / 2, head_num);
}
template void ker_arrange_encdec_q_launcher<float>(
int step_token_num, int hidden_size, cudaStream_t stream,
const float* ori_q, const float* q_bias, float* new_q, int beam_size,
int dim_per_head, int head_num, int max_thread_per_block);
template void ker_arrange_encdec_q_launcher<__half>(
int step_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_q, const __half* q_bias, __half* new_q, int beam_size,
int dim_per_head, int head_num, int max_thread_per_block);
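/* Usage sketch (illustrative only) for the float launcher above: step_token_num
   is batch_size * beam_size, i.e. one block per live beam. Hypothetical sizes;
   device pointers are assumed to be allocated elsewhere. */
static void example_arrange_encdec_q(const float* d_ori_q, const float* d_q_bias,
                                     float* d_new_q, cudaStream_t stream) {
  int batch_size = 8, beam_size = 4, head_num = 8, dim_per_head = 64;
  ker_arrange_encdec_q_launcher<float>(batch_size * beam_size,
                                       head_num * dim_per_head, stream, d_ori_q,
                                       d_q_bias, d_new_q, beam_size, dim_per_head,
                                       head_num, /*max_thread_per_block=*/1024);
}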
/**
@brief: ker_correlation_softmax_encself
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * batch_seq_len
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, batch_seq_len, batch_seq_len]
src_padding_mask: [batch_size, batch_seq_len],
indicating which token is a padding token.
*/
template <typename T>
__global__ void ker_correlation_softmax_encself(T* correlation,
const int* src_padding_mask) {
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (src_padding_mask[blockIdx.x * blockDim.x + blockIdx.y % blockDim.x]) {
correlation[idx] = (T) 0.f;
return;
}
int mask = src_padding_mask[blockIdx.x * blockDim.x + threadIdx.x];
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_correlation_softmax_encself_launcher(int batch_size, int batch_seq_len,
int head_num, cudaStream_t stream,
T* correlation,
const int* src_padding_mask) {
ker_correlation_softmax_encself<T>
<<<dim3(batch_size, head_num * batch_seq_len), batch_seq_len, 0,
stream>>>(correlation, src_padding_mask);
}
template void ker_correlation_softmax_encself_launcher<float>(
int batch_size, int batch_seq_len, int head_num, cudaStream_t stream,
float* correlation, const int* src_padding_mask);
template void ker_correlation_softmax_encself_launcher<__half>(
int batch_size, int batch_seq_len, int head_num, cudaStream_t stream,
__half* correlation, const int* src_padding_mask);
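/* Usage sketch (illustrative only): the block size equals batch_seq_len, so this
   kernel assumes the batch sequence length fits within the usual 1024-thread
   block limit. Hypothetical sizes; device pointers assumed allocated elsewhere. */
static void example_correlation_softmax_encself(float* d_correlation,
                                                const int* d_src_padding_mask,
                                                cudaStream_t stream) {
  int batch_size = 8, batch_seq_len = 32, head_num = 8;
  ker_correlation_softmax_encself_launcher<float>(
      batch_size, batch_seq_len, head_num, stream, d_correlation,
      d_src_padding_mask);
}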
/**
@brief: ker_correlation_softmax_decself
query-key correlation softmax for decoder self attention
@thread
gridDim.x = batch_size * beam_size * head_num
blockDim.x = cur_step + 1
@param
correlation: [batch_size, beam_size, head_num, cur_step + 1]
*/
template <typename T>
__global__ void ker_correlation_softmax_decself(T* correlation) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax(val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / ssum);
}
template <typename T>
void ker_correlation_softmax_decself_launcher(int batch_head_num, int step_num,
cudaStream_t stream,
T* correlation) {
ker_correlation_softmax_decself<<<batch_head_num, step_num, 0, stream>>>(
correlation);
}
template void ker_correlation_softmax_decself_launcher<float>(
int batch_head_num, int step_num, cudaStream_t stream, float* correlation);
template void ker_correlation_softmax_decself_launcher<__half>(
int batch_head_num, int step_num, cudaStream_t stream, __half* correlation);
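/* Usage sketch (illustrative only): batch_head_num is
   batch_size * beam_size * head_num and step_num is cur_step + 1, matching the
   grid described above. Hypothetical sizes; the correlation buffer is assumed to
   live on the device. */
static void example_correlation_softmax_decself(float* d_correlation, int cur_step,
                                                cudaStream_t stream) {
  int batch_size = 8, beam_size = 4, head_num = 8;
  ker_correlation_softmax_decself_launcher<float>(
      batch_size * beam_size * head_num, cur_step + 1, stream, d_correlation);
}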
/**
@brief: ker_correlation_softmax_encdec
query-key correlation softmax for encoder-decoder attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * beam_size
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, beam_size, batch_seq_len]
src_padding_mask: [batch_size, batch_seq_len]
indicating which token is a padding token.
*/
template <typename T>
__global__ void ker_correlation_softmax_encdec(T* correlation,
const int* src_padding_mask) {
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
int mask = src_padding_mask[blockIdx.x * blockDim.x + threadIdx.x];
float val = (float)correlation[idx];
float max_val = blockReduceMax(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_correlation_softmax_encdec_launcher(
int batch_size, int head_num_per_seq, int batch_seq_len,
cudaStream_t stream, T* correlation, const int* src_padding_mask) {
ker_correlation_softmax_encdec<T>
<<<dim3(batch_size, head_num_per_seq), batch_seq_len, 0, stream>>>(
correlation, src_padding_mask);
}
template void ker_correlation_softmax_encdec_launcher<float>(
int batch_size, int head_num_per_seq, int batch_seq_len,
cudaStream_t stream, float* correlation, const int* src_padding_mask);
template void ker_correlation_softmax_encdec_launcher<__half>(
int batch_size, int head_num_per_seq, int batch_seq_len,
cudaStream_t stream, __half* correlation, const int* src_padding_mask);
/**
@brief: ker_arrange_atten_output
reshape Scaled Dot-Product Attention output.
It will be used by both encoder and decoder
token_num = batch_seq_len, for encoder
= beam_size, for decoder
@thread
gridDim.x = batch_size * ${token_num}
blockDim.x = max_thread_per_block
@param
ori_q: [batch_size, head_num, ${token_num}, dim_per_head]
new_q: [batch_size, ${token_num}, hidden_size]
beam_size: for the decoder this is the beam size; for the encoder it is
batch_seq_len
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_atten_output(const T* ori_q, T* new_q,
int beam_size, int dim_per_head,
int head_num) {
int hidden_size = dim_per_head * head_num;
int batch_id = blockIdx.x / beam_size;
  // note: for the encoder, beam_id is the token id; for the decoder, it is the beam id
int beam_id = blockIdx.x % beam_size;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
new_q[blockIdx.x * hidden_size + i] = ori_q[targetid_4dim(
batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)];
}
}
template <>
__global__ void ker_arrange_atten_output<__half>(const __half* ori_q,
__half* new_q, int beam_size,
int dim_per_head,
int head_num) {
int batch_id = blockIdx.x / beam_size;
  // note: for the encoder, beam_id is the token id; for the decoder, it is the beam id
int beam_id = blockIdx.x % beam_size;
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
const half2* p_ori_q = (const half2*)ori_q;
half2* p_new_q = (half2*)new_q;
p_new_q[blockIdx.x * half_hidden_size + i] = p_ori_q[targetid_4dim(
batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)];
}
}
template <typename T>
void ker_arrange_atten_output_launcher(int batch_token_num, int hidden_size,
cudaStream_t stream, const T* ori_q,
T* new_q, int beam_size,
int dim_per_head, int head_num,
int max_thread_per_block) {
ker_arrange_atten_output<T>
<<<batch_token_num, max_thread_per_block, 0, stream>>>(
ori_q, new_q, beam_size, dim_per_head, head_num);
}
template <>
void ker_arrange_atten_output_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_q, __half* new_q, int beam_size, int dim_per_head,
int head_num, int max_thread_per_block) {
ker_arrange_atten_output<__half>
<<<batch_token_num, max_thread_per_block, 0, stream>>>(
ori_q, new_q, beam_size, dim_per_head / 2, head_num);
}
template void ker_arrange_atten_output_launcher<float>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const float* ori_q, float* new_q, int beam_size, int dim_per_head,
int head_num, int max_thread_per_block);
template void ker_arrange_atten_output_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_q, __half* new_q, int beam_size, int dim_per_head,
int head_num, int max_thread_per_block);
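/* Usage sketch (illustrative only): encoder-side call of the float launcher above,
   where the beam_size argument is the batch sequence length as documented.
   Hypothetical sizes; device pointers assumed allocated elsewhere. */
static void example_arrange_atten_output_enc(const float* d_ori_q, float* d_new_q,
                                             cudaStream_t stream) {
  int batch_size = 8, batch_seq_len = 32, head_num = 8, dim_per_head = 64;
  ker_arrange_atten_output_launcher<float>(batch_size * batch_seq_len,
                                           head_num * dim_per_head, stream, d_ori_q,
                                           d_new_q, /*beam_size=*/batch_seq_len,
                                           dim_per_head, head_num,
                                           /*max_thread_per_block=*/1024);
}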
/**
@brief: ker_refresh_result
refresh alive_seq, seq_probs, seq_score, num_finish_beam based on
the sorted candidates
@thread
gridDim.x = batch_size
gridDim.y = beam_size
blockDim.x = max_step
@param
can_idx: [none], no certain length, determined by rough candidate number
can_score: [none], no certain length, determined by rough candidate number
num_can_per_beam: [batch_size * beam_size]
save exclusive_scan_sum of the beam candidate number array
e.g. [0,2,5,1] -> [0, 0, 2, 7]
old_alive_seq: [batch_size, beam_size, max_step]
new_alive_seq: [batch_size, beam_size, max_step]
seq_probs: [batch_size, beam_size]
seq_score: [batch_size, beam_size]
applying the length penalty to seq_probs yields seq_score
num_finish_beam: record the number of currently finished beams.
it will be used to decide whether to stop beam search early
vocab_size: target vocabulary size
cur_step: current step
length_norm: length penalty norm value
*/
__global__ void ker_refresh_result(const int* can_idx, const float* can_score,
const int* num_can_per_beam,
const int* old_alive_seq, int* new_alive_seq,
float* seq_probs, float* seq_score,
int* num_finish_beam, int vocab_size,
int cur_step, float length_norm,
float diverse_lambda, int end_id) {
// step1 update alive_seq
int can_pos = num_can_per_beam[blockIdx.x * gridDim.y] + blockIdx.y;
int ori_can_idx = can_idx[can_pos]; // can_beam_id * vocab_size + vocab_id
int can_beam_id = ori_can_idx / vocab_size;
int can_vocab_id = ori_can_idx % vocab_size;
int rank_id;
if (diverse_lambda != 0) {
rank_id = can_beam_id / gridDim.y; // rank in each beam
can_beam_id %= gridDim.y;
}
int thread_vocab_id;
if (threadIdx.x > cur_step + 1) {
thread_vocab_id = end_id;
} else if (threadIdx.x == cur_step + 1) {
// add current step generate vocabulary id
thread_vocab_id = can_vocab_id;
} else {
// threadIdx.x <= cur_step
thread_vocab_id = old_alive_seq[targetid_3dim(
blockIdx.x, can_beam_id, threadIdx.x, gridDim.y, blockDim.x)];
}
new_alive_seq[targetid_3dim(blockIdx.x, blockIdx.y, threadIdx.x, gridDim.y,
blockDim.x)] = thread_vocab_id;
// step2 update seq_probs if alive seq when not eos
if (cur_step == 0 || can_vocab_id != end_id) {
// alive seq
if (threadIdx.x == 0) {
if (diverse_lambda == 0) {
seq_probs[blockIdx.x * gridDim.y + blockIdx.y] =
(can_score[can_pos] - blockIdx.x * min_log_probability) /
length_norm; // recover it
} else {
seq_probs[blockIdx.x * gridDim.y + blockIdx.y] =
(can_score[can_pos] - blockIdx.x * min_log_probability +
diverse_lambda * (rank_id + 1)) /
length_norm;
}
}
return;
}
// step3 update seq_score, num_finish_beam if finish seq
if (threadIdx.x == 0) {
atomicAdd(num_finish_beam, 1);
}
int seq_last_id = old_alive_seq[targetid_3dim(
blockIdx.x, can_beam_id, cur_step, gridDim.y, blockDim.x)];
// update finished seq score
if (threadIdx.x == 0) {
// note, with batch offset value, to sort between batch element
if (diverse_lambda == 0) {
seq_score[blockIdx.x * gridDim.y + blockIdx.y] = can_score[can_pos];
} else {
seq_score[blockIdx.x * gridDim.y + blockIdx.y] =
can_score[can_pos] + diverse_lambda * (rank_id + 1);
}
}
}
/**
@brief: ker_refresh_cache
reorder the decoder self-attention K, V cache so that each beam slot holds the
cache of its selected parent beam
@thread
gridDim.x = decoder_layer_num * (step_id + 1)
gridDim.y = batch_size * beam_size * 2
blockDim.x = max_thread_per_block
@param
num_can_per_beam: [batch_size, beam_size]
can_idx: [none], no certain length, determined by rough candidate number
self_k_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
self_v_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
new_self_k_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
new_self_v_bgeem: [batch_size, beam_size, head_num, max_step, dim_per_head] *
decoder_layer_num
self_k_bgeem_offset = max_batch_size * max_step * hidden_size * beam_size
beam_size : beam size for beam_search
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
vocab_size: the vocab size of decoder
cur_step: current step
max_step: max decode step
*/
template <typename T>
__global__ void ker_refresh_cache(const int* num_can_per_beam,
const int* can_idx, const T* self_k_bgeem,
const T* self_v_bgeem, T* new_self_k_bgeem,
T* new_self_v_bgeem, int self_k_bgeem_offset,
int beam_size, int dim_per_head, int head_num,
int vocab_size, int cur_step, int max_step,
bool diverse, int end_id) {
int layer_id = blockIdx.x / (cur_step + 1);
int step_id = blockIdx.x % (cur_step + 1);
int kv_id = blockIdx.y & 1;
int beam_id_global = blockIdx.y >> 1;
int batch_id = beam_id_global / beam_size;
int beam_id = beam_id_global % beam_size;
int hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int can_pos = num_can_per_beam[batch_id * beam_size] + beam_id;
int can_beam_id =
can_idx[can_pos] / vocab_size; // can_beam_id * vocab_size + vocab_id
if (diverse) can_beam_id %= beam_size;
if (can_idx[can_pos] % vocab_size == end_id) {
return;
}
int base_pos = targetid_5dim(batch_id, 0, head_id, step_id, dim_id,
beam_size, head_num, max_step, dim_per_head) +
layer_id * self_k_bgeem_offset;
int beam_offset = hidden_size * max_step;
int ori_id = base_pos + beam_offset * can_beam_id;
int new_id = base_pos + beam_offset * beam_id;
if (kv_id == 0) {
// for key
new_self_k_bgeem[new_id] = self_k_bgeem[ori_id];
} else {
// for value
new_self_v_bgeem[new_id] = self_v_bgeem[ori_id];
}
}
}
template <>
__global__ void ker_refresh_cache<__half>(
const int* num_can_per_beam, const int* can_idx, const __half* self_k_bgeem,
const __half* self_v_bgeem, __half* new_self_k_bgeem,
__half* new_self_v_bgeem, int self_k_bgeem_offset, int beam_size,
int dim_per_head, int head_num, int vocab_size, int cur_step, int max_step,
bool diverse, int end_id) {
int layer_id = blockIdx.x / (cur_step + 1);
int step_id = blockIdx.x % (cur_step + 1);
int kv_id = blockIdx.y & 1;
int beam_id_global = blockIdx.y >> 1;
int batch_id = beam_id_global / beam_size;
int beam_id = beam_id_global % beam_size;
int half_hidden_size = dim_per_head * head_num;
for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) {
int head_id = i / dim_per_head;
int dim_id = i % dim_per_head;
int can_pos = num_can_per_beam[batch_id * beam_size] + beam_id;
int can_beam_id =
can_idx[can_pos] / vocab_size; // can_beam_id * vocab_size + vocab_id
if (diverse) can_beam_id %= beam_size;
if (cur_step != 0 && can_idx[can_pos] % vocab_size == end_id) {
return;
}
int base_pos = targetid_5dim(batch_id, 0, head_id, step_id, dim_id,
beam_size, head_num, max_step, dim_per_head) +
layer_id * self_k_bgeem_offset;
int beam_offset = half_hidden_size * max_step;
int ori_id = base_pos + beam_offset * can_beam_id;
int new_id = base_pos + beam_offset * beam_id;
if (kv_id == 0) {
// for key
((half2*)new_self_k_bgeem)[new_id] = ((half2*)self_k_bgeem)[ori_id];
} else {
// for value
((half2*)new_self_v_bgeem)[new_id] = ((half2*)self_v_bgeem)[ori_id];
}
}
}
template <typename T>
void ker_refresh_cache_launcher(
int grid_dim_x, int grid_dim_y, int block_dim, cudaStream_t stream,
const int* num_can_per_beam, const int* can_idx, const T* self_k_bgeem,
const T* self_v_bgeem, T* new_self_k_bgeem, T* new_self_v_bgeem,
int self_k_bgeem_offset, int beam_size, int dim_per_head, int head_num,
int vocab_size, int cur_step, int max_step, bool diverse, int end_id) {
ker_refresh_cache<T><<<dim3(grid_dim_x, grid_dim_y), block_dim, 0, stream>>>(
num_can_per_beam, can_idx, self_k_bgeem, self_v_bgeem, new_self_k_bgeem,
new_self_v_bgeem, self_k_bgeem_offset, beam_size, dim_per_head, head_num,
vocab_size, cur_step, max_step, diverse, end_id);
}
template <>
void ker_refresh_cache_launcher<__half>(
int grid_dim_x, int grid_dim_y, int block_dim, cudaStream_t stream,
const int* num_can_per_beam, const int* can_idx, const __half* self_k_bgeem,
const __half* self_v_bgeem, __half* new_self_k_bgeem,
__half* new_self_v_bgeem, int self_k_bgeem_offset, int beam_size,
int dim_per_head, int head_num, int vocab_size, int cur_step, int max_step,
bool diverse, int end_id) {
ker_refresh_cache<__half>
<<<dim3(grid_dim_x, grid_dim_y), block_dim / 2, 0, stream>>>(
num_can_per_beam, can_idx, self_k_bgeem, self_v_bgeem,
new_self_k_bgeem, new_self_v_bgeem, self_k_bgeem_offset / 2,
beam_size, dim_per_head / 2, head_num, vocab_size, cur_step, max_step,
diverse, end_id);
}
template void ker_refresh_cache_launcher<float>(
int grid_dim_x, int grid_dim_y, int block_dim, cudaStream_t stream,
const int* num_can_per_beam, const int* can_idx, const float* self_k_bgeem,
const float* self_v_bgeem, float* new_self_k_bgeem, float* new_self_v_bgeem,
int self_k_bgeem_offset, int beam_size, int dim_per_head, int head_num,
int vocab_size, int cur_step, int max_step, bool diverse, int end_id);
template void ker_refresh_cache_launcher<__half>(
int grid_dim_x, int grid_dim_y, int block_dim, cudaStream_t stream,
const int* num_can_per_beam, const int* can_idx, const __half* self_k_bgeem,
const __half* self_v_bgeem, __half* new_self_k_bgeem,
__half* new_self_v_bgeem, int self_k_bgeem_offset, int beam_size,
int dim_per_head, int head_num, int vocab_size, int cur_step, int max_step,
bool diverse, int end_id);
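/*
  Worked grid example for the launcher above (hypothetical numbers): with
  decoder_layer_num = 6, cur_step = 4, batch_size = 8 and beam_size = 4, the grid
  described in the kernel comment is
      grid_dim_x = 6 * (4 + 1) = 30   (one block per layer per cached step)
      grid_dim_y = 8 * 4 * 2   = 64   (one block per beam, for key and for value)
  and self_k_bgeem_offset = max_batch_size * max_step * hidden_size * beam_size.
*/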
/**
@brief: ker_write_trg_tokenid_pos_penalty
write result from alive_seq to output, for length_penalty >= 0,
or for length_penalty < 0 when decoding has reached max_decode_step;
simply output beam 0 as the final result
@thread
gridDim.x = batch_size
blockDim.x = cur_step + 1
@param
alive_seq: [batch_size, beam_size, max_step], <start> is the first token in
each beam
output: [batch_size, cur_step + 1], no <start> and at least one <eos> at the
end of each sequence
*/
__global__ void ker_write_trg_tokenid_pos_penalty(const int* alive_seq,
float* seq_score, int* output,
int max_step, int beam_size) {
int target_id =
targetid_3dim(blockIdx.x, 0, threadIdx.x + 1, beam_size, max_step);
output[blockIdx.x * blockDim.x + threadIdx.x] = alive_seq[target_id];
if (threadIdx.x == 0) {
seq_score[blockIdx.x] =
seq_score[blockIdx.x * beam_size] - blockIdx.x * min_log_probability;
}
}
/**
@brief: ker_write_trg_tokenid_neg_penalty
write result from alive_seq to output,
for length_penalty < 0 when every beam has reached its eos;
compute each beam's score and select the top beam
@thread
gridDim.x = batch_size
blockDim.x = cur_step + 1
@param
alive_seq: [batch_size, beam_size, max_step], <start> is the first token in
each beam
seq_score: [batch_size, beam_size], when length_penalty < 0, seq_score is also
the sum_log_probs
output: [batch_size, cur_step + 1], no <start> and at least one <eos> at the
end of each sequence
*/
__global__ void ker_write_trg_tokenid_neg_penalty(const int* alive_seq,
const float* seq_score,
int* output, int max_step,
int beam_size, int vocab_size,
int end_id) {
__shared__ float seq_final_score;
__shared__ int res_beam_id;
if (threadIdx.x == 0) {
seq_final_score = CUDA_FLOAT_INF_NEG;
res_beam_id = 0;
}
for (int beam_id = 0; beam_id < beam_size; beam_id++) {
int target_id = targetid_3dim(blockIdx.x, beam_id, threadIdx.x + 1,
beam_size, max_step);
int seq_len =
blockReduceSum(int(alive_seq[target_id] != end_id)); // compute seq len
if (threadIdx.x == 0) {
float cur_beam_score = seq_score[blockIdx.x * beam_size + beam_id] -
blockIdx.x * min_log_probability; // recover prob
cur_beam_score /= (float(seq_len) + epsilon);
if (cur_beam_score > seq_final_score) {
seq_final_score = cur_beam_score;
res_beam_id = beam_id;
}
}
__syncthreads();
}
int target_id = targetid_3dim(blockIdx.x, res_beam_id, threadIdx.x + 1,
beam_size, max_step);
output[blockIdx.x * blockDim.x + threadIdx.x] = alive_seq[target_id];
// output[blockIdx.x * blockDim.x + threadIdx.x] =
// int(seq_final_score[threadIdx.x]);
}
/**
@brief: ker_write_topk_result
write result from alive_seq to output, recover seq_score,
for length_penalty > 0
@thread
gridDim.x = batch_size * beam_size
blockDim.x = cur_step + 1
@param
alive_seq: [batch_size, beam_size, max_step], <start> is the first token in
each beam
seq_score: [batch_size, beam_size]
seq_probs: [batch_size, beam_size]
output: [batch_size, cur_step + 1], no <start> and at least one <eos> at the
end of each sequence
*/
__global__ void ker_write_topk_result(const int* alive_seq, float* seq_score,
int* res_seq, int vocab_size,
int max_step, int beam_size, int end_id) {
res_seq[blockIdx.x * blockDim.x + threadIdx.x] =
alive_seq[blockIdx.x * max_step + threadIdx.x + 1];
if (threadIdx.x == 0) {
seq_score[blockIdx.x] -= (blockIdx.x / beam_size) * min_log_probability;
res_seq[blockIdx.x * blockDim.x + blockDim.x - 1] = end_id;
}
}
/**
@brief: ker_topk_sample
quick rough topk sampling from logits
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T, int k>
__global__ void ker_topk_sample(const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const int max_step,
const int batch_seq_len, int logits_seq_len,
int* unfinished, curandState* curandstate,
int eos_id) {
int last_token_idx_in_batch = blockIdx.x * max_step + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (batch_seq_len > 1 && old_input_ids[last_token_idx_in_batch] == eos_id) {
if (threadIdx.x == 0) {
old_input_ids[last_token_idx_in_batch + 1] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit and rough Kth logit over the whole vocab
*/
__shared__ float s_max_logit, s_topk_logit;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
rough_top_kth_logit = fmaxf(
rough_top_kth_logit,
(float)(logits[idx]) +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]));
}
float max_logit = blockReduceMax(rough_top_kth_logit);
rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_topk_logit = rough_top_kth_logit;
s_max_logit = max_logit;
}
__syncthreads();
__shared__ int s_tid;
if (k != 1) {
    /* step2: each thread keeps one logit that is at or above the rough Kth logit,
     * then sample from those candidates */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
// int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit =
(float)logits[idx] +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]);
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
// if (logit >= s_topk_logit) test_num++;
if (logit >= s_topk_logit && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
// test_num = blockReduceSum(test_num);
// __shared__ int s_test_num;
// if (threadIdx.x == 0) {
// s_test_num = test_num;
// if (s_test_num != 1) printf("sample from top %d\n", s_test_num);
// // printf("sample from top %s", test_num);
// }
// __syncthreads();
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
typedef cub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = curand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
} else {
s_tid = vocab_size;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit =
(float)logits[idx] +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]);
if (logit == s_max_logit) {
s_tid = idx - left_logit_idx + threadIdx.x;
}
}
__syncthreads();
}
  /* if the newly sampled tid is not EOS, mark unfinished as TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 write back new sampled ids */
if (threadIdx.x == 0) {
old_input_ids[last_token_idx_in_batch + 1] = s_tid;
}
}
template <typename T>
void ker_topk_sample_launcher(int batch_size, int batch_seq_len,
const int max_step, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream,
const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const int k,
int* unfinished, curandState* curandstate,
int eos_id) {
if (k == 1)
ker_topk_sample<T, 1><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 2)
ker_topk_sample<T, 2><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 4)
ker_topk_sample<T, 4><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 8)
ker_topk_sample<T, 8><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 16)
ker_topk_sample<T, 16><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 32)
ker_topk_sample<T, 32><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else {
throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]");
}
}
template void ker_topk_sample_launcher<float>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const float* logits,
const float* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const int k, int* unfinished,
curandState* curandstate, int eos_id);
template void ker_topk_sample_launcher<__half>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const __half* logits,
const __half* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const int k, int* unfinished,
curandState* curandstate, int eos_id);
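/* Usage sketch (illustrative only): k must be one of 1, 2, 4, 8, 16 or 32, otherwise
   the launcher throws. All sizes are hypothetical; the device buffers and the
   curand states are assumed to be prepared elsewhere (e.g. via ker_curand_setup). */
static void example_topk_sample(const float* d_logits, const float* d_logit_bias,
                                int* d_old_input_ids, int* d_new_input_ids,
                                int* d_unfinished, curandState* d_curandstate,
                                cudaStream_t stream) {
  int batch_size = 8, batch_seq_len = 16, max_step = 64, logits_seq_len = 1;
  int vocab_size = 32000, k = 8, eos_id = 2;
  ker_topk_sample_launcher<float>(batch_size, batch_seq_len, max_step,
                                  logits_seq_len, /*max_thread_per_block=*/1024,
                                  stream, d_logits, d_logit_bias, d_old_input_ids,
                                  d_new_input_ids, vocab_size, k, d_unfinished,
                                  d_curandstate, eos_id);
}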
/**
@brief: ker_topp_sample
quick rough topp sampling from logits
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T>
__global__ void ker_topp_sample(const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const int max_step,
const int batch_seq_len, int logits_seq_len,
int* unfinished, float p,
curandState* curandstate, int eos_id) {
int token_idx_in_batch = blockIdx.x * max_step + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (batch_seq_len > 1 && old_input_ids[token_idx_in_batch] == eos_id) {
if (threadIdx.x == 0) {
old_input_ids[token_idx_in_batch + 1] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/* step1. find max logit in each thread and sample from these probs with
* nucleus sampling */
__shared__ float s_max_logit;
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
    max_logit =
        fmaxf(max_logit,
              (float)logits[idx] +
                  (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]));
}
float max_logit_array[1];
max_logit_array[0] = max_logit;
typedef cub::BlockRadixSort<float, 1024, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage sort_temp_storage;
BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array);
float presum_max_logit_exp;
max_logit = max_logit_array[0];
float block_max_logit = blockReduceMax(max_logit);
if (threadIdx.x == 0) {
s_max_logit = block_max_logit;
}
__syncthreads();
float biased_logit_exp =
expf(fmaxf(max_logit - s_max_logit, logit_thresh_min));
typedef cub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage presum_temp_storage;
BlockScan(presum_temp_storage)
.InclusiveSum(biased_logit_exp, presum_max_logit_exp);
float topp_exp_threshold;
if (threadIdx.x == blockDim.x - 1) {
topp_exp_threshold = p * presum_max_logit_exp;
}
__shared__ float s_presum_logit_exp_threshold;
if (presum_max_logit_exp > topp_exp_threshold) {
presum_max_logit_exp = CUDA_FLOAT_INF_NEG;
}
float logit_exp_threshold = blockReduceMax(presum_max_logit_exp);
if (threadIdx.x == 0) {
s_presum_logit_exp_threshold = logit_exp_threshold;
}
__syncthreads();
__shared__ float s_logit_threshold;
if (presum_max_logit_exp == s_presum_logit_exp_threshold) {
s_logit_threshold = max_logit;
}
__syncthreads();
  /* step2: each thread keeps one logit that is at or above the nucleus threshold,
   * then sample from those candidates */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx] +
(float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]);
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_logit_threshold) test_num++;
if (logit >= s_logit_threshold && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = curand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
  /* if the newly sampled tid is not EOS, mark unfinished as TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 write back new sampled ids */
if (threadIdx.x == 0) {
old_input_ids[token_idx_in_batch + 1] = s_tid;
}
}
template <typename T>
void ker_topp_sample_launcher(int batch_size, int batch_seq_len,
const int max_step, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream,
const T* logits, const T* logit_bias,
int* old_input_ids, int* new_input_ids,
const int vocab_size, const float p,
int* unfinished, curandState* curandstate,
int eos_id) {
ker_topp_sample<T><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step,
batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id);
}
template void ker_topp_sample_launcher<float>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const float* logits,
const float* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const float p, int* unfinished,
curandState* curandstate, int eos_id);
template void ker_topp_sample_launcher<__half>(
int batch_size, int batch_seq_len, const int max_step, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const __half* logits,
const __half* logit_bias, int* old_input_ids, int* new_input_idx,
const int vocab_size, const float p, int* unfinished,
curandState* curandstate, int eos_id);
/**
@brief: ker_bias_gelu
add bias, activated by gelu
@thread
gridDim.x = batch_size * batch_seq_len
blockDim.x = max_thread_per_block
@param
input: [batch_size * batch_seq_len, feature_dim]
bias: [feature_dim]
feature_dim: the dim of input feature
*/
template <typename T>
__global__ void ker_bias_gelu(T* input, const T* bias, int feature_dim) {
int offset = blockIdx.x * feature_dim;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
input[cur_offset] = gelu<float>(input[cur_offset] + __ldg(&bias[idx]));
}
}
/* fp16 version */
template <>
__global__ void ker_bias_gelu<__half>(__half* input, const __half* bias,
int feature_dim) {
int offset = blockIdx.x * feature_dim;
half2* pinput = (half2*)input;
const half2* pbias = (const half2*)bias;
for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) {
int cur_offset = offset + idx;
pinput[cur_offset] =
gelu<half2>(__hadd2(pinput[cur_offset], __ldg(&pbias[idx])));
}
}
template <typename T>
void ker_bias_gelu_launcher(int batch_token_num, int block_dim,
cudaStream_t stream, T* input, const T* bias,
int feature_dim) {
ker_bias_gelu<T>
<<<batch_token_num, block_dim, 0, stream>>>(input, bias, feature_dim);
}
template <>
void ker_bias_gelu_launcher<__half>(int batch_token_num, int block_dim,
cudaStream_t stream, __half* input,
const __half* bias, int feature_dim) {
ker_bias_gelu<__half>
<<<batch_token_num, block_dim, 0, stream>>>(input, bias, feature_dim / 2);
}
template void ker_bias_gelu_launcher<float>(int batch_token_num, int block_dim,
cudaStream_t stream, float* input,
const float* bias, int feature_dim);
template void ker_bias_gelu_launcher<__half>(int batch_token_num, int block_dim,
cudaStream_t stream, __half* input,
const __half* bias,
int feature_dim);
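/* Usage sketch (illustrative only): the fp16 specialization above works on half2,
   so feature_dim must be even. Hypothetical sizes; device pointers assumed
   allocated elsewhere. */
static void example_bias_gelu_fp16(__half* d_ffn_inner, const __half* d_ffn_bias,
                                   cudaStream_t stream) {
  int batch_token_num = 256;  // batch_size * batch_seq_len
  int feature_dim = 2048;     // inner FFN dim, must be even for the half2 path
  ker_bias_gelu_launcher<__half>(batch_token_num, /*block_dim=*/1024, stream,
                                 d_ffn_inner, d_ffn_bias, feature_dim);
}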
__global__ void ker_curand_setup(curandState* state) {
  /* Each thread gets the same seed, a different sequence
     number, and no offset */
curand_init(clock(), blockIdx.x, 0, &state[blockIdx.x]);
}
} // namespace cuda
} // namespace lightseq
|
1a644bb89074a3b8e2be01a1539b27e681333df6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <complex>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <hip/hip_complex.h>
// Kernel Definitions
/******************************************************************************
* Function: CUDAisInMandelbrotSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
 * CUDAisInMandelbrotSet() takes in a hipDoubleComplex number, c, and an int pointer
 * used to return the number of iterations
*
* Parameters: [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInMandelbrotSet(hipDoubleComplex c, int *iterations){
double zr = 0;
double zi = 0;
hipDoubleComplex z = make_cuDoubleComplex(zr, zi);
int i = 0;
for (i = 0; i < 100 && cuCabs(z) < 2.0; ++i){
z = cuCadd(cuCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
 * Function: CUDAisInJuliaSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInJuliaSet() takes in two hipDoubleComplex numbers, c & z, and an int
* pointer to return the number of iterations
*
* Parameters: [in] z
* [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInJuliaSet(hipDoubleComplex z, hipDoubleComplex c, int *iterations){
int i = 0;
for (i = 0; i < 100 && cuCabs(z) < 2.0; ++i){
z = cuCadd(cuCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: mandelCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* mandelCalc() is a kernel which takes in a set of real and imaginary values
 * and will calculate in parallel whether each point is inside or outside the
 * Mandelbrot set.
*
* Parameters: [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void mandelCalc(double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
hipDoubleComplex num;
if(i < len){
num = make_cuDoubleComplex(cReals[i], cImags[i]);
CUDAisInMandelbrotSet(num, &iterations[i]);
}
}
/******************************************************************************
* Function: juliaCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* juliaCalc() is a kernel which takes in a set of real and imaginary values as
* well as a real and imaginary number (as doubles), and will calculate in
* parallel whether or not each point in the arrays is in or out of the Julia set
* associated with the given point
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void juliaCalc(double zReal, double zImag, double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
hipDoubleComplex num;
hipDoubleComplex z;
if(i < len){
num = make_cuDoubleComplex(cReals[i], cImags[i]);
z = make_cuDoubleComplex(zReal, zImag);
CUDAisInJuliaSet(num, z, &iterations[i]);
}
}
/******************************************************************************
* Function: cudaCalcMandelbrot
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcMandelbrot() is a function which takes in a set of real and imaginary
* values, and will return whether each point is in or out of the Mandelbrot set using
* the in-out iterations parameter
*
* Parameters: [in] setOfReals
* [in] setOfImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcMandelbrot(double *setOfReals, double *setOfImags, int *iterations, int len) {
// Block management
int n = len;
// Device arrays
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
hipMalloc(&d_set_reals, len * sizeof(double));
hipMalloc(&d_set_imags, len * sizeof(double));
hipMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
hipMemcpy(d_set_reals, setOfReals, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_set_imags, setOfImags, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_iterations, iterations, len * sizeof(int), hipMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
hipLaunchKernelGGL(( mandelCalc), dim3(nBlocks), dim3(nThreads), 0, 0, d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
hipMemcpy(setOfReals, d_set_reals, len * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(setOfImags, d_set_imags, len * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(iterations, d_iterations, len * sizeof(int), hipMemcpyDeviceToHost);
// Free the device memory
hipFree(d_set_reals);
hipFree(d_set_imags);
hipFree(d_iterations);
}
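/******************************************************************************
 * Usage sketch (an illustrative addition, not from the original file): samples
 * an 8x8 grid over the rectangle [-2,1] x [-1.5,1.5] and hands it to
 * cudaCalcMandelbrot; the grid size and bounds are arbitrary assumptions.
 ******************************************************************************/
void exampleMandelbrotGrid() {
    const int width = 8, height = 8, len = width * height;
    double reals[len], imags[len];
    int iterations[len];
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x) {
            reals[y * width + x] = -2.0 + 3.0 * x / (width - 1);
            imags[y * width + x] = -1.5 + 3.0 * y / (height - 1);
            iterations[y * width + x] = 0;
        }
    cudaCalcMandelbrot(reals, imags, iterations, len);
}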
/******************************************************************************
* Function: cudaCalcJulia
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcJulia() is a function which takes in a real and imaginary z value and
* a set of real and imaginary values, c, and will return whether each point in c is
* in the Julia set associated with the point z
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcJulia(double zReal, double zImag, double *setOfReals, double *setOfImags, int *iterations, int len) {
// Local variables
int n = len;
// Device variables
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
hipMalloc(&d_set_reals, len * sizeof(double));
hipMalloc(&d_set_imags, len * sizeof(double));
hipMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
hipMemcpy(d_set_reals, setOfReals, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_set_imags, setOfImags, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_iterations, iterations, len * sizeof(int), hipMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
hipLaunchKernelGGL(( juliaCalc), dim3(nBlocks), dim3(nThreads), 0, 0, zReal, zImag, d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
hipMemcpy(iterations, d_iterations, len * sizeof(int), hipMemcpyDeviceToHost);
// Free the device memory
hipFree(d_set_reals);
hipFree(d_set_imags);
hipFree(d_iterations);
}
| 1a644bb89074a3b8e2be01a1539b27e681333df6.cu | #include <complex>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuComplex.h>
// Kernel Definitions
/******************************************************************************
* Function: CUDAisInMandelbrotSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInMandelbrotSet() takes in a cuDoubleComplex number, c, and an int pointer
* to return the number of iterations
*
* Parameters: [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInMandelbrotSet(cuDoubleComplex c, int *iterations){
double zr = 0;
double zi = 0;
cuDoubleComplex z = make_cuDoubleComplex(zr, zi);
int i = 0;
for (i = 0; i < 100 && cuCabs(z) < 2.0; ++i){
z = cuCadd(cuCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: CUDAisInJuliaSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInJuliaSet() takes in two cuDoubleComplex numbers, c & z, and an int
* pointer to return the number of iterations
*
* Parameters: [in] z
* [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInJuliaSet(cuDoubleComplex z, cuDoubleComplex c, int *iterations){
int i = 0;
for (i = 0; i < 100 && cuCabs(z) < 2.0; ++i){
z = cuCadd(cuCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: mandelCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* mandelCalc() is a kernel which takes in a set of real and imaginary values
* and will calculate in parallel whether or not each point is in or out of the
* Mandelbrot set.
*
* Parameters: [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void mandelCalc(double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
cuDoubleComplex num;
if(i < len){
num = make_cuDoubleComplex(cReals[i], cImags[i]);
CUDAisInMandelbrotSet(num, &iterations[i]);
}
}
/******************************************************************************
* Function: juliaCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* juliaCalc() is a kernel which takes in a set of real and imaginary values as
* well as a real and imaginary number (as doubles), and will calculate in
* parallel whether or not each point in the arrays is in or out of the Julia set
* associated with the given point
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void juliaCalc(double zReal, double zImag, double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
cuDoubleComplex num;
cuDoubleComplex z;
if(i < len){
num = make_cuDoubleComplex(cReals[i], cImags[i]);
z = make_cuDoubleComplex(zReal, zImag);
CUDAisInJuliaSet(num, z, &iterations[i]);
}
}
/******************************************************************************
* Function: cudaCalcMandelbrot
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcMandelbrot() is a function which takes in a set of real and imaginary
* values, and will return whether each point is in or out of the Mandelbrot set using
* the in-out iterations parameter
*
* Parameters: [in] setOfReals
* [in] setOfImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcMandelbrot(double *setOfReals, double *setOfImags, int *iterations, int len) {
// Block management
int n = len;
// Device arrays
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
cudaMalloc(&d_set_reals, len * sizeof(double));
cudaMalloc(&d_set_imags, len * sizeof(double));
cudaMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
cudaMemcpy(d_set_reals, setOfReals, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_set_imags, setOfImags, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_iterations, iterations, len * sizeof(int), cudaMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
mandelCalc<<<nBlocks, nThreads>>>(d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
cudaMemcpy(setOfReals, d_set_reals, len * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(setOfImags, d_set_imags, len * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(iterations, d_iterations, len * sizeof(int), cudaMemcpyDeviceToHost);
// Free the device memory
cudaFree(d_set_reals);
cudaFree(d_set_imags);
cudaFree(d_iterations);
}
/******************************************************************************
* Function: cudaCalcJulia
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcJulia() is a function which takes in a real and imaginary z value and
* a set of real and imaginary values, c, and will return whether each point in c is
* in the Julia set associated with the point z
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcJulia(double zReal, double zImag, double *setOfReals, double *setOfImags, int *iterations, int len) {
// Local variables
int n = len;
// Device variables
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
cudaMalloc(&d_set_reals, len * sizeof(double));
cudaMalloc(&d_set_imags, len * sizeof(double));
cudaMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
cudaMemcpy(d_set_reals, setOfReals, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_set_imags, setOfImags, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_iterations, iterations, len * sizeof(int), cudaMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
juliaCalc<<<nBlocks, nThreads>>>(zReal, zImag, d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
cudaMemcpy(iterations, d_iterations, len * sizeof(int), cudaMemcpyDeviceToHost);
// Free the device memory
cudaFree(d_set_reals);
cudaFree(d_set_imags);
cudaFree(d_iterations);
}
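/******************************************************************************
 * Usage sketch (an illustrative addition, not from the original file):
 * evaluates the Julia set for the classic constant c = -0.8 + 0.156i over a
 * caller-supplied grid of sample points; the constant is an assumption chosen
 * only for illustration.
 ******************************************************************************/
void exampleJuliaGrid(double *reals, double *imags, int *iterations, int len) {
    // reals/imags hold the sample points; iterations receives the escape counts
    cudaCalcJulia(-0.8, 0.156, reals, imags, iterations, len);
}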
|
01320716ccf6eed54d99872d17914f8216fbf49f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
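// Note: the kernel below appears to be a test case for barrier-divergence
// analysis; several __syncthreads() calls sit under data-dependent branches,
// so the threads of a block are not guaranteed to reach the same barrier.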
__global__ void foo(int *inp, int *out)
{
if(inp[0] == 1) {
if(inp[2] == 2) {
out[3] = 3;
}
else {
__syncthreads();
}
}
else {
if(inp[3] == 4) {
out[0] = 4;
__syncthreads();
}
else {
if(inp[4] == 5) {
out[5] = 5;
}
else if(inp[5] == 5) {
out[4] = inp[5] + 4;
if(inp[14] == 66) {
out[44] = 5;
__syncthreads();
}
else {
out[32] = 5;
}
__syncthreads();
out[99] = 99;
}
else {
__syncthreads();
out[4] = 5;
}
}
}
out[22] = 22;
}
| 01320716ccf6eed54d99872d17914f8216fbf49f.cu | __global__ void foo(int *inp, int *out)
{
if(inp[0] == 1) {
if(inp[2] == 2) {
out[3] = 3;
}
else {
__syncthreads();
}
}
else {
if(inp[3] == 4) {
out[0] = 4;
__syncthreads();
}
else {
if(inp[4] == 5) {
out[5] = 5;
}
else if(inp[5] == 5) {
out[4] = inp[5] + 4;
if(inp[14] == 66) {
out[44] = 5;
__syncthreads();
}
else {
out[32] = 5;
}
__syncthreads();
out[99] = 99;
}
else {
__syncthreads();
out[4] = 5;
}
}
}
out[22] = 22;
}
|
1968849fd6a8f60f1b283e49c81fecf6a91e56e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#define N 100
#define DIM 2
#define PamM 2e-11
#define S 0.5
char le_entrada();
char inicializa_parametros();
float *aloca_matriz(int, int);
void cal_cond_robin();
char parametro_independentes();
char copia_dados_para_gpu();
void copia_dados_para_cpu();
void clear_mem();
//char calcula_pressao_velocidade(int, int, int, int, int);
//char atualiza_mult_lagrange(int tid);
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
//- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - //
/* - - - - - - - Estruturas - - - - - - - */
typedef struct{
float *R, *L, *U, *D;
float *R_old, *L_old, *U_old, *D_old;
}ESTRUTURA_Q;
typedef struct{
float *R, *L, *U, *D;
float *R_old, *L_old, *U_old, *D_old;
}ESTRUTURA_L;
typedef struct{
float *R, *L, *U, *D;
float *R_old, *L_old, *U_old, *D_old;
}ESTRUTURA_B;
typedef struct{
float *p, *p_old;
}ESTRUTURA_PRESSAO;
typedef struct{
float *perm, *font, *epsilon;
}ESTRUTURA_MAT;
/* - - - - - - - Fim das Estruturas - - - - - - - */
/* - - - - - - - Variaveis das Estruturas - - - - - - - */
ESTRUTURA_Q host_q, dev_q;
ESTRUTURA_L host_l, dev_l;
ESTRUTURA_B host_b, dev_b;
ESTRUTURA_PRESSAO host_pressao, dev_pressao;
ESTRUTURA_MAT host_mat, dev_mat;
/* - - - - - - - Entradas Externas - - - - - - - */
int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1;
float tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00;
float h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA
//float *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
//float *dev_mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
/* - - - - - - - Fim das Entradas Externas - - - - - - - */
/* - - - - - - - Fim das Variaveis das Estruturas - - - - - - - */
/* - - - - - - - Ponteiros para GPU - - - - - - - */
float *dev_aux = NULL, dev_erro, dev_media;
// float *dev_aux = NULL, dev_erro = 0.0, dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0;
//
// float *dev_q.R = NULL, *dev_q.L = NULL, *dev_q.U = NULL, *dev_q.D = NULL;
// float *dev_q.R_old = NULL, *dev_q.L_old = NULL, *dev_q.U_old = NULL, *dev_q.D_old = NULL;
//
// float *dev_l.R = NULL, *dev_l.L = NULL, *dev_l.U = NULL, *dev_l.D = NULL;
// float *dev_l.R_old = NULL, *dev_l.L_old = NULL, *dev_l.U_old = NULL, *dev_l.D_old = NULL;
//
// float *dev_b.R = NULL, *dev_b.L = NULL, *dev_b.U = NULL, *dev_b.D = NULL;
// float *dev_b.R_old = NULL, *dev_b.L_old = NULL, *dev_b.U_old = NULL, *dev_b.D_old = NULL;
//
// float *dev_pressao.p = NULL, *dev_pressao.p_old = NULL;
//
//- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - //
__device__ char atualiza_mult_lagrange( int tid,
ESTRUTURA_Q *dev_q,
ESTRUTURA_L *dev_l,
ESTRUTURA_B *dev_b
){
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1 + 2; // the kernel covers only the interior cells,
// so the boundary adds "+ 2" cells on each side
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = kernel row width
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
dev_l->U[index_mem_central] = dev_b->U[index_mem_central] * (dev_q->U[index_mem_central] + dev_q->D_old[index_mem_uper]) + dev_l->D_old[index_mem_uper];
dev_l->D[index_mem_central] = dev_b->D[index_mem_central] * (dev_q->D[index_mem_central] + dev_q->U_old[index_mem_down]) + dev_l->U_old[index_mem_down];
dev_l->R[index_mem_central] = dev_b->R[index_mem_central] * (dev_q->R[index_mem_central] + dev_q->L_old[index_mem_right]) + dev_l->L_old[index_mem_right];
dev_l->L[index_mem_central] = dev_b->L[index_mem_central] * (dev_q->L[index_mem_central] + dev_q->R_old[index_mem_left]) + dev_l->R_old[index_mem_left];
return 0;
}
__device__ char calcula_pressao_velocidade( int tid, int uper, int right, int down, int left,
ESTRUTURA_Q *dev_q,
ESTRUTURA_L *dev_l,
ESTRUTURA_B *dev_b,
ESTRUTURA_PRESSAO *dev_pressao,
ESTRUTURA_MAT *dev_mat
){
float auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0;
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1 + 2; // the kernel covers only the interior cells,
// so the boundary adds "+ 2" cells on each side
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = kernel row width
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
if(uper == 1){
auxU = dev_mat->epsilon[index_mem_central] / (1 + dev_b->U[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DU = auxU * (dev_b->U[index_mem_central] * dev_q->D_old[index_mem_uper] + dev_l->D_old[index_mem_uper]);
}
if(right == 1){
auxR = dev_mat->epsilon[index_mem_central] / (1 + dev_b->R[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DR = auxR * (dev_b->R[index_mem_central] * dev_q->L_old[index_mem_right] + dev_l->L_old[index_mem_right]);
}
if(down == 1){
auxD = dev_mat->epsilon[index_mem_central] / (1 + dev_b->D[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DD = auxD * (dev_b->D[index_mem_central] * dev_q->U_old[index_mem_down] + dev_l->U_old[index_mem_down]);
}
if(left == 1){
auxL = dev_mat->epsilon[index_mem_central] / (1 + dev_b->L[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DL = auxL * (dev_b->L[index_mem_central] * dev_q->R_old[index_mem_left] + dev_l->R_old[index_mem_left]);
}
dev_pressao->p[index_mem_central] = (dev_mat->font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL);
dev_q->L[index_mem_central] = auxL * dev_pressao->p[index_mem_central] - DL;
dev_q->R[index_mem_central] = auxR * dev_pressao->p[index_mem_central] - DR;
dev_q->U[index_mem_central] = auxU * dev_pressao->p[index_mem_central] - DU;
dev_q->D[index_mem_central] = auxD * dev_pressao->p[index_mem_central] - DD;
return 0;
}
__global__ void escoamento_monofasico( ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b,
ESTRUTURA_PRESSAO dev_pressao,
ESTRUTURA_MAT dev_mat,
float *dev_aux, const float erro_max, float dev_erro, float dev_media){
/*int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
a[offset] = offset;*/
/* check the boundary conditions */
float dev_sum1 = 0.0, dev_sum2 = 0.0;
int flag_thread_centrais = 1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
/*int offset = (blockDim.x * gridDim.x) + 1; // shift to account for the padded region size (tam_regiao = n + 2)
*/
int tid = x + y * blockDim.x * gridDim.x;
//check this offset so that it does not cause problems (apply the shift only when storing)
//int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid gives the index into the vector
int dimensao_x = blockDim.x * gridDim.x;
int dimensao_y = blockDim.y * gridDim.y;
int eq_tid_cant_sup_dir = blockDim.x * gridDim.x - 1; // index of the top-right corner
int eq_tid_cant_inf_dir = ((gridDim.x * blockDim.x) * (gridDim.y * blockDim.y)) - 1; // index of the bottom-right corner
int eq_tid_cant_inf_esq = (gridDim.x * blockDim.x) * (gridDim.y * blockDim.y - 1); // index of the bottom-left corner
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1 + 2; // the kernel covers only the interior cells,
// so the boundary adds "+ 2" cells on each side
int index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
int i = 0;
//while(i < 1500){
if(tid == 0){// top-left corner
/* CHECK THE BOUNDARY CONDITIONS */
/*
* calcula_pressao_velocidade();
*
* Param: ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b,
ESTRUTURA_PRESSAO dev_pressao,
ESTRUTURA_MAT dev_mat
*
*/
calcula_pressao_velocidade( tid, 0, 1, 1, 0,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
/*
*
* atualiza_mult_lagrange();
*
* param: int tid,
ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b
*
*/
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_sup_dir){// top-right corner
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 0, 0, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_esq){// bottom-left corner
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 1, 1, 0, 0,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_dir){// bottom-right corner
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 1, 0, 0, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange( tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_sup_dir)){// top boundary
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 0, 1, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == eq_tid_cant_sup_dir)){ // right boundary
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 1, 0, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ // bottom boundary
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 1, 1, 0, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_y == 0)){// left boundary
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 1, 1, 1, 0,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(flag_thread_centrais){
/* CHECK THE BOUNDARY CONDITIONS */
calcula_pressao_velocidade( tid, 1, 1, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
}
/*
* TODO notes:
* synchronize
* add comments
* allocate the "aux" variable with one entry per thread ("tids")
* check atomicity for float values
* check the allocation of the global memories
* allocate the error variable
* allocate float media = 0.0, sum1 = 0.0, sum2 = 0.0;
*/
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0;
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_media = dev_media + dev_pressao.p[(offset + 1) + i];
}
dev_media = dev_media / (eq_tid_cant_inf_dir + 1);
}
__syncthreads();
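// Note: dev_media and dev_erro are by-value kernel parameters, so each thread
// holds a private copy; the value computed by the last thread above is not
// visible to the other threads. A __shared__ or device-global variable would be
// needed for this reduction result to propagate.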
dev_pressao.p[index_mem_central] = dev_media;
dev_l.D[index_mem_central] = dev_l.D[index_mem_central] - dev_media;
dev_l.U[index_mem_central] = dev_l.U[index_mem_central] - dev_media;
dev_l.L[index_mem_central] = dev_l.L[index_mem_central] - dev_media;
dev_l.R[index_mem_central] = dev_l.R[index_mem_central] - dev_media;
//avaliando criterio de convergencia
dev_aux[index_mem_central] = dev_pressao.p[index_mem_central] - dev_pressao.p_old[index_mem_central];
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
dev_erro = 0.0;
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_sum1 += dev_aux[(offset + 1) + i] * dev_aux[(offset + 1) + i];
dev_sum2 += dev_pressao.p[(offset + 1) + i] * dev_pressao.p[(offset + 1) + i];
}
dev_erro = sqrt(dev_sum1/dev_sum2);
}
__syncthreads();
if (dev_erro > erro_max){
return;
}
dev_pressao.p_old[index_mem_central] = dev_pressao.p[index_mem_central];
dev_q.U_old[index_mem_central] = dev_q.U[index_mem_central];
dev_q.R_old[index_mem_central] = dev_q.R[index_mem_central];
dev_q.L_old[index_mem_central] = dev_q.L[index_mem_central];
dev_q.D_old[index_mem_central] = dev_q.D[index_mem_central];
dev_l.D_old[index_mem_central] = dev_l.D[index_mem_central];
dev_l.U_old[index_mem_central] = dev_l.U[index_mem_central];
dev_l.L_old[index_mem_central] = dev_l.L[index_mem_central];
dev_l.R_old[index_mem_central] = dev_l.R[index_mem_central];
i++;
//}
/*
* Imposing zero mean on the pressure distribution
* Computing the mean
*/
/*
atomicAdd( &media, dev_pressao.p[tid] );
//atomicSub( &aux[tid], dev_pressao.p[tid] - dev_pressao.p_old[tid] );
__syncthreads();
dev_pressao.p[tid] -= M;
dev_l.D[tid] -= M;
dev_l.U[tid] -= M;
dev_l.L[tid] -= M;
dev_l.R[tid] -= M;
//avaliando criterio de convergencia
aux[tid] = dev_pressao.p[tid] - dev_b.D_old[tid];
__syncthreads();
atomicAdd( &sum1, aux[tid] * aux[tid] );
atomicAdd( &sum2, dev_pressao.p[tid] * dev_pressao.p[tid] );
__syncthreads();
if(tid == 0)
erro = sqrt(sum1/sum2);
if (erro < 1e-5) return 0;
p_old[j][k] = p[j][k];
dev_pressao.p_old[tid] = dev_pressao.p_old[tid];
dev_q.U_old[tid] = dev_q.U[tid];
dev_q.R_old[tid] = dev_q.R[tid];
dev_q.L_old[tid] = dev_q.L[tid];
dev_q.D_old[tid] = dev_q.D[tid];
dev_l.D_old[tid] = dev_l.D[tid];
dev_l.U_old[tid] = dev_l.U[tid];
dev_l.L_old[tid] = dev_l.L[tid];
dev_l.R_old[tid] = dev_l.R[tid];*/
}
int main(void){
le_entrada();
inicializa_parametros();
cal_cond_robin();
parametro_independentes();
copia_dados_para_gpu();
// dim3 block(comprimento/16 , altura/16);
// dim3 thread(16, 16);
dim3 block(2, 2);
dim3 thread(4, 4);
/*
* escoamento_monofasico();
*
* Param: ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b,
ESTRUTURA_PRESSAO dev_pressao,
ESTRUTURA_MAT dev_mat,
float *dev_aux, const float erro_max
*
*/
hipLaunchKernelGGL(( escoamento_monofasico), dim3(block), dim3(thread), 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat,
dev_aux, 1e-5, dev_erro, dev_media);
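// Optional check (an addition, not part of the original flow): synchronize and
// surface any kernel launch error before copying results back; the hipMemcpy
// calls below would also synchronize implicitly.
HANDLE_ERROR( hipDeviceSynchronize() );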
copia_dados_para_cpu();
int i = 0, j = 0;
printf("\ntam_mat_interna = %d\n", tam_mat_interna);
printf("tam_mat_real = %d\n", tam_mat_real);
printf("max_interacoes = %d\n", max_interacoes);
printf("op_contorno = %d\n", op_contorno);
printf("tam_regiao = %f\n", tam_regiao);
printf("erro_max = %f\n", erro_max);
printf("valor_contor = %f\n", valor_contor);
printf("\n\n\t\t\tmat_font:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_mat.font[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tmat_perm:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]);
//printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tmat_epsilon:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_mat.epsilon[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\n\n\t\t\tbeta U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tbeta R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tbeta L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.L[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tbeta D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\npressao:\n");
printf("\n\n\t\t\t\tpressao:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.L[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.L[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.D[i*tam_mat_real + j]);
printf("\n");
}
/*printf("\n\n\t\t\t\tb_U:\t\t\t\t\t\t\t\t\tb_U_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.U[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.U_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tb_R:\t\t\t\t\t\t\t\t\tb_R_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.R[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.R_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\npressao:\n");
printf("\n\n\t\t\t\tpressao:\t\t\t\t\t\t\t\t\tpressao_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\n\n\t\t\t\tl_U:\t\t\t\t\t\t\t\t\tl_U_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.U[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.U_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_R:\t\t\t\t\t\t\t\t\tl_R_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.R[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.R_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_D:\t\t\t\t\t\t\t\t\tl_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_L:\t\t\t\t\t\t\t\t\tl_L_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.L[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.L_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\n\n\t\t\t\tq_U:\t\t\t\t\t\t\t\t\tq_U_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.U[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.U_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_R:\t\t\t\t\t\t\t\t\tq_R_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.R[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.R_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_D:\t\t\t\t\t\t\t\t\tq_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_L:\t\t\t\t\t\t\t\t\tq_L_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.L[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.L_old[i*tam_mat_real + j]);
printf("\n");
}*/
clear_mem();
//
// system("pause");
return 0;
}
char le_entrada(){
printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n");
FILE *arq = NULL;
//arq = fopen("../dir_entrada/parametro_entrada.txt", "r");
arq = fopen("parametro_entrada.txt", "r");
if(arq == NULL){
printf("Erro ao abrir arquivo: 'parametro_entrada.txt'\n\t\tCertifique-se de que o arquivo existe.\n");
exit(1);
}
else{
printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n");
/*char c[2], dados[255], buffer[255];*/
char buffer[255];
int cont = 1;
while(cont < 9){
fscanf(arq, "%s", buffer);
//puts(buffer);
int i = 0, j = 0;
switch(strlen(buffer)){
case 8: //erro_maximo
fscanf(arq, "%f", &erro_max);
break;
case 10: //tam_regiao
fscanf(arq, "%f", &tam_regiao);
break;
case 11: //opcao_contorno
fscanf(arq, "%d", &op_contorno);
break;
case 12: //valor_contor
fscanf(arq, "%f", &valor_contor);
break;
case 14: //max_interacoes
fscanf(arq, "%d", &max_interacoes);
break;
case 15: //tam_mat_interna
fscanf(arq, "%d", &tam_mat_interna);
break;
case 16: //matriz_de_fontes
//use (tam_mat_interna + 2) here because 'tam_mat_real' has not been initialized yet
host_mat.font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%f", &host_mat.font[i*(tam_mat_interna+2) + j]);
break;
case 18: //matriz_permeabilidade
host_mat.perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
host_mat.epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%f", &host_mat.perm[i*(tam_mat_interna+2) + j]);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
host_mat.perm[i*(tam_mat_interna+2) + j] = PamM*exp(S * host_mat.perm[i*(tam_mat_interna+2) + j]);
break;
default:
printf("\n\n\t\tHouve algum erro no arquivo de entrada!\n\n");
return 0;
}
//int tam = strlen(buffer);
cont++;
}
printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n");
}
printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n");
return 1;
}
float *aloca_matriz(int L, int C){
float *aux = NULL;
aux = (float *) calloc(L * C, sizeof(float));
if(aux == NULL){
printf("\n\n\t\tErro ao alocar memoria\n\n");
exit(1);
}else{
return (aux);
}
return NULL;
}
/*
*
*VERIFICAR RETORNO
*
*/
void cal_cond_robin(){
float keff = 0.0, numerador = 0.0, denominador = 0.0;
float C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0
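// Each interface coefficient below uses the harmonic mean of the two adjacent
// permeabilities, keff = 2*k1*k2 / (k1 + k2), and the Robin coefficient is
// beta = C*h/keff.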
//Canto superior esquerdo
numerador = ( 2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[tam_mat_real + 2] );
denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[tam_mat_real + 2] );
keff = numerador / denominador;
host_b.R[tam_mat_real + 1] = C*h/keff;
numerador = (2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[(2*tam_mat_real) + 1]);
denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[(2*tam_mat_real) + 1]);
keff = numerador / denominador;
host_b.D[tam_mat_real + 1] = C*h/keff;
//Canto superior direito
numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] );
denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] );
keff = numerador / denominador;
host_b.L[tam_mat_real + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] );
denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
host_b.D[tam_mat_real + tam_mat_interna] = C*h/keff;
//Canto inferior esquerdo
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
keff = numerador / denominador;
host_b.U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] );
keff = numerador / denominador;
host_b.R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
//Canto inferior direito
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
keff = numerador / denominador;
host_b.U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
host_b.L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
//Calculo das fronteiras e regiao interna para betas
int i = 0;
for(i = 2; i < tam_mat_interna; i ++){
//Calcula fronteira superior
numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i-1)] );
denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i-1)] );
keff = numerador / denominador;
host_b.L[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i+1)] );
denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i+1)] );
keff = numerador / denominador;
host_b.R[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[(2 * tam_mat_real) + i] );
denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[(2 * tam_mat_real) + i] );
keff = numerador / denominador;
host_b.D[tam_mat_real + i] = C*h/keff;
//Calcula fronteira esquerda
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i - 1) * tam_mat_real) + 1] );
denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i - 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
host_b.U[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[(i * tam_mat_real) + 2] );
denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[(i * tam_mat_real) + 2] );
keff = numerador / denominador;
host_b.R[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i + 1) * tam_mat_real) + 1] );
denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i + 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
host_b.D[(i * tam_mat_real) + 1] = C*h/keff;
//Calcula fronteira inferior
numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
keff = numerador / denominador;
host_b.L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
keff = numerador / denominador;
host_b.U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
keff = numerador / denominador;
host_b.R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
//Calcula fronteira direita
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] );
denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
host_b.U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
host_b.L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] );
denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
host_b.D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
//Calcula dados internos
int j = 0;
for(j = 2; j < tam_mat_interna; j ++){
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j - 1)] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j - 1)] );
keff = numerador / denominador;
host_b.L[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j + 1)] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j + 1)] );
keff = numerador / denominador;
host_b.R[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i - 1) * tam_mat_real) + j] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i - 1) * tam_mat_real) + j] );
keff = numerador / denominador;
host_b.U[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i + 1) * tam_mat_real) + j] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i + 1) * tam_mat_real) + j] );
keff = numerador / denominador;
host_b.D[(i * tam_mat_real) + j] = C*h/keff;
}
}
}
/*
*
*VERIFICAR RETORNO
*
*/
char parametro_independentes(){
int i = 0, j = 0;
float constante = 2/h;
for(i = 0; i < tam_mat_real; i ++)
for(j = 0; j < tam_mat_real; j++){
host_mat.epsilon[i*tam_mat_real + j] = constante * host_mat.perm[i*tam_mat_real + j];
host_mat.font[i*tam_mat_real + j] *= h;
}
return 0;
}
char copia_dados_para_gpu(){
HANDLE_ERROR( hipMemcpy( dev_q.R, host_q.R, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.L, host_q.L, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.U, host_q.U, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.D, host_q.D, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.R_old, host_q.R_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.L_old, host_q.L_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.U_old, host_q.U_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q.D_old, host_q.D_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.R, host_l.R, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.L, host_l.L, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.U, host_l.U, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.D, host_l.D, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.R_old, host_l.R_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.L_old, host_l.L_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.U_old, host_l.U_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l.D_old, host_l.D_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.R, host_b.R, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.L, host_b.L, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.U, host_b.U, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.D, host_b.D, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.R_old, host_b.R_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.L_old, host_b.L_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.U_old, host_b.U_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b.D_old, host_b.D_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_pressao.p, host_pressao.p, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_pressao.p_old, host_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat.perm, host_mat.perm, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat.epsilon, host_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat.font, host_mat.font, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyHostToDevice ) );
return 0;
}
void copia_dados_para_cpu(){
HANDLE_ERROR( hipMemcpy( host_q.R, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.L, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.U, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.D, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.R_old, dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.L_old, dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.U_old, dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_q.D_old, dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.R, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.L, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.U, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.D, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.R_old, dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.L_old, dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.U_old, dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_l.D_old, dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.R, dev_b.R, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.L, dev_b.L, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.U, dev_b.U, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.D, dev_b.D, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.R_old, dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.L_old, dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.U_old, dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_b.D_old, dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_pressao.p, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_pressao.p_old, dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_mat.font, dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_mat.perm, dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_mat.epsilon, dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float),
hipMemcpyDeviceToHost ) );
}
char inicializa_parametros(){
printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n");
/*
*
*
* TODO: build a function to check for allocation errors
* TODO: check the return value
*/
tam_mat_real = tam_mat_interna + 2;
h = tam_regiao / tam_mat_interna;
// dev_q, dev_l, dev_b, dev_pressao and dev_mat are host structs passed to the
// kernel by value; only their member arrays need device allocations, which are
// done below, so the structs themselves are not allocated on the device.
host_q.R = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.R != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.R, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.L = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.L != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.L, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.U = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.U != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.U, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.D = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.D != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.D, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.R_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.L_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.U_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.D_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.R = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.R != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.R, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.L = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.L != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.L, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.U = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.U != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.U, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.D = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.D != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.D, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.R_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.L_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.U_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.D_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.R = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.R != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.R, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.L = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.L != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.L, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.U = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.U != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.U, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.D = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.D != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.D, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.R_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.L_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.U_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.D_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_pressao.p = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_pressao.p != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_pressao.p_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_pressao.p_old != NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_aux, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( hipMemset( dev_aux, 0, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&erro_max, sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_erro, sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_media, sizeof(float) ) );
int i = 0;
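// op_contorno selects which boundary receives the prescribed value 'valor_contor':
// 1 = top, 2 = left, 3 = right, 4 = bottom.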
switch(op_contorno){
case 1: // initialize the top boundary
for(i = 0; i < tam_mat_real; i++){
host_q.D[i] = valor_contor;
host_q.D_old[i] = valor_contor;
}
break;
case 2:// initialize the left boundary
for(i = 0; i < tam_mat_real; i++){
host_q.R[i*tam_mat_real] = valor_contor;
host_q.R_old[i*tam_mat_real] = valor_contor;
}
break;
case 3:// initialize the right boundary
for(i = 0; i < tam_mat_real; i++){
host_q.L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
host_q.L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
}
break;
case 4:// initialize the bottom boundary
for(i = 0; i < tam_mat_real; i++){
host_q.L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
host_q.L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
}
break;
default:
printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
break;
}
printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
return 1;
}
void clear_mem(){
HANDLE_ERROR( hipFree (dev_q.U));
HANDLE_ERROR( hipFree (dev_q.R));
HANDLE_ERROR( hipFree (dev_q.D));
HANDLE_ERROR( hipFree (dev_q.L));
free(host_q.U);
free(host_q.R);
free(host_q.D);
free(host_q.L);
HANDLE_ERROR( hipFree (dev_l.U));
HANDLE_ERROR( hipFree (dev_l.R));
HANDLE_ERROR( hipFree (dev_l.D));
HANDLE_ERROR( hipFree (dev_l.L));
free(host_l.U);
free(host_l.R);
free(host_l.D);
free(host_l.L);
HANDLE_ERROR( hipFree (dev_b.U));
HANDLE_ERROR( hipFree (dev_b.R));
HANDLE_ERROR( hipFree (dev_b.D));
HANDLE_ERROR( hipFree (dev_b.L));
free(host_b.U);
free(host_b.R);
free(host_b.D);
free(host_b.L);
HANDLE_ERROR( hipFree (dev_pressao.p));
HANDLE_ERROR( hipFree (dev_pressao.p_old));
free(host_pressao.p);
free(host_pressao.p_old);
HANDLE_ERROR( hipFree (dev_mat.perm));
HANDLE_ERROR( hipFree (dev_mat.font));
HANDLE_ERROR( hipFree (dev_mat.epsilon));
free(host_mat.perm);
free(host_mat.font);
free(host_mat.epsilon);
} | 1968849fd6a8f60f1b283e49c81fecf6a91e56e7.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#define N 100
#define DIM 2
#define PamM 2e-11
#define S 0.5
char le_entrada();
char inicializa_parametros();
float *aloca_matriz(int, int);
void cal_cond_robin();
char parametro_independentes();
char copia_dados_para_gpu();
void copia_dados_para_cpu();
void clear_mem();
//char calcula_pressao_velocidade(int, int, int, int, int);
//char atualiza_mult_lagrange(int tid);
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
//- - - - - - - - - - - - - - GLOBALS - - - - - - - - - - - - - - //
/* - - - - - - - Structures - - - - - - - */
typedef struct{
float *R, *L, *U, *D;
float *R_old, *L_old, *U_old, *D_old;
}ESTRUTURA_Q;
typedef struct{
float *R, *L, *U, *D;
float *R_old, *L_old, *U_old, *D_old;
}ESTRUTURA_L;
typedef struct{
float *R, *L, *U, *D;
float *R_old, *L_old, *U_old, *D_old;
}ESTRUTURA_B;
typedef struct{
float *p, *p_old;
}ESTRUTURA_PRESSAO;
typedef struct{
float *perm, *font, *epsilon;
}ESTRUTURA_MAT;
/* - - - - - - - End of structures - - - - - - - */
/* - - - - - - - Structure variables - - - - - - - */
ESTRUTURA_Q host_q, dev_q;
ESTRUTURA_L host_l, dev_l;
ESTRUTURA_B host_b, dev_b;
ESTRUTURA_PRESSAO host_pressao, dev_pressao;
ESTRUTURA_MAT host_mat, dev_mat;
/* - - - - - - - External inputs - - - - - - - */
int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1;
float tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00;
float h = 20000.00 / 3; // CELL HEIGHT H = TAM_REGIAO / TAM_MAT_INTERNA
//float *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
//float *dev_mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
/* - - - - - - - End of external inputs - - - - - - - */
/* - - - - - - - End of structure variables - - - - - - - */
/* - - - - - - - Pointers to GPU memory - - - - - - - */
float *dev_aux = NULL, dev_erro, dev_media;
// float *dev_aux = NULL, dev_erro = 0.0, dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0;
//
// float *dev_q.R = NULL, *dev_q.L = NULL, *dev_q.U = NULL, *dev_q.D = NULL;
// float *dev_q.R_old = NULL, *dev_q.L_old = NULL, *dev_q.U_old = NULL, *dev_q.D_old = NULL;
//
// float *dev_l.R = NULL, *dev_l.L = NULL, *dev_l.U = NULL, *dev_l.D = NULL;
// float *dev_l.R_old = NULL, *dev_l.L_old = NULL, *dev_l.U_old = NULL, *dev_l.D_old = NULL;
//
// float *dev_b.R = NULL, *dev_b.L = NULL, *dev_b.U = NULL, *dev_b.D = NULL;
// float *dev_b.R_old = NULL, *dev_b.L_old = NULL, *dev_b.U_old = NULL, *dev_b.D_old = NULL;
//
// float *dev_pressao.p = NULL, *dev_pressao.p_old = NULL;
//
//- - - - - - - - - - - - - - END - GLOBALS - - - - - - - - - - - - - - //
__device__ char atualiza_mult_lagrange( int tid,
ESTRUTURA_Q *dev_q,
ESTRUTURA_L *dev_l,
ESTRUTURA_B *dev_b
){
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1 + 2; // the kernel covers only the interior elements,
// so the boundary must be accounted for with "+ 2" on each side
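// Maps the linear interior-cell id to the padded (n+2)x(n+2) grid: every interior row skips
// 2 ghost cells, and 'offset' skips the first ghost row plus one ghost column, so interior
// cell (r,c) lands at padded index (r+1)*(n+2) + (c+1).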
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = comprimento do kernel
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
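// Robin-type interface update: each edge multiplier is rebuilt from this cell's new flux,
// the neighbouring cell's old flux and the neighbouring cell's old multiplier.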
dev_l->U[index_mem_central] = dev_b->U[index_mem_central] * (dev_q->U[index_mem_central] + dev_q->D_old[index_mem_uper]) + dev_l->D_old[index_mem_uper];
dev_l->D[index_mem_central] = dev_b->D[index_mem_central] * (dev_q->D[index_mem_central] + dev_q->U_old[index_mem_down]) + dev_l->U_old[index_mem_down];
dev_l->R[index_mem_central] = dev_b->R[index_mem_central] * (dev_q->R[index_mem_central] + dev_q->L_old[index_mem_right]) + dev_l->L_old[index_mem_right];
dev_l->L[index_mem_central] = dev_b->L[index_mem_central] * (dev_q->L[index_mem_central] + dev_q->R_old[index_mem_left]) + dev_l->R_old[index_mem_left];
return 0;
}
__device__ char calcula_pressao_velocidade( int tid, int uper, int right, int down, int left,
ESTRUTURA_Q *dev_q,
ESTRUTURA_L *dev_l,
ESTRUTURA_B *dev_b,
ESTRUTURA_PRESSAO *dev_pressao,
ESTRUTURA_MAT *dev_mat
){
float auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0;
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1 + 2; // the kernel covers only the interior elements,
// so the boundary must be accounted for with "+ 2" on each side
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = comprimento do kernel
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
if(uper == 1){
auxU = dev_mat->epsilon[index_mem_central] / (1 + dev_b->U[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DU = auxU * (dev_b->U[index_mem_central] * dev_q->D_old[index_mem_uper] + dev_l->D_old[index_mem_uper]);
}
if(right == 1){
auxR = dev_mat->epsilon[index_mem_central] / (1 + dev_b->R[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DR = auxR * (dev_b->R[index_mem_central] * dev_q->L_old[index_mem_right] + dev_l->L_old[index_mem_right]);
}
if(down == 1){
auxD = dev_mat->epsilon[index_mem_central] / (1 + dev_b->D[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DD = auxD * (dev_b->D[index_mem_central] * dev_q->U_old[index_mem_down] + dev_l->U_old[index_mem_down]);
}
if(left == 1){
auxL = dev_mat->epsilon[index_mem_central] / (1 + dev_b->L[index_mem_central] * dev_mat->epsilon[index_mem_central]);
DL = auxL * (dev_b->L[index_mem_central] * dev_q->R_old[index_mem_left] + dev_l->R_old[index_mem_left]);
}
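// Cell pressure = (source term + neighbour contributions) / (sum of direction coefficients);
// the edge fluxes are then recovered from q = aux * p - D for each enabled direction.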
dev_pressao->p[index_mem_central] = (dev_mat->font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL);
dev_q->L[index_mem_central] = auxL * dev_pressao->p[index_mem_central] - DL;
dev_q->R[index_mem_central] = auxR * dev_pressao->p[index_mem_central] - DR;
dev_q->U[index_mem_central] = auxU * dev_pressao->p[index_mem_central] - DU;
dev_q->D[index_mem_central] = auxD * dev_pressao->p[index_mem_central] - DD;
return 0;
}
__global__ void escoamento_monofasico( ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b,
ESTRUTURA_PRESSAO dev_pressao,
ESTRUTURA_MAT dev_mat,
float *dev_aux, const float erro_max, float dev_erro, float dev_media){
/*int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
a[offset] = offset;*/
/* check the boundary conditions */
float dev_sum1 = 0.0, dev_sum2 = 0.0;
int flag_thread_centrais = 1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
/*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da região (tam_regiao = n + 2)
*/
int tid = x + y * blockDim.x * gridDim.x;
//check this offset so it does not cause problems (apply the offset only when storing)
//int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid gives the index into the vector
int dimensao_x = blockDim.x * gridDim.x;
int dimensao_y = blockDim.y * gridDim.y;
int eq_tid_cant_sup_dir = blockDim.x * gridDim.x - 1; // index of the top-right corner
int eq_tid_cant_inf_dir = ((gridDim.x * blockDim.x) * (gridDim.y * blockDim.y)) - 1; // index of the bottom-right corner
int eq_tid_cant_inf_esq = (gridDim.x * blockDim.x) * (gridDim.y * blockDim.y - 1); // index of the bottom-left corner
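// These precomputed ids mark the corners of the interior grid, so each thread can classify
// itself below as a corner, an edge or an interior cell before updating its unknowns.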
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1 + 2; // the kernel covers only the interior elements,
// so the boundary must be accounted for with "+ 2" on each side
int index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
int i = 0;
//while(i < 1500){
if(tid == 0){// top-left corner
/* check the boundary conditions */
/*
* calcula_pressao_velocidade();
*
* Param: ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b,
ESTRUTURA_PRESSAO dev_pressao,
ESTRUTURA_MAT dev_mat
*
*/
calcula_pressao_velocidade( tid, 0, 1, 1, 0,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
/*
*
* atualiza_mult_lagrange();
*
* param: int tid,
ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b
*
*/
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_sup_dir){// top-right corner
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 0, 0, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_esq){// bottom-left corner
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 1, 1, 0, 0,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_dir){// bottom-right corner
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 1, 0, 0, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange( tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_sup_dir)){// top boundary
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 0, 1, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == eq_tid_cant_sup_dir)){ // right boundary
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 1, 0, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ // bottom boundary
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 1, 1, 0, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_y == 0)){// left boundary
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 1, 1, 1, 0,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
flag_thread_centrais = 0;
}
if(flag_thread_centrais){
/* check the boundary conditions */
calcula_pressao_velocidade( tid, 1, 1, 1, 1,
&dev_q,
&dev_l,
&dev_b,
&dev_pressao,
&dev_mat);
atualiza_mult_lagrange(tid, &dev_q, &dev_l, &dev_b);
}
/*
*
* SYNCHRONIZE
* COMMENTS
* ALLOCATE the 'aux' variable with the size of the thread grid ("tids")
* CHECK ATOMICITY FOR FLOAT VALUES
* CHECK THE ALLOCATION OF THE GLOBAL MEMORY BUFFERS
* allocate memory for the error
* allocate float media = 0.0, sum1 = 0.0, sum2 = 0.0;
*/
__syncthreads();
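// The thread owning the last interior cell serialises the reduction: it accumulates the
// pressure values and divides by the number of interior cells to get the mean subtracted below.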
if(tid == eq_tid_cant_inf_dir){
dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0;
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_media = dev_media + dev_pressao.p[(offset + 1) + i];
}
dev_media = dev_media / (eq_tid_cant_inf_dir + 1);
}
__syncthreads();
dev_pressao.p[index_mem_central] = dev_media;
dev_l.D[index_mem_central] = dev_l.D[index_mem_central] - dev_media;
dev_l.U[index_mem_central] = dev_l.U[index_mem_central] - dev_media;
dev_l.L[index_mem_central] = dev_l.L[index_mem_central] - dev_media;
dev_l.R[index_mem_central] = dev_l.R[index_mem_central] - dev_media;
// evaluating the convergence criterion
dev_aux[index_mem_central] = dev_pressao.p[index_mem_central] - dev_pressao.p_old[index_mem_central];
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
dev_erro = 0.0;
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_sum1 += dev_aux[(offset + 1) + i] * dev_aux[(offset + 1) + i];
dev_sum2 += dev_pressao.p[(offset + 1) + i] * dev_pressao.p[(offset + 1) + i];
}
dev_erro = sqrt(dev_sum1/dev_sum2);
}
__syncthreads();
if (dev_erro > erro_max){
return;
}
dev_pressao.p_old[index_mem_central] = dev_pressao.p[index_mem_central];
dev_q.U_old[index_mem_central] = dev_q.U[index_mem_central];
dev_q.R_old[index_mem_central] = dev_q.R[index_mem_central];
dev_q.L_old[index_mem_central] = dev_q.L[index_mem_central];
dev_q.D_old[index_mem_central] = dev_q.D[index_mem_central];
dev_l.D_old[index_mem_central] = dev_l.D[index_mem_central];
dev_l.U_old[index_mem_central] = dev_l.U[index_mem_central];
dev_l.L_old[index_mem_central] = dev_l.L[index_mem_central];
dev_l.R_old[index_mem_central] = dev_l.R[index_mem_central];
i++;
//}
/*
* Imposing zero mean on the pressure distribution
* Computing the mean
*/
/*
atomicAdd( &media, dev_pressao.p[tid] );
//atomicSub( &aux[tid], dev_pressao.p[tid] - dev_pressao.p_old[tid] );
__syncthreads();
dev_pressao.p[tid] -= M;
dev_l.D[tid] -= M;
dev_l.U[tid] -= M;
dev_l.L[tid] -= M;
dev_l.R[tid] -= M;
//avaliando criterio de convergencia
aux[tid] = dev_pressao.p[tid] - dev_b.D_old[tid];
__syncthreads();
atomicAdd( &sum1, aux[tid] * aux[tid] );
atomicAdd( &sum2, dev_pressao.p[tid] * dev_pressao.p[tid] );
__syncthreads();
if(tid == 0)
erro = sqrt(sum1/sum2);
if (erro < 1e-5) return 0;
p_old[j][k] = p[j][k];
dev_pressao.p_old[tid] = dev_pressao.p_old[tid];
dev_q.U_old[tid] = dev_q.U[tid];
dev_q.R_old[tid] = dev_q.R[tid];
dev_q.L_old[tid] = dev_q.L[tid];
dev_q.D_old[tid] = dev_q.D[tid];
dev_l.D_old[tid] = dev_l.D[tid];
dev_l.U_old[tid] = dev_l.U[tid];
dev_l.L_old[tid] = dev_l.L[tid];
dev_l.R_old[tid] = dev_l.R[tid];*/
}
int main(void){
le_entrada();
inicializa_parametros();
cal_cond_robin();
parametro_independentes();
copia_dados_para_gpu();
// dim3 block(comprimento/16 , altura/16);
// dim3 thread(16, 16);
dim3 block(2, 2);
dim3 thread(4, 4);
/*
* escoamento_monofasico();
*
* Param: ESTRUTURA_Q dev_q,
ESTRUTURA_L dev_l,
ESTRUTURA_B dev_b,
ESTRUTURA_PRESSAO dev_pressao,
ESTRUTURA_MAT dev_mat,
float *dev_aux, const float erro_max
*
*/
escoamento_monofasico<<<block, thread>>>( dev_q, dev_l, dev_b, dev_pressao, dev_mat,
dev_aux, 1e-5, dev_erro, dev_media);
copia_dados_para_cpu();
int i = 0, j = 0;
printf("\ntam_mat_interna = %d\n", tam_mat_interna);
printf("tam_mat_real = %d\n", tam_mat_real);
printf("max_interacoes = %d\n", max_interacoes);
printf("op_contorno = %d\n", op_contorno);
printf("tam_regiao = %f\n", tam_regiao);
printf("erro_max = %f\n", erro_max);
printf("valor_contor = %f\n", valor_contor);
printf("\n\n\t\t\tmat_font:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_mat.font[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tmat_perm:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]);
//printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tmat_epsilon:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_mat.epsilon[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\n\n\t\t\tbeta U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tbeta R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tbeta L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.L[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\tbeta D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\npressao:\n");
printf("\n\n\t\t\t\tpressao:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.L[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.L[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.D[i*tam_mat_real + j]);
printf("\n");
}
/*printf("\n\n\t\t\t\tb_U:\t\t\t\t\t\t\t\t\tb_U_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.U[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.U_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tb_R:\t\t\t\t\t\t\t\t\tb_R_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.R[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.R_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\npressao:\n");
printf("\n\n\t\t\t\tpressao:\t\t\t\t\t\t\t\t\tpressao_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\n\n\t\t\t\tl_U:\t\t\t\t\t\t\t\t\tl_U_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.U[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.U_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_R:\t\t\t\t\t\t\t\t\tl_R_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.R[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.R_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_D:\t\t\t\t\t\t\t\t\tl_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tl_L:\t\t\t\t\t\t\t\t\tl_L_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.L[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_l.L_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n");
printf("\n\n\t\t\t\tq_U:\t\t\t\t\t\t\t\t\tq_U_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.U[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.U_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_R:\t\t\t\t\t\t\t\t\tq_R_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.R[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.R_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_D:\t\t\t\t\t\t\t\t\tq_D_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.D[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.D_old[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\n\t\t\t\tq_L:\t\t\t\t\t\t\t\t\tq_L_old:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.L[i*tam_mat_real + j]);
printf("| ");
for(j = 0; j < tam_mat_real; j++)
printf("%12.4E ", host_q.L_old[i*tam_mat_real + j]);
printf("\n");
}*/
clear_mem();
//
// system("pause");
return 0;
}
char le_entrada(){
printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n");
FILE *arq = NULL;
//arq = fopen("../dir_entrada/parametro_entrada.txt", "r");
arq = fopen("parametro_entrada.txt", "r");
if(arq == NULL){
printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n");
exit(1);
}
else{
printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n");
/*char c[2], dados[255], buffer[255];*/
char buffer[255];
int cont = 1;
while(cont < 9){
fscanf(arq, "%s", buffer);
//puts(buffer);
int i = 0, j = 0;
switch(strlen(buffer)){
case 8: //erro_maximo
fscanf(arq, "%f", &erro_max);
break;
case 10: //tam_regiao
fscanf(arq, "%f", &tam_regiao);
break;
case 11: //opcao_contorno
fscanf(arq, "%d", &op_contorno);
break;
case 12: //valor_contor
fscanf(arq, "%f", &valor_contor);
break;
case 14: //max_interacoes
fscanf(arq, "%d", &max_interacoes);
break;
case 15: //tam_mat_interna
fscanf(arq, "%d", &tam_mat_interna);
break;
case 16: //matriz_de_fontes
//use (tam_mat_interna + 2) because 'tam_mat_real' has not been initialized yet
host_mat.font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%f", &host_mat.font[i*(tam_mat_interna+2) + j]);
break;
case 18: //matriz_permeabilidade
host_mat.perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
host_mat.epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%f", &host_mat.perm[i*(tam_mat_interna+2) + j]);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
host_mat.perm[i*(tam_mat_interna+2) + j] = PamM*exp(S * host_mat.perm[i*(tam_mat_interna+2) + j]);
break;
default:
printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n");
return 0;
}
//int tam = strlen(buffer);
cont++;
}
printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n");
}
printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n");
return 1;
}
float *aloca_matriz(int L, int C){
float *aux = NULL;
aux = (float *) calloc(L * C, sizeof(float));
if(aux == NULL){
printf("\n\n\t\tErro ao alocar memoria\n\n");
exit(1);
}else{
return (aux);
}
return NULL;
}
/*
*
*CHECK THE RETURN VALUE
*
*/
void cal_cond_robin(){
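// Each beta coefficient is C*h/keff, where keff is the harmonic mean 2*k1*k2/(k1+k2) of the
// permeabilities of the two cells sharing that edge.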
float keff = 0.0, numerador = 0.0, denominador = 0.0;
float C = 1.0; // dimensionless constant adjusted experimentally, C = 1.0
//top-left corner
numerador = ( 2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[tam_mat_real + 2] );
denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[tam_mat_real + 2] );
keff = numerador / denominador;
host_b.R[tam_mat_real + 1] = C*h/keff;
numerador = (2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[(2*tam_mat_real) + 1]);
denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[(2*tam_mat_real) + 1]);
keff = numerador / denominador;
host_b.D[tam_mat_real + 1] = C*h/keff;
//top-right corner
numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] );
denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] );
keff = numerador / denominador;
host_b.L[tam_mat_real + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] );
denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
host_b.D[tam_mat_real + tam_mat_interna] = C*h/keff;
//bottom-left corner
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
keff = numerador / denominador;
host_b.U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] );
keff = numerador / denominador;
host_b.R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
//bottom-right corner
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
keff = numerador / denominador;
host_b.U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
host_b.L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
//compute the betas for the boundary and interior cells
int i = 0;
for(i = 2; i < tam_mat_interna; i ++){
//top boundary row
numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i-1)] );
denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i-1)] );
keff = numerador / denominador;
host_b.L[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i+1)] );
denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i+1)] );
keff = numerador / denominador;
host_b.R[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[(2 * tam_mat_real) + i] );
denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[(2 * tam_mat_real) + i] );
keff = numerador / denominador;
host_b.D[tam_mat_real + i] = C*h/keff;
//left boundary column
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i - 1) * tam_mat_real) + 1] );
denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i - 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
host_b.U[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[(i * tam_mat_real) + 2] );
denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[(i * tam_mat_real) + 2] );
keff = numerador / denominador;
host_b.R[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i + 1) * tam_mat_real) + 1] );
denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i + 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
host_b.D[(i * tam_mat_real) + 1] = C*h/keff;
//bottom boundary row
numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
keff = numerador / denominador;
host_b.L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
keff = numerador / denominador;
host_b.U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
keff = numerador / denominador;
host_b.R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
//right boundary column
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] );
denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
host_b.U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
host_b.L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] );
denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
host_b.D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
//interior cells
int j = 0;
for(j = 2; j < tam_mat_interna; j ++){
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j - 1)] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j - 1)] );
keff = numerador / denominador;
host_b.L[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j + 1)] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j + 1)] );
keff = numerador / denominador;
host_b.R[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i - 1) * tam_mat_real) + j] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i - 1) * tam_mat_real) + j] );
keff = numerador / denominador;
host_b.U[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i + 1) * tam_mat_real) + j] );
denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i + 1) * tam_mat_real) + j] );
keff = numerador / denominador;
host_b.D[(i * tam_mat_real) + j] = C*h/keff;
}
}
}
/*
*
*CHECK THE RETURN VALUE
*
*/
char parametro_independentes(){
int i = 0, j = 0;
float constante = 2/h;
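// epsilon = (2/h) * permeability for every cell, and the source term is pre-scaled by h,
// so the kernel only ever works with these combined coefficients.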
for(i = 0; i < tam_mat_real; i ++)
for(j = 0; j < tam_mat_real; j++){
host_mat.epsilon[i*tam_mat_real + j] = constante * host_mat.perm[i*tam_mat_real + j];
host_mat.font[i*tam_mat_real + j] *= h;
}
return 0;
}
char copia_dados_para_gpu(){
HANDLE_ERROR( cudaMemcpy( dev_q.R, host_q.R, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.L, host_q.L, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.U, host_q.U, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.D, host_q.D, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.R_old, host_q.R_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.L_old, host_q.L_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.U_old, host_q.U_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q.D_old, host_q.D_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.R, host_l.R, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.L, host_l.L, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.U, host_l.U, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.D, host_l.D, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.R_old, host_l.R_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.L_old, host_l.L_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.U_old, host_l.U_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l.D_old, host_l.D_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.R, host_b.R, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.L, host_b.L, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.U, host_b.U, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.D, host_b.D, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.R_old, host_b.R_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.L_old, host_b.L_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.U_old, host_b.U_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b.D_old, host_b.D_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_pressao.p, host_pressao.p, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_pressao.p_old, host_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat.perm, host_mat.perm, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat.epsilon, host_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat.font, host_mat.font, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyHostToDevice ) );
return 0;
}
void copia_dados_para_cpu(){
HANDLE_ERROR( cudaMemcpy( host_q.R, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.L, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.U, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.D, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.R_old, dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.L_old, dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.U_old, dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_q.D_old, dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.R, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.L, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.U, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.D, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.R_old, dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.L_old, dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.U_old, dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_l.D_old, dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.R, dev_b.R, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.L, dev_b.L, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.U, dev_b.U, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.D, dev_b.D, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.R_old, dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.L_old, dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.U_old, dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_b.D_old, dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_pressao.p, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_pressao.p_old, dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_mat.font, dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_mat.perm, dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_mat.epsilon, dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float),
cudaMemcpyDeviceToHost ) );
}
char inicializa_parametros(){
printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n");
/*
*
*
* BUILD A FUNCTION TO CHECK FOR ALLOCATION ERRORS
* CHECK THE RETURN VALUE
*/
tam_mat_real = tam_mat_interna + 2;
h = tam_regiao / tam_mat_interna;
HANDLE_ERROR( cudaMalloc( (void**)&dev_q, sizeof(ESTRUTURA_Q) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_l, sizeof(ESTRUTURA_L) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b, sizeof(ESTRUTURA_B) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao, sizeof(ESTRUTURA_PRESSAO) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_mat, sizeof(ESTRUTURA_MAT) ) );
host_q.R = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.R != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.R, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.L = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.L != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.L, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.U = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.U != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.U, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.D = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.D != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.D, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.R_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.L_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.U_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_q.D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_q.D_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.R = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.R != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.R, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.L = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.L != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.L, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.U = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.U != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.U, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.D = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.D != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.D, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.R_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.L_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.U_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_l.D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_l.D_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.R = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.R != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.R, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.L = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.L != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.L, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.U = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.U != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.U, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.D = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.D != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.D, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.R_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.L_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.U_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_b.D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_b.D_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_pressao.p = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_pressao.p != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float) ) );
host_pressao.p_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(host_pressao.p_old != NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_aux, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( cudaMemset( dev_aux, 0, tam_mat_real * tam_mat_real * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&erro_max, sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_erro, sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_media, sizeof(float) ) );
int i = 0;
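// op_contorno selects which boundary receives the prescribed value 'valor_contor':
// 1 = top, 2 = left, 3 = right, 4 = bottom.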
switch(op_contorno){
case 1: // initialize the top boundary
for(i = 0; i < tam_mat_real; i++){
host_q.D[i] = valor_contor;
host_q.D_old[i] = valor_contor;
}
break;
case 2:// initialize the left boundary
for(i = 0; i < tam_mat_real; i++){
host_q.R[i*tam_mat_real] = valor_contor;
host_q.R_old[i*tam_mat_real] = valor_contor;
}
break;
case 3:// initialize the right boundary
for(i = 0; i < tam_mat_real; i++){
host_q.L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
host_q.L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
}
break;
case 4:// initialize the bottom boundary
for(i = 0; i < tam_mat_real; i++){
host_q.L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
host_q.L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
}
break;
default:
printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
break;
}
printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
return 1;
}
void clear_mem(){
HANDLE_ERROR( cudaFree (dev_q.U));
HANDLE_ERROR( cudaFree (dev_q.R));
HANDLE_ERROR( cudaFree (dev_q.D));
HANDLE_ERROR( cudaFree (dev_q.L));
free(host_q.U);
free(host_q.R);
free(host_q.D);
free(host_q.L);
HANDLE_ERROR( cudaFree (dev_l.U));
HANDLE_ERROR( cudaFree (dev_l.R));
HANDLE_ERROR( cudaFree (dev_l.D));
HANDLE_ERROR( cudaFree (dev_l.L));
free(host_l.U);
free(host_l.R);
free(host_l.D);
free(host_l.L);
HANDLE_ERROR( cudaFree (dev_b.U));
HANDLE_ERROR( cudaFree (dev_b.R));
HANDLE_ERROR( cudaFree (dev_b.D));
HANDLE_ERROR( cudaFree (dev_b.L));
free(host_b.U);
free(host_b.R);
free(host_b.D);
free(host_b.L);
HANDLE_ERROR( cudaFree (dev_pressao.p));
HANDLE_ERROR( cudaFree (dev_pressao.p_old));
free(host_pressao.p);
free(host_pressao.p_old);
HANDLE_ERROR( cudaFree (dev_mat.perm));
HANDLE_ERROR( cudaFree (dev_mat.font));
HANDLE_ERROR( cudaFree (dev_mat.epsilon));
free(host_mat.perm);
free(host_mat.font);
free(host_mat.epsilon);
} |
f0f846bd38b7b0eaf7964ca7ba9e6d167273d529.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "printThreadIndex.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
const int nx = 1;
const int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(printThreadIndex, dim3(gridBlock), dim3(threadBlock), 0, 0, A, nx, ny);
hipDeviceSynchronize();
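// Warm up with 10 untimed launches, then time 1000 back-to-back launches below; the timed
// loop is not followed by a device synchronisation, so it mostly measures launch overhead.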
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(printThreadIndex, dim3(gridBlock), dim3(threadBlock), 0, 0, A, nx, ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(printThreadIndex, dim3(gridBlock), dim3(threadBlock), 0, 0, A, nx, ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f0f846bd38b7b0eaf7964ca7ba9e6d167273d529.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "printThreadIndex.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
const int nx = 1;
const int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
printThreadIndex<<<gridBlock,threadBlock>>>(A,nx,ny);
cudaDeviceSynchronize();
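// Warm up with 10 untimed launches, then time 1000 back-to-back launches below; the timed
// loop is not followed by a device synchronisation, so it mostly measures launch overhead.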
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
printThreadIndex<<<gridBlock,threadBlock>>>(A,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
printThreadIndex<<<gridBlock,threadBlock>>>(A,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
36076bf2054c935ea8ba84bdc726b57e6a512ab8.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
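// Instantiates an int8, NCxHWx<4>-layout implicit-GEMM convolution for SM61 SIMT cores:
// 32x128x32 threadblock tile, 32x64x32 warp tile, with a bias + ReLU + clamp epilogue.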
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 36076bf2054c935ea8ba84bdc726b57e6a512ab8.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
a73e757b471d9500ceee5dadd9a51a0133fb3ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <thread>
#include <hip/hip_runtime.h>
#include "errors.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 32
// GPU Floyd Warshall
__global__ void FloydWarshall(float* d, int k, int n)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = by*BLOCK_SIZE + ty;
int j = bx*BLOCK_SIZE + tx;
int ij = n*i + j;
if (i < n && j < n)
{
float dist = d[n*i + k] + d[n*k + j];
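// branchless relaxation: equivalent to d[ij] = min(dist, d[ij]) but avoids a divergent branch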
d[ij] = dist*(dist < d[ij]) + d[ij] * (dist >= d[ij]);
}
}
void FloydWarshallCPU(float* d, int n)
{
for (int k = 0; k < n; ++k)
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j)
if (d[n*i + j] > d[n*i + k] + d[n*k + j])
d[n*i + j] = d[n*i + k] + d[n*k + j];
}
void CPUMulti(float* d, int k, int s, int n, int CPU_THREADS)
{
for (int c = s; c < n*n; c += CPU_THREADS)
{
int i = c/n;
int j = c - i*n;
if (d[n*i + j] > d[n*i + k] + d[n*k + j])
d[n*i + j] = d[n*i + k] + d[n*k + j];
}
}
void FloydWarshallCPUMulti(float* d, int n, int CPU_THREADS)
{
for (int k = 0; k < n; ++k)
{
std::thread* t = new std::thread[CPU_THREADS];
for (int i = 0; i < CPU_THREADS; ++i)
t[i] = std::thread(CPUMulti, d, k, i, n, CPU_THREADS);
for (int i = 0; i < CPU_THREADS; ++i)
t[i].join();
delete[] t;
}
} | a73e757b471d9500ceee5dadd9a51a0133fb3ffb.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <thread>
#include <cuda_runtime.h>
#include "errors.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 32
// GPU Floyd Warshall
__global__ void FloydWarshall(float* d, int k, int n)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = by*BLOCK_SIZE + ty;
int j = bx*BLOCK_SIZE + tx;
int ij = n*i + j;
if (i < n && j < n)
{
float dist = d[n*i + k] + d[n*k + j];
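// branchless relaxation: equivalent to d[ij] = min(dist, d[ij]) but avoids a divergent branch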
d[ij] = dist*(dist < d[ij]) + d[ij] * (dist >= d[ij]);
}
}
void FloydWarshallCPU(float* d, int n)
{
for (int k = 0; k < n; ++k)
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j)
if (d[n*i + j] > d[n*i + k] + d[n*k + j])
d[n*i + j] = d[n*i + k] + d[n*k + j];
}
void CPUMulti(float* d, int k, int s, int n, int CPU_THREADS)
{
for (int c = s; c < n*n; c += CPU_THREADS)
{
int i = c/n;
int j = c - i*n;
if (d[n*i + j] > d[n*i + k] + d[n*k + j])
d[n*i + j] = d[n*i + k] + d[n*k + j];
}
}
void FloydWarshallCPUMulti(float* d, int n, int CPU_THREADS)
{
for (int k = 0; k < n; ++k)
{
std::thread* t = new std::thread[CPU_THREADS];
for (int i = 0; i < CPU_THREADS; ++i)
t[i] = std::thread(CPUMulti, d, k, i, n, CPU_THREADS);
for (int i = 0; i < CPU_THREADS; ++i)
t[i].join();
delete[] t;
}
} |
1a14e4c3e962df4023f68654acc4c300ea891402.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
static unsigned int iSnapUp(const unsigned int dividend, const unsigned int divisor)
{
return ((dividend % divisor) == 0) ? dividend : (dividend - dividend % divisor + divisor);
}
unsigned int factorRadix2(unsigned int& log2L, unsigned int L)
{
if(!L)
{
log2L = 0;
return 0;
} else {
for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
////////////////////////////////////////////////////////////////////////////////
// Scan codelets
////////////////////////////////////////////////////////////////////////////////
#if(1)
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
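//Worked example (a sketch, assuming size == 8): thread 3 gets pos = 2*3 - (3 & 7) = 3,
//zeroes l_Data[3], then scans at pos = 3 + 8 = 11; reads of l_Data[pos - offset] that would
//fall "before the start" land in the zero-filled first half and simply add 0, which is why
//no if(pos >= offset) guard is needed.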
__device__
inline unsigned int scan1Inclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
l_Data[pos] = 0;
pos += size;
l_Data[pos] = idata;
for(unsigned int offset = 1; offset < size; offset <<= 1){
__syncthreads();
unsigned int t = l_Data[pos] + l_Data[pos - offset];
__syncthreads();
l_Data[pos] = t;
}
return l_Data[pos];
}
__device__
inline unsigned int scan1Exclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
return scan1Inclusive(idata, l_Data, size) - idata;
}
#else
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
//Almost the same as naiveScan1 but doesn't need barriers
//assuming size <= WARP_SIZE
inline unsigned int warpScanInclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
l_Data[pos] = 0;
pos += size;
l_Data[pos] = idata;
for(unsigned int offset = 1; offset < size; offset <<= 1)
l_Data[pos] += l_Data[pos - offset];
return l_Data[pos];
}
inline unsigned int warpScanExclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
return warpScanInclusive(idata, l_Data, size) - idata;
}
__device__
inline unsigned int scan1Inclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
unsigned int warpResult = warpScanInclusive(idata, l_Data, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because l_Data is being overwritten)
__syncthreads();
int lid = threadIdx.x;
if( (lid & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
l_Data[lid >> LOG2_WARP_SIZE] = warpResult;
//wait for warp scans to complete
__syncthreads();
if( lid < (WORKGROUP_SIZE / WARP_SIZE) ){
//grab top warp elements
unsigned int val = l_Data[lid] ;
//calculate exclusive scan and write back to shared memory
l_Data[lid] = warpScanExclusive(val, l_Data, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + l_Data[lid >> LOG2_WARP_SIZE];
}else{
return warpScanInclusive(idata, l_Data, size);
}
}
__device__
inline unsigned int scan1Exclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size){
return scan1Inclusive(idata, l_Data, size) - idata;
}
#endif
//Vector scan: the array to be scanned is stored
//in work-item private memory as uint4
__device__
inline uint4 scan4Inclusive(uint4 data4,
unsigned int* l_Data, const unsigned int size){
//Level-0 inclusive scan
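//e.g. (1, 2, 3, 4) -> (1, 3, 6, 10)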
data4.y += data4.x;
data4.z += data4.y;
data4.w += data4.z;
//Level-1 exclusive scan
unsigned int val = scan1Inclusive(data4.w, l_Data, size / 4) - data4.w;
return (data4 + make_uint4(val));
}
__device__
inline uint4 scan4Exclusive(uint4 data4,
unsigned int* l_Data, const unsigned int size)
{
return scan4Inclusive(data4, l_Data, size) - data4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveLocal1K(
unsigned int*__restrict__ d_Dst,
const unsigned int*__restrict__ d_Src,
const unsigned int size)
{
__shared__ unsigned int l_Data[2 * WORKGROUP_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = reinterpret_cast<const uint4*>(d_Src)[i];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, l_Data, size);
//Write back
reinterpret_cast<uint4*>(d_Dst)[i] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveLocal2K(
unsigned int*__restrict__ d_Buf,
unsigned int*__restrict__ d_Dst,
const unsigned int*__restrict__ d_Src,
const unsigned int N,
const unsigned int arrayLength)
{
//Load top elements
//Convert results of bottom-level scan back to inclusive
//Skip loads and stores for inactive work-items of the work-group with highest index(pos >= N)
__shared__ unsigned int l_Data[2 * WORKGROUP_SIZE];
unsigned int data = 0;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N)
data = d_Dst[(4 * WORKGROUP_SIZE - 1) + (4 * WORKGROUP_SIZE) * i] +
d_Src[(4 * WORKGROUP_SIZE - 1) + (4 * WORKGROUP_SIZE) * i];
//Compute
unsigned int odata = scan1Exclusive(data, l_Data, arrayLength);
//Avoid out-of-bound access
if(i < N) d_Buf[i] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdateK(
unsigned int*__restrict__ d_Data,
unsigned int*__restrict__ d_Buf)
{
__shared__ unsigned int buf[1];
int i = blockIdx.x * blockDim.x + threadIdx.x;
uint4 data4 = reinterpret_cast<uint4*>(d_Data)[i];
if(threadIdx.x == 0)
buf[0] = d_Buf[blockIdx.x];
__syncthreads();
data4 += make_uint4(buf[0]);
reinterpret_cast<uint4*>(d_Data)[i] = data4;
}
| 1a14e4c3e962df4023f68654acc4c300ea891402.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
static unsigned int iSnapUp(const unsigned int dividend, const unsigned int divisor)
{
return ((dividend % divisor) == 0) ? dividend : (dividend - dividend % divisor + divisor);
}
unsigned int factorRadix2(unsigned int& log2L, unsigned int L)
{
if(!L)
{
log2L = 0;
return 0;
} else {
for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
////////////////////////////////////////////////////////////////////////////////
// Scan codelets
////////////////////////////////////////////////////////////////////////////////
#if(1)
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
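//Worked example (a sketch, assuming size == 8): thread 3 gets pos = 2*3 - (3 & 7) = 3,
//zeroes l_Data[3], then scans at pos = 3 + 8 = 11; reads of l_Data[pos - offset] that would
//fall "before the start" land in the zero-filled first half and simply add 0, which is why
//no if(pos >= offset) guard is needed.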
__device__
inline unsigned int scan1Inclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
l_Data[pos] = 0;
pos += size;
l_Data[pos] = idata;
for(unsigned int offset = 1; offset < size; offset <<= 1){
__syncthreads();
unsigned int t = l_Data[pos] + l_Data[pos - offset];
__syncthreads();
l_Data[pos] = t;
}
return l_Data[pos];
}
__device__
inline unsigned int scan1Exclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
return scan1Inclusive(idata, l_Data, size) - idata;
}
#else
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
//Almost the same as naiveScan1 but doesn't need barriers
//assuming size <= WARP_SIZE
inline unsigned int warpScanInclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
l_Data[pos] = 0;
pos += size;
l_Data[pos] = idata;
for(unsigned int offset = 1; offset < size; offset <<= 1)
l_Data[pos] += l_Data[pos - offset];
return l_Data[pos];
}
inline unsigned int warpScanExclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
return warpScanInclusive(idata, l_Data, size) - idata;
}
__device__
inline unsigned int scan1Inclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size)
{
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
unsigned int warpResult = warpScanInclusive(idata, l_Data, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because l_Data is being overwritten)
__syncthreads();
int lid = threadIdx.x;
if( (lid & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
l_Data[lid >> LOG2_WARP_SIZE] = warpResult;
//wait for warp scans to complete
__syncthreads();
if( lid < (WORKGROUP_SIZE / WARP_SIZE) ){
//grab top warp elements
unsigned int val = l_Data[lid] ;
//calculate exclusive scan and write back to shared memory
l_Data[lid] = warpScanExclusive(val, l_Data, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + l_Data[lid >> LOG2_WARP_SIZE];
}else{
return warpScanInclusive(idata, l_Data, size);
}
}
__device__
inline unsigned int scan1Exclusive(const unsigned int idata,
unsigned int* l_Data, const unsigned int size){
return scan1Inclusive(idata, l_Data, size) - idata;
}
#endif
//Vector scan: the array to be scanned is stored
//in work-item private memory as uint4
__device__
inline uint4 scan4Inclusive(uint4 data4,
unsigned int* l_Data, const unsigned int size){
//Level-0 inclusive scan
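//e.g. (1, 2, 3, 4) -> (1, 3, 6, 10)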
data4.y += data4.x;
data4.z += data4.y;
data4.w += data4.z;
//Level-1 exclusive scan
unsigned int val = scan1Inclusive(data4.w, l_Data, size / 4) - data4.w;
return (data4 + make_uint4(val));
}
__device__
inline uint4 scan4Exclusive(uint4 data4,
unsigned int* l_Data, const unsigned int size)
{
return scan4Inclusive(data4, l_Data, size) - data4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveLocal1K(
unsigned int*__restrict__ d_Dst,
const unsigned int*__restrict__ d_Src,
const unsigned int size)
{
__shared__ unsigned int l_Data[2 * WORKGROUP_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = reinterpret_cast<const uint4*>(d_Src)[i];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, l_Data, size);
//Write back
reinterpret_cast<uint4*>(d_Dst)[i] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveLocal2K(
unsigned int*__restrict__ d_Buf,
unsigned int*__restrict__ d_Dst,
const unsigned int*__restrict__ d_Src,
const unsigned int N,
const unsigned int arrayLength)
{
//Load top elements
//Convert results of bottom-level scan back to inclusive
//Skip loads and stores for inactive work-items of the work-group with highest index(pos >= N)
__shared__ unsigned int l_Data[2 * WORKGROUP_SIZE];
unsigned int data = 0;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N)
data = d_Dst[(4 * WORKGROUP_SIZE - 1) + (4 * WORKGROUP_SIZE) * i] +
d_Src[(4 * WORKGROUP_SIZE - 1) + (4 * WORKGROUP_SIZE) * i];
//Compute
unsigned int odata = scan1Exclusive(data, l_Data, arrayLength);
//Avoid out-of-bound access
if(i < N) d_Buf[i] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdateK(
unsigned int*__restrict__ d_Data,
unsigned int*__restrict__ d_Buf)
{
__shared__ unsigned int buf[1];
int i = blockIdx.x * blockDim.x + threadIdx.x;
uint4 data4 = reinterpret_cast<uint4*>(d_Data)[i];
if(threadIdx.x == 0)
buf[0] = d_Buf[blockIdx.x];
__syncthreads();
data4 += make_uint4(buf[0]);
reinterpret_cast<uint4*>(d_Data)[i] = data4;
}
|
d70b77e0145aa0fd8963043710838d7daab0cf42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************
File : lcsExclusiveScanForInt.cu
Author : Mingcheng Chen
Last Update : January 29th, 2013
*******************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 512
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define CONFLICT_FREE_OFFSET(n) ((n) >> (LOG_NUM_BANKS))
#define POSI(n) ((n) + CONFLICT_FREE_OFFSET(n))
__global__ void ScanKernel(int *globalArray, int length, int step) {
__shared__ int localArray[POSI(BLOCK_SIZE << 1) + 1];
int localID = threadIdx.x;
int groupID = blockIdx.x;
int groupSize = blockDim.x;
int startOffset = (groupSize << 1) * groupID * step;
int posi1 = startOffset + localID * step;
int posi2 = posi1 + groupSize * step;
localArray[POSI(localID)] = posi1 < length ? globalArray[posi1] : 0;
localArray[POSI(localID + groupSize)] = posi2 < length ? globalArray[posi2] : 0;
// Up-sweep
for (int stride = 1, d = groupSize; stride <= groupSize; stride <<= 1, d >>= 1) {
__syncthreads();
if (localID < d) {
posi1 = stride * ((localID << 1) + 1) - 1;
posi2 = posi1 + stride;
localArray[POSI(posi2)] += localArray[POSI(posi1)];
}
}
// Down-sweep
for (int stride = groupSize, d = 1; stride >= 1; stride >>= 1, d <<= 1) {
__syncthreads();
if (localID < d) {
posi1 = stride * ((localID << 1) + 1) - 1;
posi2 = POSI(posi1 + stride);
posi1 = POSI(posi1);
int t = localArray[posi1];
localArray[posi1] = localArray[posi2];
localArray[posi2] = localArray[posi2] * !!localID + t;
}
}
__syncthreads();
// Write to global memory
posi1 = startOffset + localID * step;
posi2 = posi1 + groupSize * step;
if (posi1 < length) globalArray[posi1] = localArray[POSI(localID)];
if (posi2 < length) globalArray[posi2] = localArray[POSI(localID + groupSize)];
}
__global__ void ReverseUpdateKernel(int *globalArray, int length, int step) {
int localID = threadIdx.x;
int groupID = blockIdx.x;
int groupSize = blockDim.x;
int startOffset = groupID * (groupSize << 1) * step;
if (groupID) {
int value = globalArray[startOffset];
int posi1 = startOffset + localID * step;
int posi2 = posi1 + groupSize * step;
if (posi1 < length && localID) globalArray[posi1] += value;
if (posi2 < length) globalArray[posi2] += value;
}
}
extern "C"
int ExclusiveScanForInt(int *d_arr, int length) {
hipError_t err;
// Get the work group size
int localWorkSize = BLOCK_SIZE;
// Up-sweep and down-sweep
static int records[10];
int problemSize = length;
int numOfRecords = 0;
int d_step = 1;
/// DEBUG ///
//printf("length = %d\n", length);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid(1, 1, 1);
for (; problemSize > 1; problemSize = (problemSize - 1) / (localWorkSize * 2) + 1) {
if (numOfRecords) d_step *= localWorkSize * 2;
records[numOfRecords++] = problemSize;
dimGrid.x = (problemSize - 1) / (localWorkSize * 2) + 1;
hipLaunchKernelGGL(( ScanKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_arr, length, d_step);
err = hipDeviceSynchronize();
if (err) {
printf("Fail to finish scan kernel");
hipGetErrorString(err);
exit(0);
}
}
int sum;
err = hipMemcpy(&sum, d_arr, sizeof(int), hipMemcpyDeviceToHost);
if (err) {
printf("%s\n", hipGetErrorString(err));
exit(0);
}
err = hipMemset(d_arr, 0, sizeof(int));
if (err) {
printf("%s\n", hipGetErrorString(err));
exit(0);
}
// Reverse updates
for (int i = numOfRecords - 1; i >= 0; i--, d_step /= localWorkSize * 2) {
dimGrid.x = (records[i] - 1) / (localWorkSize * 2) + 1;
hipLaunchKernelGGL(( ReverseUpdateKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_arr, length, d_step);
err = hipDeviceSynchronize();
if (err) {
printf("Fail to finish reverse update kernel");
hipGetErrorString(err);
exit(0);
}
}
return sum;
}
| d70b77e0145aa0fd8963043710838d7daab0cf42.cu | /******************************************************************
File : lcsExclusiveScanForInt.cu
Author : Mingcheng Chen
Last Update : January 29th, 2013
*******************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 512
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define CONFLICT_FREE_OFFSET(n) ((n) >> (LOG_NUM_BANKS))
#define POSI(n) ((n) + CONFLICT_FREE_OFFSET(n))
__global__ void ScanKernel(int *globalArray, int length, int step) {
__shared__ int localArray[POSI(BLOCK_SIZE << 1) + 1];
int localID = threadIdx.x;
int groupID = blockIdx.x;
int groupSize = blockDim.x;
int startOffset = (groupSize << 1) * groupID * step;
int posi1 = startOffset + localID * step;
int posi2 = posi1 + groupSize * step;
localArray[POSI(localID)] = posi1 < length ? globalArray[posi1] : 0;
localArray[POSI(localID + groupSize)] = posi2 < length ? globalArray[posi2] : 0;
// Up-sweep
for (int stride = 1, d = groupSize; stride <= groupSize; stride <<= 1, d >>= 1) {
__syncthreads();
if (localID < d) {
posi1 = stride * ((localID << 1) + 1) - 1;
posi2 = posi1 + stride;
localArray[POSI(posi2)] += localArray[POSI(posi1)];
}
}
// Down-sweep
for (int stride = groupSize, d = 1; stride >= 1; stride >>= 1, d <<= 1) {
__syncthreads();
if (localID < d) {
posi1 = stride * ((localID << 1) + 1) - 1;
posi2 = POSI(posi1 + stride);
posi1 = POSI(posi1);
int t = localArray[posi1];
localArray[posi1] = localArray[posi2];
localArray[posi2] = localArray[posi2] * !!localID + t;
}
}
__syncthreads();
// Write to global memory
posi1 = startOffset + localID * step;
posi2 = posi1 + groupSize * step;
if (posi1 < length) globalArray[posi1] = localArray[POSI(localID)];
if (posi2 < length) globalArray[posi2] = localArray[POSI(localID + groupSize)];
}
__global__ void ReverseUpdateKernel(int *globalArray, int length, int step) {
int localID = threadIdx.x;
int groupID = blockIdx.x;
int groupSize = blockDim.x;
int startOffset = groupID * (groupSize << 1) * step;
if (groupID) {
int value = globalArray[startOffset];
int posi1 = startOffset + localID * step;
int posi2 = posi1 + groupSize * step;
if (posi1 < length && localID) globalArray[posi1] += value;
if (posi2 < length) globalArray[posi2] += value;
}
}
extern "C"
int ExclusiveScanForInt(int *d_arr, int length) {
cudaError_t err;
// Get the work group size
int localWorkSize = BLOCK_SIZE;
// Up-sweep and down-sweep
static int records[10];
int problemSize = length;
int numOfRecords = 0;
int d_step = 1;
/// DEBUG ///
//printf("length = %d\n", length);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid(1, 1, 1);
for (; problemSize > 1; problemSize = (problemSize - 1) / (localWorkSize * 2) + 1) {
if (numOfRecords) d_step *= localWorkSize * 2;
records[numOfRecords++] = problemSize;
dimGrid.x = (problemSize - 1) / (localWorkSize * 2) + 1;
ScanKernel<<<dimGrid, dimBlock>>>(d_arr, length, d_step);
err = cudaDeviceSynchronize();
if (err) {
printf("Fail to finish scan kernel");
cudaGetErrorString(err);
exit(0);
}
}
int sum;
err = cudaMemcpy(&sum, d_arr, sizeof(int), cudaMemcpyDeviceToHost);
if (err) {
printf("%s\n", cudaGetErrorString(err));
exit(0);
}
err = cudaMemset(d_arr, 0, sizeof(int));
if (err) {
printf("%s\n", cudaGetErrorString(err));
exit(0);
}
// Reverse updates
for (int i = numOfRecords - 1; i >= 0; i--, d_step /= localWorkSize * 2) {
dimGrid.x = (records[i] - 1) / (localWorkSize * 2) + 1;
ReverseUpdateKernel<<<dimGrid, dimBlock>>>(d_arr, length, d_step);
err = cudaDeviceSynchronize();
if (err) {
printf("Fail to finish reverse update kernel");
cudaGetErrorString(err);
exit(0);
}
}
return sum;
}
|
3df18c617209834c3c38b3997be0a6dbcb0cb9f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
There can be a problem with the app crashing.
It is caused by the WDDM TDR (Timeout Detection and Recovery) delay:
the driver kills the kernel if it doesn't finish within a specific time,
so for large inputs this can be a problem,
but you can change the timeout or even turn it off in Nsight Monitor: option -> general -> microsoft display driver.
*/
#define PI 3.14159265358979323846
#define N 10000 //data size
#define ES 10000 //estimation size
#define HS 20 //histogram size the lower hs is the better results will appear
//do not spoil and dont set data size greater than histogram size
__global__ void estimationKernel(float* data, size_t n, float* kernelEstimation, size_t es, float dx, float h)
{
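// Gaussian kernel density estimate evaluated at x_i = dx * i (the formula this loop implements):
//   f_hat(x_i) = 1 / (n * h * sqrt(2*pi)) * sum_j exp( -(x_i - data[j])^2 / (2*h^2) )
// where h is the bandwidth; the final line applies the 1/(n*h*sqrt(2*pi)) normalization.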
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < es; i += stride)
{
float di;
di = dx * i; //on which position on OX axis we calculate the estimation
kernelEstimation[i] = 0;
for (int j = 0; j < n; j++)
{
//formula:
float power = -0.5f * (di - data[j]) * (di - data[j]) / h / h;
kernelEstimation[i] += expf(power);
}
kernelEstimation[i] /= (n * h) * sqrt(2 * PI); //also formula
}
} | 3df18c617209834c3c38b3997be0a6dbcb0cb9f3.cu | #include "includes.h"
/*
There can be a problem with the app crashing.
It is caused by the WDDM TDR (Timeout Detection and Recovery) delay:
the driver kills the kernel if it doesn't finish within a specific time,
so for large inputs this can be a problem,
but you can change the timeout or even turn it off in Nsight Monitor: option -> general -> microsoft display driver.
*/
#define PI 3.14159265358979323846
#define N 10000 //data size
#define ES 10000 //estimation size
#define HS 20 //histogram size the lower hs is the better results will appear
//do not spoil and dont set data size greater than histogram size
__global__ void estimationKernel(float* data, size_t n, float* kernelEstimation, size_t es, float dx, float h)
{
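// Gaussian kernel density estimate evaluated at x_i = dx * i (the formula this loop implements):
//   f_hat(x_i) = 1 / (n * h * sqrt(2*pi)) * sum_j exp( -(x_i - data[j])^2 / (2*h^2) )
// where h is the bandwidth; the final line applies the 1/(n*h*sqrt(2*pi)) normalization.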
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < es; i += stride)
{
float di;
di = dx * i; //on which position on OX axis we calculate the estimation
kernelEstimation[i] = 0;
for (int j = 0; j < n; j++)
{
//formula:
float power = -0.5f * (di - data[j]) * (di - data[j]) / h / h;
kernelEstimation[i] += expf(power);
}
kernelEstimation[i] /= (n * h) * sqrt(2 * PI); //also formula
}
} |
14529201c51f937e1d415a04871ee796454d2d4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdlib.h>
#include<math.h>
#include<assert.h>
#include<iostream>
// cuda kernal for vector addition
__global__ void vectorAdd(int* a, int* b, int* c, int n)
{
// Calculate global thread ID(tid)
// one thread per element that gets added.
//out of all of our threads, figure out which one I am
// blockIdx.x => which block am I in
// blockDim.x => block size (e.g. 256)
// threadIdx.x => which thread am I in the block.
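// e.g. with blockDim.x = 256: block 2, thread 5 -> tid = 2*256 + 5 = 517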
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// vector boundary guard
if (tid < n)
{
//Each thread adds a single element
c[tid] = a[tid] + b[tid];
}
}
// Initialize vector of size n to int between 0-99
void matrix_init(int* a, int n)
{
for (int i = 0; i < n; ++i)
{
a[i] = rand() % 100;
}
}
//Check vector add result
void error_check(int* a, int* b, int* c, int n)
{
for (int i = 0; i < n; ++i)
{
assert(c[i] == a[i] + b[i]);
}
}
int main()
{
// vector size of 2^16 (65536 elements)
int n = 1 << 16;
// Host vector pointers
int* h_a, * h_b, * h_c;
//Device vector pointers
int* d_a, * d_b, * d_c;
//Allocation size for all vectors
size_t bytes = sizeof(int) * n;
//Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
//Allocate device memory
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
//Initialize vectors a and b with random values between 0 and 99;
matrix_init(h_a, n);
matrix_init(h_b, n);
// Threadblock size
int NUM_THREADS = 256;
//Grid size (round up so every element gets a thread)
int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;
//Launch kernel on the default stream
vectorAdd << < NUM_BLOCKS, NUM_THREADS >> > (d_a, d_b, d_c, n);
// copy sum vector from device to host
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
//check result for errors
error_check(h_a, h_b, h_c, n);
std::cout << "COMPLETED SUCCESSUFLLY\n" << std::endl;
return 0;
}
| 14529201c51f937e1d415a04871ee796454d2d4f.cu | #include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdlib.h>
#include<math.h>
#include<assert.h>
#include<iostream>
// cuda kernal for vector addition
__global__ void vectorAdd(int* a, int* b, int* c, int n)
{
// Calculate global thread ID(tid)
// one thread per element that gets added.
//out of all of our threads, figure out which one I am
// blockIdx.x => which block am I in
// blockDim.x => block size (e.g. 256)
// threadIdx.x => which thread am I in the block.
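// e.g. with blockDim.x = 256: block 2, thread 5 -> tid = 2*256 + 5 = 517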
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// vector boundary guard
if (tid < n)
{
//Each thread adds a single element
c[tid] = a[tid] + b[tid];
}
}
// Initialize vector of size n to int between 0-99
void matrix_init(int* a, int n)
{
for (int i = 0; i < n; ++i)
{
a[i] = rand() % 100;
}
}
//Check vector add result
void error_check(int* a, int* b, int* c, int n)
{
for (int i = 0; i < n; ++i)
{
assert(c[i] == a[i] + b[i]);
}
}
int main()
{
// vector size of 2^16 (65536 elements)
int n = 1 << 16;
// Host vector pointers
int* h_a, * h_b, * h_c;
//Device vector pointers
int* d_a, * d_b, * d_c;
//Allocation size for all vectors
size_t bytes = sizeof(int) * n;
//Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
//Allocate device memory
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
//Initialize vectors a and b with random values between 0 and 99;
matrix_init(h_a, n);
matrix_init(h_b, n);
// Threadblock size
int NUM_THREADS = 256;
//Grid size (round up so every element gets a thread)
int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;
//Launch kernel on the default stream
vectorAdd << < NUM_BLOCKS, NUM_THREADS >> > (d_a, d_b, d_c, n);
// copy sum vector from device to host
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
//check result for errors
error_check(h_a, h_b, h_c, n);
std::cout << "COMPLETED SUCCESSUFLLY\n" << std::endl;
return 0;
}
|
6e564bec9ae278c440cf9e7c6118ce83339ff4dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipblas.h>
#include <hip/hip_runtime.h>
#include "matrixMul_kernel.hip"
#include <iostream>
//This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(-1);
}
}
//This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString(err));
exit(-1);
}
}
// declaration, forward
void doMatrixMul(int argc, char** argv);
void randomInit(float*, int);
void inline checkError(hipblasStatus_t status, const char* msg)
{
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("%s\n", msg);
exit(-1);
}
}
// Program main
int main(int argc, char** argv)
{
doMatrixMul(argc, argv);
}
// host function
void doMatrixMul(int argc, char** argv)
{
int size = 32;
int devID;
hipDeviceProp_t props;
checkCudaErrors(hipGetDevice(&devID));
checkCudaErrors(hipGetDeviceProperties(&props, devID));
int block_size = (props.major < 2) ? 16 : 32;
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = size;
uiHA = size;
uiWB = size;
uiHB = size;
uiWC = size;
uiHC = size;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
srand(2012);
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*)malloc(mem_size_C);
float* h_CUBLAS = (float*)malloc(mem_size_C);
checkCudaErrors(hipMalloc((void**)&d_A, mem_size_A));
checkCudaErrors(hipMalloc((void**)&d_B, mem_size_B));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_C, mem_size_C));
// setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(uiWC / threads.x, uiHC / threads.y);
if (block_size == 16) {
matrixMul<16> << < grid, threads >> >(d_C, d_A, d_B, uiWA, uiWB);
}
else {
matrixMul<32> << < grid, threads >> >(d_C, d_A, d_B, uiWA, uiWB);
}
hipDeviceSynchronize();
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
hipDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
} | 6e564bec9ae278c440cf9e7c6118ce83339ff4dd.cu | #include <cublas_v2.h>
#include <cuda_runtime.h>
#include "matrixMul_kernel.cu"
#include <iostream>
//This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(-1);
}
}
//This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString(err));
exit(-1);
}
}
// declaration, forward
void doMatrixMul(int argc, char** argv);
void randomInit(float*, int);
void inline checkError(cublasStatus_t status, const char* msg)
{
if (status != CUBLAS_STATUS_SUCCESS) {
printf("%s\n", msg);
exit(-1);
}
}
// Program main
int main(int argc, char** argv)
{
doMatrixMul(argc, argv);
}
// host function
void doMatrixMul(int argc, char** argv)
{
int size = 32;
int devID;
cudaDeviceProp props;
checkCudaErrors(cudaGetDevice(&devID));
checkCudaErrors(cudaGetDeviceProperties(&props, devID));
int block_size = (props.major < 2) ? 16 : 32;
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = size;
uiHA = size;
uiWB = size;
uiHB = size;
uiWC = size;
uiHC = size;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
srand(2012);
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
// allocate host memory for the result
float* h_C = (float*)malloc(mem_size_C);
float* h_CUBLAS = (float*)malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void**)&d_A, mem_size_A));
checkCudaErrors(cudaMalloc((void**)&d_B, mem_size_B));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_C, mem_size_C));
// setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(uiWC / threads.x, uiHC / threads.y);
if (block_size == 16) {
matrixMul<16> << < grid, threads >> >(d_C, d_A, d_B, uiWA, uiWB);
}
else {
matrixMul<32> << < grid, threads >> >(d_C, d_A, d_B, uiWA, uiWB);
}
cudaDeviceSynchronize();
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
cudaDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
} |
2c21f43222a0268583ba515448ee956b80998e4d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "yoloswag420blazeit360noscope.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
float *rand = NULL;
hipMalloc(&rand, XSIZE*YSIZE);
float prob = 1;
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((yoloswag420blazeit360noscope), dim3(gridBlock), dim3(threadBlock), 0, 0, input, size, rand, prob, scale);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((yoloswag420blazeit360noscope), dim3(gridBlock), dim3(threadBlock), 0, 0, input, size, rand, prob, scale);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((yoloswag420blazeit360noscope), dim3(gridBlock), dim3(threadBlock), 0, 0, input, size, rand, prob, scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2c21f43222a0268583ba515448ee956b80998e4d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "yoloswag420blazeit360noscope.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
float *rand = NULL;
cudaMalloc(&rand, XSIZE*YSIZE);
float prob = 1;
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
yoloswag420blazeit360noscope<<<gridBlock,threadBlock>>>(input,size,rand,prob,scale);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
yoloswag420blazeit360noscope<<<gridBlock,threadBlock>>>(input,size,rand,prob,scale);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
yoloswag420blazeit360noscope<<<gridBlock,threadBlock>>>(input,size,rand,prob,scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
35cd903c1c5e2dcbaa56ffad3ac8ba40f1b6b0b7.hip | // !!! This is a file automatically generated by hipify!!!
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
printf("%s\n", a);
return EXIT_SUCCESS;
} | 35cd903c1c5e2dcbaa56ffad3ac8ba40f1b6b0b7.cu | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
printf("%s\n", a);
return EXIT_SUCCESS;
} |
c1b5272eac89c0afbd1c452590086a3a1eb01e7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
comp += (+1.4392E36f - var_1 / log10f(var_2 / (var_3 - (var_4 - (-1.9382E-36f / +1.1977E-37f)))));
if (comp > (-1.9936E-37f * -1.8166E-41f * (var_5 + acosf(expf(-1.5876E-35f))))) {
comp = var_6 / var_7;
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
hipDeviceSynchronize();
return 0;
}
| c1b5272eac89c0afbd1c452590086a3a1eb01e7b.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
comp += (+1.4392E36f - var_1 / log10f(var_2 / (var_3 - (var_4 - (-1.9382E-36f / +1.1977E-37f)))));
if (comp > (-1.9936E-37f * -1.8166E-41f * (var_5 + acosf(expf(-1.5876E-35f))))) {
comp = var_6 / var_7;
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
cudaDeviceSynchronize();
return 0;
}
|
02727ca9c9512b8c9b9a03a1ad44362981b18f9c.hip | // !!! This is a file automatically generated by hipify!!!
// OPEN CV LIBRARIES
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// CUDA LIBRARY
#include <hip/hip_runtime.h>
// CUDA CUSTOM LIBRARY
#include "common.h"
#include "helper.hpp"
#define BIN_COUNT 256
auto t = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> global_duration = t-t;
// HISTOGRAM COUNT GPU
__global__ void histogram_count_parallel_GPU(unsigned int *output, const unsigned char *input, const long color_bin){
// CREATE SHARED SUB_HISTOGRAM
__shared__ unsigned int sub_histogram[256];
sub_histogram[threadIdx.x] = 0;
__syncthreads();
// GET THE XINDEX AND THE OFFSET
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
// USE HANDLER
int tid = threadIdx.x;
while (xIndex < color_bin){
// COUNT THE SUB_HISTOGRAM
atomicAdd(&sub_histogram[input[xIndex]], 1);
xIndex += offset;
}
__syncthreads();
// MERGE THE SUB_HISTOGRAMS
atomicAdd(&(output[tid]), sub_histogram[tid]);
}
// HISTOGRAM CFD PARALLEL GPU
__global__ void histogram_cfd_parallel_GPU(unsigned int *output, const unsigned int *input, const long color_bin){
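// "cfd" is the cumulative histogram (CDF): output[k] = input[0] + ... + input[k];
// launched with one block per bin, so each block serially sums up to its own bin index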
int xIndex = blockIdx.x;
int id = 0;
long sum = 0;
if(xIndex < 256){
while(id <= xIndex){
sum += input[id];
id++;
}
output[xIndex] = sum;
}
}
// HISTOGRAM EQUALIZATION PARALLEL GPU
__global__ void histogram_equalization_parallel_GPU(unsigned int *output, const unsigned int *input, int color_bin, int min, int pixels){
int xIndex = blockIdx.x;
if(xIndex < 256){
// CALCULATE THE EQUALIZED VALUE
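// classic CDF remapping (sketch): output[v] = round( (cdf(v) - min) / (pixels - 1) * (color_bin - 1) ),
// where min is the smallest non-zero CDF value; note the denominator here is (pixels - 1) rather than
// the more common (pixels - min)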
input[xIndex] ? output[xIndex] = lroundf(((float)(input[xIndex] - min)/(float)(pixels-1))*(color_bin-1)) :
output[xIndex] = 0;
}
}
// LOOKUP REPLACEMENT PARALLEL GPU
__global__ void lookup_replace_parallel_GPU(unsigned char *output, const unsigned char *input, const int color_bin, const unsigned int *lookup_table){
// GET THE XINDEX AND THE OFFSET
int xIndex = threadIdx.x + blockDim.x * blockIdx.x;
int offset = blockDim.x * gridDim.x;
while (xIndex < color_bin){
// USE THE LOOKUP FUNCTION
output[xIndex] = lookup_table[input[xIndex]];
xIndex += offset;
}
}
// GET THE CUDA PROPS
int get_cuda_device_props(){
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
return prop.multiProcessorCount;
}
// KERNEL WRAPPER FUNCTIONS
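// NOTE: kernel launches are asynchronous, so the timers in these wrappers mostly measure launch
// overhead; call hipDeviceSynchronize() before stopping the clock to time the kernels themselves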
void run_histogram_count_kernel(int blocks, unsigned int *output, const unsigned char *input, int color_bin){
auto start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( histogram_count_parallel_GPU), dim3(blocks*2), dim3(256), 0, 0, output, input, color_bin);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Calculating the picture histogram %f ms\n", duration_ms.count());
}
void run_histogram_cfd_kernel(int blocks, unsigned int *output, const unsigned int *input, int color_bin){
auto start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( histogram_cfd_parallel_GPU), dim3(256), dim3(1), 0, 0, output, input, color_bin);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Calculating the histogram's cfd %f ms\n", duration_ms.count());
}
void run_histogram_equalization_kernel(int blocks, unsigned int *output, const unsigned int *input, int color_bin, int min, int pixels){
auto start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( histogram_equalization_parallel_GPU), dim3(256), dim3(1), 0, 0, output, input, color_bin, min, pixels);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Equalizing the histogram %f ms\n", duration_ms.count());
}
void run_lookup_replace_kernel(int blocks, unsigned char *output, const unsigned char *input, int pixels, unsigned int *lookup_table){
auto start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( lookup_replace_parallel_GPU), dim3(blocks*2),dim3(256), 0, 0, output, input, pixels, lookup_table);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Processing the image %f ms\n", duration_ms.count());
}
// FUNCTION TO SUMMON THE KERNEL
void histogram_equalizer(const cv::Mat& input, cv::Mat& output){
// INPUT.STEP GETS THE NUMBER OF BYTES FOR EACH ROW
std::cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << std::endl;
// SETUP
// GET DEVICE PROPS
int blocks = get_cuda_device_props();
// CALCULATE TOTAL NUMBER OF BYTES OF INPUT AND OUTPUT IMAGE
unsigned char *d_input;
unsigned char *d_output;
unsigned int *d_histogram_count;
unsigned int *d_histogram_cfd;
unsigned int *d_histogram_equalized_lookup;
size_t input_output_bytes = input.step * input.rows;
size_t histogram_bytes = 256 * sizeof(unsigned int);
// ALLOCATE DEVICE MEMORY FOR FIRST INPUT AND FINAL OUTPUT
SAFE_CALL(hipMalloc<unsigned char>(&d_input, input_output_bytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output, input_output_bytes), "CUDA Malloc Failed");
SAFE_CALL(hipMemset(d_output, 0, input_output_bytes), "CUDA Malloc Failed");
// ALLOCATE MEMORY FOR HISTOGRAM COUNT AND HISTOGRAM EQUALIZED AND INITIALIZE
SAFE_CALL(hipMalloc<unsigned int>(&d_histogram_count, histogram_bytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned int>(&d_histogram_cfd, histogram_bytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned int>(&d_histogram_equalized_lookup, histogram_bytes), "CUDA Malloc Failed");
SAFE_CALL(hipMemset(d_histogram_count, 0, histogram_bytes), "CUDA Memset Failed");
SAFE_CALL(hipMemset(d_histogram_cfd, 0, histogram_bytes), "CUDA Memset Failed");
SAFE_CALL(hipMemset(d_histogram_equalized_lookup, 0, histogram_bytes), "CUDA Memset Failed");
// COPY DATA FROM OPENCV INPUT IMAGE TO DEVICE MEMORY
SAFE_CALL(hipMemcpy(d_input, input.ptr(), input_output_bytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// BEGIN
// EXECUTE KERNEL
run_histogram_count_kernel(blocks, d_histogram_count, d_input, input.cols*input.rows);
// COPY HISTOGRAM TO HOST MEMORY
unsigned int image_histogram[256];
SAFE_CALL(hipMemcpy(image_histogram, d_histogram_count, histogram_bytes, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed");
// CASE GPU CALCULATION WAS CORRECT
if(assert_gpu_vs_cpu(input.ptr(), image_histogram, input.cols*input.rows)){
std::cout << "GPU histogram calculation was successful" << std::endl;
// GET THE NEW TABLE
// unsigned int cfd_histogram_count[256] = {0};
// calculate_cfd(cfd_histogram_count, image_histogram, 256);
//
// // COPY CFD HISTOGRAM TO DEVICE
// SAFE_CALL(hipMemcpy(d_histogram_cfd, cfd_histogram_count, histogram_bytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
//
run_histogram_cfd_kernel(1, d_histogram_cfd, d_histogram_count, 256);
// RUN KERNEL
// int min = get_min_nonzero_value(cfd_histogram_count, 256);
int min = 11;
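// NOTE: min is hard-coded here and presumably tuned to one particular input image; in general it
// should be the smallest non-zero CDF value (see the commented-out get_min_nonzero_value call above)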
run_histogram_equalization_kernel(1, d_histogram_equalized_lookup, d_histogram_cfd, 256, min, input.cols*input.rows);
//EXECUTE KERNEL
run_lookup_replace_kernel(blocks, d_output, d_input, input.cols * input.rows, d_histogram_equalized_lookup);
// COPY BACK DATA
SAFE_CALL(hipMemcpy(output.ptr(), d_output, input_output_bytes, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed");
printf("Total Time %f ms\n", global_duration.count());
}else{ // CASE IT WASN'T
std::cout << "Histogram calculation found an error, closing software" << std::endl;
hipFree(d_input);
hipFree(d_output);
hipFree(d_histogram_count);
hipFree(d_histogram_cfd);
hipFree(d_histogram_equalized_lookup);
exit(1);
}
hipFree(d_input);
hipFree(d_output);
hipFree(d_histogram_count);
hipFree(d_histogram_cfd);
hipFree(d_histogram_equalized_lookup);
return;
}
int main(int argc, char *argv[]) {
// GET THE IMAGE PATH
std::string imagePath;
(argc < 2) ? imagePath = "images/scenery.jpg" : imagePath = argv[1];
// READ INPUT IMAGE FROM DISK
cv::Mat input, colorInput = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (colorInput.empty()){
std::cout << "Image Not Found!" << std::endl;
std::cin.get();
return -1;
}
// GET THE IMAGE AND CONVERT IT TO GRAYSCALE
cv::cvtColor(colorInput, input, CV_BGR2GRAY);
// CREATE OUTPUT IMAGE
cv::Mat output = input.clone();
// CALL THE WRAPPER FUNCTION
histogram_equalizer(input, output);
cv::imwrite("output.jpg", output);
return 0;
}
| 02727ca9c9512b8c9b9a03a1ad44362981b18f9c.cu | // OPEN CV LIRBARIES
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// CUDA LIBRARY
#include <cuda_runtime.h>
// CUDA CUSTOM LIBRARY
#include "common.h"
#include "helper.hpp"
#define BIN_COUNT 256
auto t = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> global_duration = t-t;
// HISTOGRAM COUNT GPU
__global__ void histogram_count_parallel_GPU(unsigned int *output, const unsigned char *input, const long color_bin){
// CREATE SHARED SUB_HISTOGRAM
__shared__ unsigned int sub_histogram[256];
sub_histogram[threadIdx.x] = 0;
__syncthreads();
// GET THE XINDEX AND THE OFFSET
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
// USE HANDLER
int tid = threadIdx.x;
while (xIndex < color_bin){
// COUNT THE SUB_HISTOGRAM
atomicAdd(&sub_histogram[input[xIndex]], 1);
xIndex += offset;
}
__syncthreads();
// MERGE THE SUB_HISTOGRAMS
atomicAdd(&(output[tid]), sub_histogram[tid]);
}
// HISTOGRAM CFD PARALLEL GPU
__global__ void histogram_cfd_parallel_GPU(unsigned int *output, const unsigned int *input, const long color_bin){
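// "cfd" is the cumulative histogram (CDF): output[k] = input[0] + ... + input[k];
// launched with one block per bin, so each block serially sums up to its own bin index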
int xIndex = blockIdx.x;
int id = 0;
long sum = 0;
if(xIndex < 256){
while(id <= xIndex){
sum += input[id];
id++;
}
output[xIndex] = sum;
}
}
// HISTOGRAM EQUALIZATION PARALLEL GPU
__global__ void histogram_equalization_parallel_GPU(unsigned int *output, const unsigned int *input, int color_bin, int min, int pixels){
int xIndex = blockIdx.x;
if(xIndex < 256){
// CALCULATE THE EQUALIZED VALUE
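// classic CDF remapping (sketch): output[v] = round( (cdf(v) - min) / (pixels - 1) * (color_bin - 1) ),
// where min is the smallest non-zero CDF value; note the denominator here is (pixels - 1) rather than
// the more common (pixels - min)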
input[xIndex] ? output[xIndex] = lroundf(((float)(input[xIndex] - min)/(float)(pixels-1))*(color_bin-1)) :
output[xIndex] = 0;
}
}
// LOOKUP REPLACEMENT PARALLEL GPU
__global__ void lookup_replace_parallel_GPU(unsigned char *output, const unsigned char *input, const int color_bin, const unsigned int *lookup_table){
// GET THE XINDEX AND THE OFFSET
int xIndex = threadIdx.x + blockDim.x * blockIdx.x;
int offset = blockDim.x * gridDim.x;
while (xIndex < color_bin){
// USE THE LOOKUP FUNCTION
output[xIndex] = lookup_table[input[xIndex]];
xIndex += offset;
}
}
// GET THE CUDA PROPS
int get_cuda_device_props(){
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
return prop.multiProcessorCount;
}
// KERNEL WRAPPER FUNCTIONS
void run_histogram_count_kernel(int blocks, unsigned int *output, const unsigned char *input, int color_bin){
auto start_cpu = std::chrono::high_resolution_clock::now();
histogram_count_parallel_GPU<<<blocks*2, 256>>>(output, input, color_bin);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Calculating the picture histogram %f ms\n", duration_ms.count());
}
void run_histogram_cfd_kernel(int blocks, unsigned int *output, const unsigned int *input, int color_bin){
auto start_cpu = std::chrono::high_resolution_clock::now();
histogram_cfd_parallel_GPU<<<256, 1>>>(output, input, color_bin);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Calculating the histogram's cfd %f ms\n", duration_ms.count());
}
void run_histogram_equalization_kernel(int blocks, unsigned int *output, const unsigned int *input, int color_bin, int min, int pixels){
auto start_cpu = std::chrono::high_resolution_clock::now();
histogram_equalization_parallel_GPU<<<256, 1>>>(output, input, color_bin, min, pixels);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Equalizing the histogram %f ms\n", duration_ms.count());
}
void run_lookup_replace_kernel(int blocks, unsigned char *output, const unsigned char *input, int pixels, unsigned int *lookup_table){
auto start_cpu = std::chrono::high_resolution_clock::now();
lookup_replace_parallel_GPU<<<blocks*2,256>>>(output, input, pixels, lookup_table);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
global_duration += duration_ms;
printf("Processing the image %f ms\n", duration_ms.count());
}
// FUNCTION TO SUMMON THE KERNEL
void histogram_equalizer(const cv::Mat& input, cv::Mat& output){
// INPUT.STEP GETS THE NUMBER OF BYTES FOR EACH ROW
std::cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << std::endl;
// SETUP
// GET DEVICE PROPS
int blocks = get_cuda_device_props();
// CALCULATE TOTAL NUMBER OF BYTES OF INPUT AND OUTPUT IMAGE
unsigned char *d_input;
unsigned char *d_output;
unsigned int *d_histogram_count;
unsigned int *d_histogram_cfd;
unsigned int *d_histogram_equalized_lookup;
size_t input_output_bytes = input.step * input.rows;
size_t histogram_bytes = 256 * sizeof(unsigned int);
// ALLOCATE DEVICE MEMORY FOR FIRST INPUT AND FINAL OUTPUT
SAFE_CALL(cudaMalloc<unsigned char>(&d_input, input_output_bytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output, input_output_bytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMemset(d_output, 0, input_output_bytes), "CUDA Memset Failed");
// ALLOCATE MEMORY FOR HISTOGRAM COUNT AND HISTOGRAM EQUALIZED AND INITIALIZE
SAFE_CALL(cudaMalloc<unsigned int>(&d_histogram_count, histogram_bytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned int>(&d_histogram_cfd, histogram_bytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned int>(&d_histogram_equalized_lookup, histogram_bytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMemset(d_histogram_count, 0, histogram_bytes), "CUDA Memset Failed");
SAFE_CALL(cudaMemset(d_histogram_cfd, 0, histogram_bytes), "CUDA Memset Failed");
SAFE_CALL(cudaMemset(d_histogram_equalized_lookup, 0, histogram_bytes), "CUDA Memset Failed");
// COPY DATA FROM OPENCV INPUT IMAGE TO DEVICE MEMORY
SAFE_CALL(cudaMemcpy(d_input, input.ptr(), input_output_bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// BEGIN
// EXECUTE KERNEL
run_histogram_count_kernel(blocks, d_histogram_count, d_input, input.cols*input.rows);
// COPY HISTOGRAM TO HOST MEMORY
unsigned int image_histogram[256];
SAFE_CALL(cudaMemcpy(image_histogram, d_histogram_count, histogram_bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// CASE GPU CALCULATION WAS CORRECT
if(assert_gpu_vs_cpu(input.ptr(), image_histogram, input.cols*input.rows)){
std::cout << "GPU histogram calculation was successful" << std::endl;
// GET THE NEW TABLE
// unsigned int cfd_histogram_count[256] = {0};
// calculate_cfd(cfd_histogram_count, image_histogram, 256);
//
// // COPY CFD HISTOGRAM TO DEVICE
// SAFE_CALL(cudaMemcpy(d_histogram_cfd, cfd_histogram_count, histogram_bytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
//
run_histogram_cfd_kernel(1, d_histogram_cfd, d_histogram_count, 256);
// RUN KERNEL
// int min = get_min_nonzero_value(cfd_histogram_count, 256);
int min = 11;
run_histogram_equalization_kernel(1, d_histogram_equalized_lookup, d_histogram_cfd, 256, min, input.cols*input.rows);
//EXECUTE KERNEL
run_lookup_replace_kernel(blocks, d_output, d_input, input.cols * input.rows, d_histogram_equalized_lookup);
// COPY BACK DATA
SAFE_CALL(cudaMemcpy(output.ptr(), d_output, input_output_bytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
printf("Total Time %f ms\n", global_duration.count());
}else{ // CASE IT WASN'T
std::cout << "Histogram calculation found an error, closing software" << std::endl;
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_histogram_count);
cudaFree(d_histogram_cfd);
cudaFree(d_histogram_equalized_lookup);
exit(1);
}
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_histogram_count);
cudaFree(d_histogram_cfd);
cudaFree(d_histogram_equalized_lookup);
return;
}
int main(int argc, char *argv[]) {
// GET THE IMAGE PATH
std::string imagePath;
imagePath = (argc < 2) ? "images/scenery.jpg" : argv[1];
// READ INPUT IMAGE FROM DISK
cv::Mat input, colorInput = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (colorInput.empty()){
std::cout << "Image Not Found!" << std::endl;
std::cin.get();
return -1;
}
// GET THE IMAGE AND CONVERT IT TO GRAYSCALE
cv::cvtColor(colorInput, input, CV_BGR2GRAY);
// CREATE OUTPUT IMAGE
cv::Mat output = input.clone();
// CALL THE WRAPPER FUNCTION
histogram_equalizer(input, output);
cv::imwrite("output.jpg", output);
return 0;
}
|
6973822a4a6e3571563ca41d21cc29fe5cf9e118.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file elemwise_sum.cu
* \brief GPU implementation of elementwise sum operator
*/
#include "./elemwise_sum.h"
#include "../../ndarray/ndarray_function.h"
#include "../../common/cuda_vectorization.cuh"
namespace mxnet {
namespace op {
using common::cuda::VectorizedKernelLauncher;
using common::cuda::VectorizedLoader;
using common::cuda::VectorizedStorer;
namespace {
constexpr size_t num_inputs_per_kernel = 4;
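// Each launch sums at most four inputs; longer input lists are processed in groups of
// four, with every launch after the first accumulating (kAddTo) into the output written
// by the previous one (see the host-side loop in VectorizedElementwiseSum below).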
template <typename DType, int NumInputs>
struct VectorizedElementwiseSumKernelParams {
int num_inputs;
const DType* inputs[NumInputs];
DType* outputs[1];
};
template <bool aligned, typename DType, typename LType, int req>
__launch_bounds__(mxnet::common::cuda::vectorized_kernel_thread_num)
__global__ void VectorizedElementwiseSumKernel(
const VectorizedElementwiseSumKernelParams<DType, num_inputs_per_kernel> params,
const index_t N) {
VectorizedStorer<DType, LType, aligned> storer(params.outputs[0], N);
const index_t M = storer.num_aligned_elements();
for (index_t tid = blockIdx.x * blockDim.x + threadIdx.x;
tid < M;
tid += gridDim.x * blockDim.x) {
if (req == kAddTo) {
storer.load(tid, N);
} else {
#pragma unroll
for (int i = 0; i < storer.nvec(); ++i) {
storer.separate()[i] = 0;
}
}
#pragma unroll
for (int i = 0; i < num_inputs_per_kernel; ++i) {
if (i < params.num_inputs) {
VectorizedLoader<DType, LType, aligned> loader(params.inputs[i], N);
loader.load(tid, N);
#pragma unroll
for (int i = 0; i < loader.nvec(); ++i) {
storer.separate()[i] += loader.separate()[i];
}
}
}
storer.store(tid, N);
}
}
template <typename DType, int req>
class VectorizedElementwiseSumFwd {
public:
using ParamType = VectorizedElementwiseSumKernelParams<DType, num_inputs_per_kernel>;
template <bool aligned, typename LType>
static void Launch(const index_t blocks, const index_t threads,
hipStream_t stream,
const ParamType params, const index_t lead_dim,
const index_t /* other_dim */) {
hipLaunchKernelGGL(( VectorizedElementwiseSumKernel<aligned, DType, LType, req>)
, dim3(blocks), dim3(threads), 0, stream, params, lead_dim);
}
};
void VectorizedElementwiseSum(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
if (req[0] == kNullOp) return;
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
using LType = uint2;
const index_t size = inputs[0].Size();
for (size_t i = 0; i < inputs.size(); i += num_inputs_per_kernel) {
if (i == 0) {
using Kernel = VectorizedElementwiseSumFwd<DType, Req>;
typename Kernel::ParamType params;
params.num_inputs = ::min(num_inputs_per_kernel, inputs.size() - i);
for (int j = 0; j < params.num_inputs; ++j) {
params.inputs[j] = inputs[i + j].dptr<DType>();
}
params.outputs[0] = outputs[0].dptr<DType>();
VectorizedKernelLauncher<DType, LType, Kernel>(size, 1, s, params);
} else {
/* During subsequent launches we need to
accumulate into the previous outputs
*/
using Kernel = VectorizedElementwiseSumFwd<DType, kAddTo>;
typename Kernel::ParamType params;
params.num_inputs = ::min(num_inputs_per_kernel, inputs.size() - i);
for (int j = 0; j < params.num_inputs; ++j) {
params.inputs[j] = inputs[i + j].dptr<DType>();
}
params.outputs[0] = outputs[0].dptr<DType>();
VectorizedKernelLauncher<DType, LType, Kernel>(size, 1, s, params);
}
}
});
});
}
void ElementWiseSumComputeExGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK(!inputs.empty());
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp) return;
CHECK_EQ(req[0], kWriteTo) << "ElementWiseSumComputeExGPU only supports req = kWriteTo";
if (common::ContainsOnlyStorage(inputs, kRowSparseStorage) ||
(inputs.size() == 3U && inputs[0].storage_type() == kDefaultStorage &&
inputs[1].storage_type() == kCSRStorage && inputs[2].storage_type() == kDefaultStorage) ||
(inputs.size() > 4U && common::ContainsStorageType(inputs, kDefaultStorage) &&
outputs[0].storage_type() == kDefaultStorage)) {
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
NDArray out_nd = outputs[0];
mxnet::ndarray::ElementwiseSum<gpu>(s, ctx.requested[0], inputs, &out_nd);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
} // namespace
NNVM_REGISTER_OP(add_n)
.set_attr<FCompute>("FCompute<gpu>", VectorizedElementwiseSum)
.set_attr<FComputeEx>("FComputeEx<gpu>", ElementWiseSumComputeExGPU);
} // namespace op
} // namespace mxnet
| 6973822a4a6e3571563ca41d21cc29fe5cf9e118.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file elemwise_sum.cu
* \brief GPU implementation of elementwise sum operator
*/
#include "./elemwise_sum.h"
#include "../../ndarray/ndarray_function.h"
#include "../../common/cuda_vectorization.cuh"
namespace mxnet {
namespace op {
using common::cuda::VectorizedKernelLauncher;
using common::cuda::VectorizedLoader;
using common::cuda::VectorizedStorer;
namespace {
constexpr size_t num_inputs_per_kernel = 4;
template <typename DType, int NumInputs>
struct VectorizedElementwiseSumKernelParams {
int num_inputs;
const DType* inputs[NumInputs];
DType* outputs[1];
};
template <bool aligned, typename DType, typename LType, int req>
__launch_bounds__(mxnet::common::cuda::vectorized_kernel_thread_num)
__global__ void VectorizedElementwiseSumKernel(
const VectorizedElementwiseSumKernelParams<DType, num_inputs_per_kernel> params,
const index_t N) {
VectorizedStorer<DType, LType, aligned> storer(params.outputs[0], N);
const index_t M = storer.num_aligned_elements();
for (index_t tid = blockIdx.x * blockDim.x + threadIdx.x;
tid < M;
tid += gridDim.x * blockDim.x) {
if (req == kAddTo) {
storer.load(tid, N);
} else {
#pragma unroll
for (int i = 0; i < storer.nvec(); ++i) {
storer.separate()[i] = 0;
}
}
#pragma unroll
for (int i = 0; i < num_inputs_per_kernel; ++i) {
if (i < params.num_inputs) {
VectorizedLoader<DType, LType, aligned> loader(params.inputs[i], N);
loader.load(tid, N);
#pragma unroll
for (int i = 0; i < loader.nvec(); ++i) {
storer.separate()[i] += loader.separate()[i];
}
}
}
storer.store(tid, N);
}
}
template <typename DType, int req>
class VectorizedElementwiseSumFwd {
public:
using ParamType = VectorizedElementwiseSumKernelParams<DType, num_inputs_per_kernel>;
template <bool aligned, typename LType>
static void Launch(const index_t blocks, const index_t threads,
cudaStream_t stream,
const ParamType params, const index_t lead_dim,
const index_t /* other_dim */) {
VectorizedElementwiseSumKernel<aligned, DType, LType, req>
<<<blocks, threads, 0, stream>>>(params, lead_dim);
}
};
void VectorizedElementwiseSum(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
if (req[0] == kNullOp) return;
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
using LType = uint2;
const index_t size = inputs[0].Size();
for (size_t i = 0; i < inputs.size(); i += num_inputs_per_kernel) {
if (i == 0) {
using Kernel = VectorizedElementwiseSumFwd<DType, Req>;
typename Kernel::ParamType params;
params.num_inputs = std::min(num_inputs_per_kernel, inputs.size() - i);
for (int j = 0; j < params.num_inputs; ++j) {
params.inputs[j] = inputs[i + j].dptr<DType>();
}
params.outputs[0] = outputs[0].dptr<DType>();
VectorizedKernelLauncher<DType, LType, Kernel>(size, 1, s, params);
} else {
/* During subsequent launches we need to
accumulate into the previous outputs
*/
using Kernel = VectorizedElementwiseSumFwd<DType, kAddTo>;
typename Kernel::ParamType params;
params.num_inputs = std::min(num_inputs_per_kernel, inputs.size() - i);
for (int j = 0; j < params.num_inputs; ++j) {
params.inputs[j] = inputs[i + j].dptr<DType>();
}
params.outputs[0] = outputs[0].dptr<DType>();
VectorizedKernelLauncher<DType, LType, Kernel>(size, 1, s, params);
}
}
});
});
}
void ElementWiseSumComputeExGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK(!inputs.empty());
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp) return;
CHECK_EQ(req[0], kWriteTo) << "ElementWiseSumComputeExGPU only supports req = kWriteTo";
if (common::ContainsOnlyStorage(inputs, kRowSparseStorage) ||
(inputs.size() == 3U && inputs[0].storage_type() == kDefaultStorage &&
inputs[1].storage_type() == kCSRStorage && inputs[2].storage_type() == kDefaultStorage) ||
(inputs.size() > 4U && common::ContainsStorageType(inputs, kDefaultStorage) &&
outputs[0].storage_type() == kDefaultStorage)) {
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
NDArray out_nd = outputs[0];
mxnet::ndarray::ElementwiseSum<gpu>(s, ctx.requested[0], inputs, &out_nd);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
} // namespace
NNVM_REGISTER_OP(add_n)
.set_attr<FCompute>("FCompute<gpu>", VectorizedElementwiseSum)
.set_attr<FComputeEx>("FComputeEx<gpu>", ElementWiseSumComputeExGPU);
} // namespace op
} // namespace mxnet
|
84493dc92cc81d86f1934363602d80d5bbc28806.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
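// Structural note: every unary op in this file follows the same two-path pattern --
// when AT_USE_JITERATOR() is enabled the kernel is JIT-compiled from a stringified
// implementation (e.g. exp2_string) via jitted_gpu_kernel; otherwise a precompiled
// gpu_kernel lambda is dispatched over the supported dtypes.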
const char exp2_name[] = "exp2_kernel";
void exp2_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "exp2_cuda", [&]() {
jitted_gpu_kernel</*name=*/exp2_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, exp2_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "exp2_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
#endif
}
const char i0_name[] = "i0";
void i0_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() {
jitted_gpu_kernel</*name=*/i0_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
// implicit conversion of a to opmath_t will happen here,
// but as far as TI is concerned, it's still a no-dynamic-cast kernel because lambda input is scalar_t
return calc_i0<opmath_t>(a);
});
});
#endif
}
// See note [Jiterator]
const char i0e_name[] = "calc_i0e";
void i0e_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
jitted_gpu_kernel</*name=*/i0e_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i0e_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return calc_i0e<opmath_t>(a);
});
});
#endif
}
// See note [Jiterator]
const char i1_name[] = "i1";
void i1_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() {
jitted_gpu_kernel</*name=*/i1_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i1(a);
});
});
#endif // AT_USE_JITERATOR()
}
const char i1e_name[] = "i1e";
void i1e_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() {
jitted_gpu_kernel</*name=*/i1e_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i1e_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i1e(a);
});
});
#endif
}
const char sigmoid_name[] = "sigmoid";
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// only jiterate for complex-dtype
#if AT_USE_JITERATOR()
static const auto sigmoid_string = jiterator_stringify(
template <typename T>
T sigmoid(T x) {
return T{1} / (T{1} + ::exp(-x));
}
); // sigmoid_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/sigmoid_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sigmoid_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const auto one = opmath_t{1};
return static_cast<scalar_t>(one / (one + ::exp(-opmath_t{a})));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, common_dtype, "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const auto one = opmath_t{1};
return static_cast<scalar_t>(one/(one + ::exp(-opmath_t{a})));
});
});
}
}
const char sinc_name[] = "sinc";
void sinc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "sinc_cuda",
[&]() {
jitted_gpu_kernel</*name=*/sinc_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, sinc_string);
});
#else
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "sinc_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
if (a == scalar_t(0)) {
return scalar_t(1);
} else {
// NVCC says constexpr var is not accessible from device
using opmath_t = at::opmath_type<scalar_t>;
opmath_t product = c10::detail::pi<opmath_t>() * opmath_t{a};
return static_cast<scalar_t>(std::sin(product) / product);
}
});
});
#endif
}
void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.common_dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::hip::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::hip::compat::log(z / (T_ACC(1) - z));
});
}
});
}
const char ndtri_name[] = "ndtri";
void ndtri_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
jitted_gpu_kernel</*name=*/ndtri_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, ndtri_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_ndtri(a); });
});
#endif
}
const char log_ndtr_name[] = "log_ndtr";
void log_ndtr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
jitted_gpu_kernel</*name=*/log_ndtr_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, log_ndtr_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_log_ndtr(a); });
});
#endif
}
void erf_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
const char erfc_name[] = "erfc_kernel";
void erfc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() {
jitted_gpu_kernel</*name=*/erfc_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, erfc_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
#endif
}
const char erfinv_name[] = "erfinv_kernel";
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
jitted_gpu_kernel</*name=*/erfinv_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, erfinv_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
#endif
}
const char erfcx_name[] = "erfcx";
void erfcx_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
jitted_gpu_kernel</*name=*/erfcx_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, erfcx_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_erfcx(a); });
});
#endif
}
const char kaiser_window_name[] = "kaiser_window";
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1));
const opmath_t beta = static_cast<opmath_t>(beta_);
const opmath_t inv_i0_beta = 1.0 / calc_i0(beta);
jitted_gpu_kernel<
/*name=*/kaiser_window_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(
iter,
kaiser_window_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(inv_alpha, beta, inv_i0_beta));
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1));
const opmath_t beta = static_cast<opmath_t>(beta_);
const opmath_t inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t x = static_cast<opmath_t>(a) * inv_alpha - 1;
opmath_t y = std::max<opmath_t>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
#endif
}
const char entr_name[] = "entr";
void entr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() {
jitted_gpu_kernel</*name=*/entr_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, entr_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
iter.common_dtype(),
"entr_cuda",
[&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
if (at::_isnan(x)) {
return x;
} else if (x > 0) {
return -x * ::log(x);
} else if (x == 0) {
return 0;
}
return static_cast<scalar_t>(-INFINITY);
});
});
#endif
}
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(special_i1_stub, &i1_kernel_cuda);
REGISTER_DISPATCH(special_i1e_stub, &i1e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
REGISTER_DISPATCH(special_ndtri_stub, &ndtri_kernel_cuda);
REGISTER_DISPATCH(special_log_ndtr_stub, &log_ndtr_kernel_cuda);
REGISTER_DISPATCH(special_erfcx_stub, &erfcx_kernel_cuda);
} // namespace at::native
| 84493dc92cc81d86f1934363602d80d5bbc28806.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
const char exp2_name[] = "exp2_kernel";
void exp2_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "exp2_cuda", [&]() {
jitted_gpu_kernel</*name=*/exp2_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, exp2_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "exp2_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
#endif
}
const char i0_name[] = "i0";
void i0_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() {
jitted_gpu_kernel</*name=*/i0_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
// implicit conversion of a to opmath_t will happen here,
// but as far as TI is concerned, it's still a no-dynamic-cast kernel because lambda input is scalar_t
return calc_i0<opmath_t>(a);
});
});
#endif
}
// See note [Jiterator]
const char i0e_name[] = "calc_i0e";
void i0e_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
jitted_gpu_kernel</*name=*/i0e_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i0e_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return calc_i0e<opmath_t>(a);
});
});
#endif
}
// See note [Jiterator]
const char i1_name[] = "i1";
void i1_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() {
jitted_gpu_kernel</*name=*/i1_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i1(a);
});
});
#endif // AT_USE_JITERATOR()
}
const char i1e_name[] = "i1e";
void i1e_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() {
jitted_gpu_kernel</*name=*/i1e_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, i1e_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i1e(a);
});
});
#endif
}
const char sigmoid_name[] = "sigmoid";
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// only jiterate for complex-dtype
#if AT_USE_JITERATOR()
static const auto sigmoid_string = jiterator_stringify(
template <typename T>
T sigmoid(T x) {
return T{1} / (T{1} + std::exp(-x));
}
); // sigmoid_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/sigmoid_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sigmoid_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const auto one = opmath_t{1};
return static_cast<scalar_t>(one / (one + std::exp(-opmath_t{a})));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, common_dtype, "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const auto one = opmath_t{1};
return static_cast<scalar_t>(one/(one + std::exp(-opmath_t{a})));
});
});
}
}
const char sinc_name[] = "sinc";
void sinc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "sinc_cuda",
[&]() {
jitted_gpu_kernel</*name=*/sinc_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, sinc_string);
});
#else
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "sinc_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
if (a == scalar_t(0)) {
return scalar_t(1);
} else {
// NVCC says constexpr var is not accessible from device
using opmath_t = at::opmath_type<scalar_t>;
opmath_t product = c10::detail::pi<opmath_t>() * opmath_t{a};
return static_cast<scalar_t>(std::sin(product) / product);
}
});
});
#endif
}
void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.common_dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::cuda::compat::log(z / (T_ACC(1) - z));
});
}
});
}
const char ndtri_name[] = "ndtri";
void ndtri_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
jitted_gpu_kernel</*name=*/ndtri_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, ndtri_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_ndtri(a); });
});
#endif
}
const char log_ndtr_name[] = "log_ndtr";
void log_ndtr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
jitted_gpu_kernel</*name=*/log_ndtr_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, log_ndtr_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_log_ndtr(a); });
});
#endif
}
void erf_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
const char erfc_name[] = "erfc_kernel";
void erfc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() {
jitted_gpu_kernel</*name=*/erfc_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, erfc_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
#endif
}
const char erfinv_name[] = "erfinv_kernel";
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
jitted_gpu_kernel</*name=*/erfinv_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, erfinv_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
#endif
}
const char erfcx_name[] = "erfcx";
void erfcx_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
jitted_gpu_kernel</*name=*/erfcx_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, erfcx_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_erfcx(a); });
});
#endif
}
const char kaiser_window_name[] = "kaiser_window";
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1));
const opmath_t beta = static_cast<opmath_t>(beta_);
const opmath_t inv_i0_beta = 1.0 / calc_i0(beta);
jitted_gpu_kernel<
/*name=*/kaiser_window_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(
iter,
kaiser_window_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(inv_alpha, beta, inv_i0_beta));
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1));
const opmath_t beta = static_cast<opmath_t>(beta_);
const opmath_t inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t x = static_cast<opmath_t>(a) * inv_alpha - 1;
opmath_t y = std::max<opmath_t>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
#endif
}
const char entr_name[] = "entr";
void entr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() {
jitted_gpu_kernel</*name=*/entr_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, entr_string);
});
#else
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
iter.common_dtype(),
"entr_cuda",
[&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
if (at::_isnan(x)) {
return x;
} else if (x > 0) {
return -x * std::log(x);
} else if (x == 0) {
return 0;
}
return static_cast<scalar_t>(-INFINITY);
});
});
#endif
}
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(special_i1_stub, &i1_kernel_cuda);
REGISTER_DISPATCH(special_i1e_stub, &i1e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
REGISTER_DISPATCH(special_ndtri_stub, &ndtri_kernel_cuda);
REGISTER_DISPATCH(special_log_ndtr_stub, &log_ndtr_kernel_cuda);
REGISTER_DISPATCH(special_erfcx_stub, &erfcx_kernel_cuda);
} // namespace at::native
|
39e92e8a4fe9df1954140437fc10b8f0f3dfa43b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_limiter_kernel [3][1];
static int dims_limiter_kernel_h [3][1] = {0};
//user function
__device__
void limiter_kernel_gpu(const ACC<double>& al,
ACC<double> &tht,
ACC<double>& gt) {
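// Per component m: tht is a smoothness monitor built from the magnitudes of the two
// neighbouring differences, and gt is a limited slope (van Albada-type average),
// gt = (a*(b*b + eps) + b*(a*a + eps)) / (a*a + b*b + 2*eps), with eps = del2.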
double aalm, aal, all, ar, gtt;
for (int m=0; m < 3 ;m++) {
aalm = fabs(al(m,-1));
aal = fabs(al(m,0));
tht(m,0) = fabs (aal - aalm) / (aal + aalm + del2);
all = al(m,-1);
ar = al(m,0);
gtt = all * ( ar * ar + del2 ) + ar * (all * all + del2);
gt(m,0)= gtt / (ar * ar + all * all + 2.00 * del2);
}
}
__global__ void ops_limiter_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*3;
arg1 += idx_x * 1*3;
arg2 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(3, dims_limiter_kernel[0][0], arg0);
ACC<double> argp1(3, dims_limiter_kernel[1][0], arg1);
ACC<double> argp2(3, dims_limiter_kernel[2][0], arg2);
limiter_kernel_gpu(argp0, argp1, argp2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_limiter_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_limiter_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,8)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(8,"limiter_kernel");
OPS_kernels[8].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != dims_limiter_kernel_h[0][0] || xdim1 != dims_limiter_kernel_h[1][0] || xdim2 != dims_limiter_kernel_h[2][0]) {
dims_limiter_kernel_h[0][0] = xdim0;
dims_limiter_kernel_h[1][0] = xdim1;
dims_limiter_kernel_h[2][0] = xdim2;
cutilSafeCall(hipMemcpyToSymbol( dims_limiter_kernel, dims_limiter_kernel_h, sizeof(dims_limiter_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[8].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
hipLaunchKernelGGL(( ops_limiter_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2],x_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[8].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[8].mpi_time += t2-t1;
OPS_kernels[8].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[8].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[8].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_limiter_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 8;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 8;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->function = ops_par_loop_limiter_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(8,"limiter_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 39e92e8a4fe9df1954140437fc10b8f0f3dfa43b.cu | //
// auto-generated by ops.py
//
__constant__ int dims_limiter_kernel [3][1];
static int dims_limiter_kernel_h [3][1] = {0};
//user function
__device__
void limiter_kernel_gpu(const ACC<double>& al,
ACC<double> &tht,
ACC<double>& gt) {
double aalm, aal, all, ar, gtt;
for (int m=0; m < 3 ;m++) {
aalm = fabs(al(m,-1));
aal = fabs(al(m,0));
tht(m,0) = fabs (aal - aalm) / (aal + aalm + del2);
all = al(m,-1);
ar = al(m,0);
gtt = all * ( ar * ar + del2 ) + ar * (all * all + del2);
gt(m,0)= gtt / (ar * ar + all * all + 2.00 * del2);
}
}
__global__ void ops_limiter_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*3;
arg1 += idx_x * 1*3;
arg2 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(3, dims_limiter_kernel[0][0], arg0);
ACC<double> argp1(3, dims_limiter_kernel[1][0], arg1);
ACC<double> argp2(3, dims_limiter_kernel[2][0], arg2);
limiter_kernel_gpu(argp0, argp1, argp2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_limiter_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_limiter_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,8)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(8,"limiter_kernel");
OPS_kernels[8].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != dims_limiter_kernel_h[0][0] || xdim1 != dims_limiter_kernel_h[1][0] || xdim2 != dims_limiter_kernel_h[2][0]) {
dims_limiter_kernel_h[0][0] = xdim0;
dims_limiter_kernel_h[1][0] = xdim1;
dims_limiter_kernel_h[2][0] = xdim2;
cutilSafeCall(cudaMemcpyToSymbol( dims_limiter_kernel, dims_limiter_kernel_h, sizeof(dims_limiter_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[8].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
ops_limiter_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2],x_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[8].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[8].mpi_time += t2-t1;
OPS_kernels[8].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[8].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[8].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_limiter_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 8;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 8;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->function = ops_par_loop_limiter_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(8,"limiter_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
706bc3149378dfaaa789595a9aa8a7ec478400bb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Basic CUDA based triangle mesh path tracer.
* For background info, see http://raytracey.blogspot.co.nz/2015/12/gpu-path-tracing-tutorial-2-interactive.html
* Based on CUDA ray tracing code from http://cg.alexandra.dk/?p=278
* Copyright (C) 2015 Sam Lapere
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
#include <math_functions.h>
#include <hip/hip_vector_types.h>
#include <vector_functions.h>
#include "device_launch_parameters.h"
#include "cutil_math.h" // required for float3 vector math
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\extras\CUPTI\include\GL\glew.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\extras\CUPTI\include\GL\glut.h"
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define M_PI 3.14159265359f
#define width 1024 // screenwidth
#define height 576 // screenheight
#define samps 1 // samples per pixel per pass
int total_number_of_triangles = 0;
int frames = 0;
// scene bounding box
float3 scene_aabbox_min;
float3 scene_aabbox_max;
// the scene triangles are stored in a 1D CUDA texture of float4 for memory alignment
// store two edges instead of vertices
// each triangle is stored as three float4s: (float4 first_vertex, float4 edge1, float4 edge2)
texture<float4, 1, hipReadModeElementType> triangle_texture;
// hardcoded camera position
__device__ float3 firstcamorig = { 50, 52, 295.6 };
// OpenGL vertex buffer object for real-time viewport
GLuint vbo;
void *d_vbo_buffer = NULL;
struct Ray {
float3 orig; // ray origin
float3 dir; // ray direction
__device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {}
};
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance(), only DIFF used here
// SPHERES
struct Sphere {
float rad; // radius
float3 pos, emi, col; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ float intersect(const Ray &r) const { // returns distance, 0 if nohit
// Ray/sphere intersection
// Quadratic formula required to solve ax^2 + bx + c = 0
// Solution x = (-b +- sqrt(b*b - 4ac)) / 2a
// Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
float3 op = pos - r.orig; // vector from ray origin to sphere center
float t, epsilon = 0.01f;
float b = dot(op, r.dir);
float disc = b*b - dot(op, op) + rad*rad; // discriminant
if (disc<0) return 0; else disc = sqrtf(disc);
return (t = b - disc)>epsilon ? t : ((t = b + disc)>epsilon ? t : 0);
}
};
// TRIANGLES
// the classic ray triangle intersection: http://www.cs.virginia.edu/~gfx/Courses/2003/ImageSynthesis/papers/Acceleration/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf
// for an explanation see http://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/moller-trumbore-ray-triangle-intersection
__device__ float RayTriangleIntersection(const Ray &r,
const float3 &v0,
const float3 &edge1,
const float3 &edge2)
{
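// u and v computed below are the barycentric coordinates of the candidate hit point and
// the return value is the ray parameter t; a miss is reported as -1. Note that there is
// no explicit rejection of a near-zero det (ray parallel to the triangle plane).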
float3 tvec = r.orig - v0;
float3 pvec = cross(r.dir, edge2);
float det = dot(edge1, pvec);
det = __fdividef(1.0f, det); // CUDA intrinsic function
float u = dot(tvec, pvec) * det;
if (u < 0.0f || u > 1.0f)
return -1.0f;
float3 qvec = cross(tvec, edge1);
float v = dot(r.dir, qvec) * det;
if (v < 0.0f || (u + v) > 1.0f)
return -1.0f;
return dot(edge2, qvec) * det;
}
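// Illustrative addition (not part of the original code): the routine above divides by the
// determinant unconditionally, so a ray (nearly) parallel to the triangle plane yields an
// inf/NaN result that is only discarded later, either by the u/v range checks or by the
// caller's distance comparison. A guarded variant would look like this (the 1e-8f cutoff
// is an assumed value, not taken from the original source):
__device__ float RayTriangleIntersectionGuarded(const Ray &r,
const float3 &v0,
const float3 &edge1,
const float3 &edge2)
{
float3 pvec = cross(r.dir, edge2);
float det = dot(edge1, pvec);
if (fabsf(det) < 1e-8f) return -1.0f; // ray is (nearly) parallel to the triangle plane
float invdet = __fdividef(1.0f, det);
float3 tvec = r.orig - v0;
float u = dot(tvec, pvec) * invdet;
if (u < 0.0f || u > 1.0f) return -1.0f;
float3 qvec = cross(tvec, edge1);
float v = dot(r.dir, qvec) * invdet;
if (v < 0.0f || (u + v) > 1.0f) return -1.0f;
return dot(edge2, qvec) * invdet;
}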
__device__ float3 getTriangleNormal(const int triangleIndex){
float4 edge1 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 2);
// cross product of two triangle edges yields a vector orthogonal to triangle plane
float3 trinormal = cross(make_float3(edge1.x, edge1.y, edge1.z), make_float3(edge2.x, edge2.y, edge2.z));
trinormal = normalize(trinormal);
return trinormal;
}
__device__ void intersectAllTriangles(const Ray& r, float& t_scene, int& triangle_id, const int number_of_triangles, int& geomtype){
for (int i = 0; i < number_of_triangles; i++)
{
// the triangles are packed into the 1D texture using three consecutive float4 structs for each triangle,
// first float4 contains the first vertex, second float4 contains the first precomputed edge, third float4 contains second precomputed edge like this:
// (float4(vertex.x, vertex.y, vertex.z, 0), float4(edge1.x, edge1.y, edge1.z, 0), float4(edge2.x, edge2.y, edge2.z, 0))
// i is triangle index, each triangle represented by 3 float4s in triangle_texture
float4 v0 = tex1Dfetch(triangle_texture, i * 3);
float4 edge1 = tex1Dfetch(triangle_texture, i * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, i * 3 + 2);
// intersect ray with reconstructed triangle
float t = RayTriangleIntersection(r,
make_float3(v0.x, v0.y, v0.z),
make_float3(edge1.x, edge1.y, edge1.z),
make_float3(edge2.x, edge2.y, edge2.z));
// keep track of closest distance and closest triangle
// if ray/tri intersection finds an intersection point that is closer than closest intersection found so far
if (t < t_scene && t > 0.001)
{
t_scene = t;
triangle_id = i;
geomtype = 3;
}
}
}
// AXIS ALIGNED BOXES
// helper functions
inline __device__ float3 minf3(float3 a, float3 b){ return make_float3(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z); }
inline __device__ float3 maxf3(float3 a, float3 b){ return make_float3(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z); }
inline __device__ float minf1(float a, float b){ return a < b ? a : b; }
inline __device__ float maxf1(float a, float b){ return a > b ? a : b; }
struct Box {
float3 min; // minimum bounds
float3 max; // maximum bounds
float3 emi; // emission
float3 col; // colour
Refl_t refl; // material type
// ray/box intersection
// for theoretical background of the algorithm see
// http://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
// optimised code from http://www.gamedev.net/topic/495636-raybox-collision-intersection-point/
__device__ float intersect(const Ray &r) const {
float epsilon = 0.001f; // required to prevent self intersection
float3 tmin = (min - r.orig) / r.dir;
float3 tmax = (max - r.orig) / r.dir;
float3 real_min = minf3(tmin, tmax);
float3 real_max = maxf3(tmin, tmax);
float minmax = minf1(minf1(real_max.x, real_max.y), real_max.z);
float maxmin = maxf1(maxf1(real_min.x, real_min.y), real_min.z);
if (minmax >= maxmin) { return maxmin > epsilon ? maxmin : 0; }
else return 0;
}
// calculate normal for point on axis aligned box
__device__ float3 normalAt(float3 &point) {
float3 normal = make_float3(0.f, 0.f, 0.f);
float min_distance = 1e8;
float distance;
float epsilon = 0.001f;
if (fabs(min.x - point.x) < epsilon) normal = make_float3(-1, 0, 0);
else if (fabs(max.x - point.x) < epsilon) normal = make_float3(1, 0, 0);
else if (fabs(min.y - point.y) < epsilon) normal = make_float3(0, -1, 0);
else if (fabs(max.y - point.y) < epsilon) normal = make_float3(0, 1, 0);
else if (fabs(min.z - point.z) < epsilon) normal = make_float3(0, 0, -1);
else normal = make_float3(0, 0, 1);
return normal;
}
};
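// Illustrative addition (not part of the original code): the same slab test written with
// explicit entry/exit names, as an unused reference. For bmin = (0,0,0), bmax = (1,1,1) and a
// ray from (-1,-1,-1) along normalize(1,1,1), every slab is entered at t = sqrt(3) ~ 1.732 and
// left at t = 2*sqrt(3) ~ 3.464, so the function returns ~1.732.
__device__ float slabTestExample(float3 bmin, float3 bmax, const Ray &r)
{
float3 t0 = (bmin - r.orig) / r.dir;
float3 t1 = (bmax - r.orig) / r.dir;
float3 tnear = minf3(t0, t1);
float3 tfar = maxf3(t0, t1);
float enter = maxf1(maxf1(tnear.x, tnear.y), tnear.z); // last entry into the three slabs
float exit = minf1(minf1(tfar.x, tfar.y), tfar.z); // first exit out of the three slabs
return (exit >= enter && enter > 0.001f) ? enter : 0; // same epsilon policy as Box::intersect
}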
// scene spheres: the active scene is the outdoor one below (7 spheres); the Cornell box variant is commented out
// small enough to fit in constant GPU memory
__constant__ Sphere spheres[] = {
// FORMAT: { float radius, float3 position, float3 emission, float3 colour, Refl_t material }
// cornell box
//{ 1e5f, { 1e5f + 1.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { 0.75f, 0.25f, 0.25f }, DIFF }, //Left 1e5f
//{ 1e5f, { -1e5f + 99.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .25f, .25f, .75f }, DIFF }, //Right
//{ 1e5f, { 50.0f, 40.8f, 1e5f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Back
//{ 1e5f, { 50.0f, 40.8f, -1e5f + 600.0f }, { 0.0f, 0.0f, 0.0f }, { 0.00f, 0.00f, 0.00f }, DIFF }, //Front
//{ 1e5f, { 50.0f, -1e5f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Bottom
//{ 1e5f, { 50.0f, -1e5f + 81.6f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Top
//{ 16.5f, { 27.0f, 16.5f, 47.0f }, { 0.0f, 0.0f, 0.0f }, { 0.99f, 0.99f, 0.99f }, SPEC }, // small sphere 1
//{ 16.5f, { 73.0f, 16.5f, 78.0f }, { 0.0f, 0.f, .0f }, { 0.09f, 0.49f, 0.3f }, REFR }, // small sphere 2
//{ 600.0f, { 50.0f, 681.6f - .5f, 81.6f }, { 3.0f, 2.5f, 2.0f }, { 0.0f, 0.0f, 0.0f }, DIFF } // Light 12, 10 ,8
//outdoor scene: radius, position, emission, color, material
//{ 1600, { 3000.0f, 10, 6000 }, { 37, 34, 30 }, { 0.f, 0.f, 0.f }, DIFF }, // 37, 34, 30 // sun
//{ 1560, { 3500.0f, 0, 7000 }, { 50, 25, 2.5 }, { 0.f, 0.f, 0.f }, DIFF }, // 150, 75, 7.5 // sun 2
{ 10000, { 50.0f, 40.8f, -1060 }, { 0.0003, 0.01, 0.15 }, { 0.175f, 0.175f, 0.25f }, DIFF }, // sky
{ 100000, { 50.0f, -100000, 0 }, { 0.0, 0.0, 0 }, { 0.8f, 0.2f, 0.f }, DIFF }, // ground
{ 110000, { 50.0f, -110048.5, 0 }, { 3.6, 2.0, 0.2 }, { 0.f, 0.f, 0.f }, DIFF }, // horizon brightener
{ 4e4, { 50.0f, -4e4 - 30, -3000 }, { 0, 0, 0 }, { 0.2f, 0.2f, 0.2f }, DIFF }, // mountains
{ 82.5, { 30.0f, 180.5, 42 }, { 16, 12, 6 }, { .6f, .6f, 0.6f }, DIFF }, // small sphere 1
{ 12, { 115.0f, 10, 105 }, { 0.0, 0.0, 0.0 }, { 0.9f, 0.9f, 0.9f }, REFR }, // small sphere 2
{ 22, { 65.0f, 22, 24 }, { 0, 0, 0 }, { 0.9f, 0.9f, 0.9f }, SPEC }, // small sphere 3
};
__constant__ Box boxes[] = {
// FORMAT: { float3 minbounds, float3 maxbounds, float3 emission, float3 colour, Refl_t }
{ { 5.0f, 0.0f, 70.0f }, { 45.0f, 11.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ {85.0f, 0.0f, 95.0f }, { 95.0f, 20.0f, 105.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ {75.0f, 20.0f, 85.0f}, { 105.0f, 22.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
};
__device__ inline bool intersect_scene(const Ray &r, float &t, int &sphere_id, int &box_id, int& triangle_id, const int number_of_triangles, int &geomtype, const float3& bbmin, const float3& bbmax){
float tmin = 1e20;
float tmax = -1e20;
float d = 1e21;
float k = 1e21;
float q = 1e21;
float inf = t = 1e20;
// SPHERES
// intersect all spheres in the scene
float numspheres = sizeof(spheres) / sizeof(Sphere);
for (int i = int(numspheres); i--;) // for all spheres in scene
// keep track of distance from origin to closest intersection point
if ((d = spheres[i].intersect(r)) && d < t){ t = d; sphere_id = i; geomtype = 1; }
// BOXES
// intersect all boxes in the scene
float numboxes = sizeof(boxes) / sizeof(Box);
for (int i = int(numboxes); i--;) // for all boxes in scene
if ((k = boxes[i].intersect(r)) && k < t){ t = k; box_id = i; geomtype = 2; }
// TRIANGLES
Box scene_bbox; // bounding box around triangle meshes
scene_bbox.min = bbmin;
scene_bbox.max = bbmax;
// if ray hits bounding box of triangle meshes, intersect ray with all triangles
if (scene_bbox.intersect(r)){
intersectAllTriangles(r, t, triangle_id, number_of_triangles, geomtype);
}
// t is distance to closest intersection of ray with all primitives in the scene (spheres, boxes and triangles)
return t<inf;
}
// hash function to calculate new seed for each frame
// see http://www.reedbeta.com/blog/2013/01/12/quick-and-easy-gpu-random-numbers-in-d3d11/
uint WangHash(uint a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
// radiance function
// compute path bounces in scene and accumulate returned color from each path segment
__device__ float3 radiance(Ray &r, hiprandState_t *randstate, const int totaltris, const float3& scene_aabb_min, const float3& scene_aabb_max){ // returns ray color
// colour mask
float3 mask = make_float3(1.0f, 1.0f, 1.0f);
// accumulated colour
float3 accucolor = make_float3(0.0f, 0.0f, 0.0f);
for (int bounces = 0; bounces < 5; bounces++){ // iterate up to 5 bounces (instead of recursion in the CPU code)
// reset scene intersection function parameters
float t = 100000; // distance to intersection
int sphere_id = -1;
int box_id = -1; // index of intersected box
int triangle_id = -1;
int geomtype = -1;
float3 f; // primitive colour
float3 emit; // primitive emission colour
float3 x; // intersection point
float3 n; // normal
float3 nl; // oriented normal
float3 d; // ray direction of next path segment
Refl_t refltype;
// intersect ray with scene
// intersect_scene keeps track of closest intersected primitive and distance to closest intersection point
if (!intersect_scene(r, t, sphere_id, box_id, triangle_id, totaltris, geomtype, scene_aabb_min, scene_aabb_max))
return make_float3(0.0f, 0.0f, 0.0f); // if miss, return black
// else: we've got a hit with a scene primitive
// determine geometry type of primitive: sphere/box/triangle
// if sphere:
if (geomtype == 1){
Sphere &sphere = spheres[sphere_id]; // hit object with closest intersection
x = r.orig + r.dir*t; // intersection point on object
n = normalize(x - sphere.pos); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = sphere.col; // object colour
refltype = sphere.refl;
emit = sphere.emi; // object emission
accucolor += (mask * emit);
}
// if box:
if (geomtype == 2){
Box &box = boxes[box_id];
x = r.orig + r.dir*t; // intersection point on object
n = normalize(box.normalAt(x)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = box.col; // box colour
refltype = box.refl;
emit = box.emi; // box emission
accucolor += (mask * emit);
}
// if triangle:
if (geomtype == 3){
int tri_index = triangle_id;
x = r.orig + r.dir*t; // intersection point
n = normalize(getTriangleNormal(tri_index)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
// colour, refltype and emit value are hardcoded and apply to all triangles
// no per triangle material support yet
f = make_float3(0.9f, 0.4f, 0.1f); // triangle colour
refltype = REFR;
emit = make_float3(0.0f, 0.0f, 0.0f);
accucolor += (mask * emit);
}
// SHADING: diffuse, specular or refractive
// ideal diffuse reflection (see "Realistic Ray Tracing", P. Shirley)
if (refltype == DIFF){
// create 2 random numbers
float r1 = 2 * M_PI * hiprand_uniform(randstate);
float r2 = hiprand_uniform(randstate);
float r2s = sqrtf(r2);
// compute orthonormal coordinate frame uvw with hitpoint as origin
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
// compute cosine weighted random ray direction on hemisphere
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
// offset origin next path segment to prevent self intersection
x += nl * 0.03;
// multiply mask with colour of object
mask *= f;
}
// ideal specular reflection (mirror)
if (refltype == SPEC){
// compute reflected ray direction according to the law of reflection
d = r.dir - 2.0f * n * dot(n, r.dir);
// offset origin next path segment to prevent self intersection
x += nl * 0.01f;
// multiply mask with colour of object
mask *= f;
}
// ideal refraction (based on smallpt code by Kevin Beason)
if (refltype == REFR){
bool into = dot(n, nl) > 0; // is ray entering or leaving refractive material?
float nc = 1.0f; // Index of Refraction air
float nt = 1.5f; // Index of Refraction glass/water
float nnt = into ? nc / nt : nt / nc; // IOR ratio of refractive materials
float ddn = dot(r.dir, nl);
float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);
if (cos2t < 0.0f) // total internal reflection
{
d = reflect(r.dir, n); //d = r.dir - 2.0f * n * dot(n, r.dir);
x += nl * 0.01f;
}
else // cos2t > 0
{
// compute direction of transmission ray
float3 tdir = normalize(r.dir * nnt - n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t))));
float R0 = ((nt - nc)*(nt - nc)) / ((nt + nc)*(nt + nc)); // Schlick's reflectance at normal incidence
float c = 1.f - (into ? -ddn : dot(tdir, n));
float Re = R0 + (1.f - R0) * c * c * c * c * c;
float Tr = 1 - Re; // Transmission
float P = .25f + .5f * Re;
float RP = Re / P;
float TP = Tr / (1.f - P);
// randomly choose reflection or transmission ray
if (hiprand_uniform(randstate) < 0.25) // reflection ray
{
mask *= RP;
d = reflect(r.dir, n);
x += nl * 0.02f;
}
else // transmission ray
{
mask *= TP;
d = tdir; //r = Ray(x, tdir);
x += nl * 0.0005f; // epsilon must be small to avoid artefacts
}
}
}
// set up origin and direction of next path segment
r.orig = x;
r.dir = d;
}
// add radiance up to a certain ray depth
// return accumulated ray colour after all bounces are computed
return accucolor;
}
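// Illustrative addition (not part of the original code): the cosine-weighted hemisphere
// sampling used in the DIFF branch above, factored into a standalone helper. r1 is an angle
// in [0, 2*pi), r2 is uniform in [0, 1); the returned direction is distributed with pdf
// cos(theta)/pi around the normal n, which is what makes the simple "mask *= f" weighting valid.
__device__ float3 cosineSampleHemisphere(float3 n, float r1, float r2)
{
float r2s = sqrtf(r2);
float3 w = n;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
return normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
}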
// required to convert colour to a format that OpenGL can display
union Colour // 4 bytes = 4 chars = 1 float
{
float c;
uchar4 components;
};
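// Illustrative addition (not part of the original code): how the union is used. Three 8-bit
// channels plus alpha are packed into the bit pattern of one float, which then rides along in
// the float3 vertex buffer as the third component (render_kernel applies gamma before packing).
// rgb01 is assumed to hold values already clamped to [0,1].
inline __device__ float packRGBA8(float3 rgb01)
{
Colour c;
c.components = make_uchar4((unsigned char)(rgb01.x * 255), (unsigned char)(rgb01.y * 255), (unsigned char)(rgb01.z * 255), 1);
return c.c;
}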
__global__ void render_kernel(float3 *output, float3* accumbuffer, const int numtriangles, int framenumber, uint hashedframenumber, float3 scene_bbmin, float3 scene_bbmax){ // float3 *gputexdata1, int *texoffsets
// assign a CUDA thread to every pixel by using the threadIndex
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// global threadId, see richiesams blogspot
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
// create random number generator, see RichieSams blogspot
hiprandState_t randState; // state of the random number generator, to prevent repetition
hiprand_init(hashedframenumber + threadId, 0, 0, &randState);
Ray cam(firstcamorig, normalize(make_float3(0, -0.042612, -1)));
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset along X-axis
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray dir offset along Y-axis, .5135 is FOV angle
float3 pixelcol; // final pixel color
int i = (height - y - 1)*width + x; // pixel index
pixelcol = make_float3(0.0f, 0.0f, 0.0f); // reset to zero for every pixel
for (int s = 0; s < samps; s++){
// compute primary ray direction
float3 d = cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5) + cam.dir;
// normalize primary ray direction
d = normalize(d);
// add accumulated colour from path bounces
pixelcol += radiance(Ray(cam.orig + d * 40, d), &randState, numtriangles, scene_bbmin, scene_bbmax)*(1. / samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
// add pixel colour to accumulation buffer (accumulates all samples)
accumbuffer[i] += pixelcol;
// averaged colour: divide colour by the number of calculated frames so far
float3 tempcol = accumbuffer[i] / framenumber;
Colour fcolour;
float3 colour = make_float3(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f));
// convert from 96-bit to 24-bit colour + perform gamma correction
fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / 2.2f) * 255), (unsigned char)(powf(colour.y, 1 / 2.2f) * 255), (unsigned char)(powf(colour.z, 1 / 2.2f) * 255), 1);
// store pixel coordinates and pixelcolour in OpenGL readable outputbuffer
output[i] = make_float3(x, y, fcolour.c);
}
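// Illustrative addition (not part of the original code): the primary ray direction computed
// inside the sampling loop above, isolated as a helper for readability. cx and cy are the
// per-pixel offsets built from the .5135 field-of-view factor; x and y are pixel coordinates.
inline __device__ float3 primaryRayDir(float3 camdir, float3 cx, float3 cy, int x, int y)
{
return normalize(cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5) + camdir);
}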
void Timer(int obsolete) {
glutPostRedisplay();
glutTimerFunc(30, Timer, 0);
}
__device__ float timer = 0.0f;
inline float clamp(float x){ return x<0 ? 0 : x>1 ? 1 : x; }
//inline int toInt(float x){ return int(pow(clamp(x), 1 / 2.2) * 255 + .5); } // RGB float in range [0,1] to int in range [0, 255]
// buffer for accumulating samples over several frames
float3* accumulatebuffer;
// output buffer
float3 *dptr;
void disp(void)
{
frames++;
hipDeviceSynchronize();
// map vertex buffer object for access by CUDA
hipGLMapBufferObject__((void**)&dptr, vbo);
//clear all pixels:
glClear(GL_COLOR_BUFFER_BIT);
// RAY TRACING:
// dim3 grid(WINDOW / block.x, WINDOW / block.y, 1);
// dim3 CUDA specific syntax, block and grid are required to schedule CUDA threads over streaming multiprocessors
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
// launch CUDA path tracing kernel, pass in a hashed seed based on number of frames
hipLaunchKernelGGL(( render_kernel) , dim3(grid), dim3(block) , 0, 0, dptr, accumulatebuffer, total_number_of_triangles, frames, WangHash(frames), scene_aabbox_max, scene_aabbox_min); // launches CUDA render kernel from the host
hipDeviceSynchronize();
// unmap buffer
hipGLUnmapBufferObject(vbo);
//glFlush();
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(2, GL_FLOAT, 12, 0);
glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glDrawArrays(GL_POINTS, 0, width * height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
//glutPostRedisplay();
}
// load triangle data in a CUDA texture
extern "C"
{
void bindTriangles(float *dev_triangle_p, unsigned int number_of_triangles)
{
triangle_texture.normalized = false; // use unnormalized (integer) texture coordinates
triangle_texture.filterMode = hipFilterModePoint; // Point mode, so no filtering/interpolation
triangle_texture.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
size_t size = sizeof(float4)*number_of_triangles * 3;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
hipBindTexture(0, triangle_texture, dev_triangle_p, channelDesc, size);
}
}
// helpers to load triangle data
struct TriangleFace
{
int v[3]; // vertex indices
};
struct TriangleMesh
{
std::vector<float3> verts;
std::vector<TriangleFace> faces;
float3 bounding_box[2];
};
TriangleMesh mesh1;
TriangleMesh mesh2;
float *dev_triangle_p; // the cuda device pointer that points to the uploaded triangles
void loadObj(const std::string filename, TriangleMesh &mesh); // forward declaration
// 1. load triangle mesh data from obj files
// 2. copy data to CPU memory (into vector<float4> triangles)
// 3. copy to CUDA global memory (allocated with dev_triangle_p pointer)
// 4. copy to CUDA texture memory with bindtriangles()
void initCUDAmemoryTriMesh()
{
loadObj("data/teapot.obj", mesh1);
loadObj("data/bunny.obj", mesh2);
// scalefactor and offset to position/scale triangle meshes
float scalefactor1 = 8;
float scalefactor2 = 300; // 300
float3 offset1 = make_float3(90, 22, 100);// (30, -2, 80);
float3 offset2 = make_float3(30, -2, 80);
std::vector<float4> triangles;
for (unsigned int i = 0; i < mesh1.faces.size(); i++)
{
// make a local copy of the triangle vertices
float3 v0 = mesh1.verts[mesh1.faces[i].v[0] - 1];
float3 v1 = mesh1.verts[mesh1.faces[i].v[1] - 1];
float3 v2 = mesh1.verts[mesh1.faces[i].v[2] - 1];
// scale
v0 *= scalefactor1;
v1 *= scalefactor1;
v2 *= scalefactor1;
// translate
v0 += offset1;
v1 += offset1;
v2 += offset1;
// store triangle data as float4
// store two edges per triangle instead of vertices, to save some calculations in the
// ray triangle intersection test
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 0));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
// compute bounding box of this mesh
mesh1.bounding_box[0] *= scalefactor1; mesh1.bounding_box[0] += offset1;
mesh1.bounding_box[1] *= scalefactor1; mesh1.bounding_box[1] += offset1;
for (unsigned int i = 0; i < mesh2.faces.size(); i++)
{
float3 v0 = mesh2.verts[mesh2.faces[i].v[0] - 1];
float3 v1 = mesh2.verts[mesh2.faces[i].v[1] - 1];
float3 v2 = mesh2.verts[mesh2.faces[i].v[2] - 1];
v0 *= scalefactor2;
v1 *= scalefactor2;
v2 *= scalefactor2;
v0 += offset2;
v1 += offset2;
v2 += offset2;
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 1));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
mesh2.bounding_box[0] *= scalefactor2; mesh2.bounding_box[0] += offset2;
mesh2.bounding_box[1] *= scalefactor2; mesh2.bounding_box[1] += offset2;
std::cout << "total number of triangles check:" << mesh1.faces.size() + mesh2.faces.size() << " == " << triangles.size() / 3 << std::endl;
// calculate total number of triangles in the scene
size_t triangle_size = triangles.size() * sizeof(float4);
int total_num_triangles = triangles.size() / 3;
total_number_of_triangles = total_num_triangles;
if (triangle_size > 0)
{
// allocate memory for the triangle meshes on the GPU
hipMalloc((void **)&dev_triangle_p, triangle_size);
// copy triangle data to GPU
hipMemcpy(dev_triangle_p, &triangles[0], triangle_size, hipMemcpyHostToDevice);
// load triangle data into a CUDA texture
bindTriangles(dev_triangle_p, total_num_triangles);
}
// compute scene bounding box by merging bounding boxes of individual meshes
scene_aabbox_min = mesh2.bounding_box[0];
scene_aabbox_max = mesh2.bounding_box[1];
scene_aabbox_min = fminf(scene_aabbox_min, mesh1.bounding_box[0]);
scene_aabbox_max = fmaxf(scene_aabbox_max, mesh1.bounding_box[1]);
}
// read triangle data from obj file
void loadObj(const std::string filename, TriangleMesh &mesh)
{
std::ifstream in(filename.c_str());
if (!in.good())
{
std::cout << "ERROR: loading obj:(" << filename << ") file not found or not good" << "\n";
system("PAUSE");
exit(0);
}
char buffer[256], str[255];
float f1, f2, f3;
while (!in.getline(buffer, 255).eof())
{
buffer[255] = '\0';
sscanf_s(buffer, "%s", str, 255);
// reading a vertex
if (buffer[0] == 'v' && (buffer[1] == ' ' || buffer[1] == 32)){
if (sscanf(buffer, "v %f %f %f", &f1, &f2, &f3) == 3){
mesh.verts.push_back(make_float3(f1, f2, f3));
}
else{
std::cout << "ERROR: vertex not in wanted format in OBJLoader" << "\n";
exit(-1);
}
}
// reading faceMtls
else if (buffer[0] == 'f' && (buffer[1] == ' ' || buffer[1] == 32))
{
TriangleFace f;
int nt = sscanf(buffer, "f %d %d %d", &f.v[0], &f.v[1], &f.v[2]);
if (nt != 3){
std::cout << "ERROR: I don't know the format of that FaceMtl" << "\n";
exit(-1);
}
mesh.faces.push_back(f);
}
}
// calculate the bounding box of the mesh
mesh.bounding_box[0] = make_float3(1000000, 1000000, 1000000);
mesh.bounding_box[1] = make_float3(-1000000, -1000000, -1000000);
for (unsigned int i = 0; i < mesh.verts.size(); i++)
{
//update min and max value
mesh.bounding_box[0] = fminf(mesh.verts[i], mesh.bounding_box[0]);
mesh.bounding_box[1] = fmaxf(mesh.verts[i], mesh.bounding_box[1]);
}
std::cout << "obj file loaded: number of faces:" << mesh.faces.size() << " number of vertices:" << mesh.verts.size() << std::endl;
std::cout << "obj bounding box: min:(" << mesh.bounding_box[0].x << "," << mesh.bounding_box[0].y << "," << mesh.bounding_box[0].z << ") max:"
<< mesh.bounding_box[1].x << "," << mesh.bounding_box[1].y << "," << mesh.bounding_box[1].z << ")" << std::endl;
}
int main(int argc, char** argv){
// allocate memory for the accumulation buffer on the GPU
hipMalloc(&accumulatebuffer, width * height * sizeof(float3));
// load triangle meshes in CUDA memory
initCUDAmemoryTriMesh();
// init glut for OpenGL viewport
glutInit(&argc, argv);
// specify the display mode to be RGB and single buffering
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
// specify the initial window position
glutInitWindowPosition(100, 100);
// specify the initial window size
glutInitWindowSize(width, height);
// create the window and set title
glutCreateWindow("Basic triangle mesh path tracer in CUDA");
// init OpenGL
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0.0, width, 0.0, height);
fprintf(stderr, "OpenGL initialized \n");
// register callback function to display graphics:
glutDisplayFunc(disp);
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 ")) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
exit(0);
}
fprintf(stderr, "glew initialized \n");
// call Timer():
Timer(0);
//create VBO (vertex buffer object)
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
//initialize VBO
unsigned int size = width * height * sizeof(float3); // 3 floats
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//register VBO with CUDA
hipGLRegisterBufferObject(vbo);
fprintf(stderr, "VBO created \n");
// enter the main loop and process events
fprintf(stderr, "Entering glutMainLoop... \n");
glutMainLoop();
// free CUDA memory on exit
hipFree(accumulatebuffer);
hipFree(dev_triangle_p);
hipFree(dptr);
}
| 706bc3149378dfaaa789595a9aa8a7ec478400bb.cu | /*
* Basic CUDA based triangle mesh path tracer.
* For background info, see http://raytracey.blogspot.co.nz/2015/12/gpu-path-tracing-tutorial-2-interactive.html
* Based on CUDA ray tracing code from http://cg.alexandra.dk/?p=278
* Copyright (C) 2015 Sam Lapere
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cuda.h>
#include <math_functions.h>
#include <vector_types.h>
#include <vector_functions.h>
#include "device_launch_parameters.h"
#include "cutil_math.h" // required for float3 vector math
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\extras\CUPTI\include\GL\glew.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.5\extras\CUPTI\include\GL\glut.h"
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <curand.h>
#include <curand_kernel.h>
#define M_PI 3.14159265359f
#define width 1024 // screenwidth
#define height 576 // screenheight
#define samps 1 // samples per pixel per pass
int total_number_of_triangles = 0;
int frames = 0;
// scene bounding box
float3 scene_aabbox_min;
float3 scene_aabbox_max;
// the scene triangles are stored in a 1D CUDA texture of float4 for memory alignment
// store two edges instead of vertices
// each triangle is stored as three float4s: (float4 first_vertex, float4 edge1, float4 edge2)
texture<float4, 1, cudaReadModeElementType> triangle_texture;
// hardcoded camera position
__device__ float3 firstcamorig = { 50, 52, 295.6 };
// OpenGL vertex buffer object for real-time viewport
GLuint vbo;
void *d_vbo_buffer = NULL;
struct Ray {
float3 orig; // ray origin
float3 dir; // ray direction
__device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {}
};
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance(), only DIFF used here
// SPHERES
struct Sphere {
float rad; // radius
float3 pos, emi, col; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ float intersect(const Ray &r) const { // returns distance, 0 if nohit
// Ray/sphere intersection
// Quadratic formula required to solve ax^2 + bx + c = 0
// Solution x = (-b +- sqrt(b*b - 4ac)) / 2a
// Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
float3 op = pos - r.orig; // vector from ray origin to sphere centre
float t, epsilon = 0.01f;
float b = dot(op, r.dir);
float disc = b*b - dot(op, op) + rad*rad; // discriminant
if (disc<0) return 0; else disc = sqrtf(disc);
return (t = b - disc)>epsilon ? t : ((t = b + disc)>epsilon ? t : 0);
}
};
// TRIANGLES
// the classic ray triangle intersection: http://www.cs.virginia.edu/~gfx/Courses/2003/ImageSynthesis/papers/Acceleration/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf
// for an explanation see http://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/moller-trumbore-ray-triangle-intersection
__device__ float RayTriangleIntersection(const Ray &r,
const float3 &v0,
const float3 &edge1,
const float3 &edge2)
{
float3 tvec = r.orig - v0;
float3 pvec = cross(r.dir, edge2);
float det = dot(edge1, pvec);
det = __fdividef(1.0f, det); // CUDA intrinsic function
float u = dot(tvec, pvec) * det;
if (u < 0.0f || u > 1.0f)
return -1.0f;
float3 qvec = cross(tvec, edge1);
float v = dot(r.dir, qvec) * det;
if (v < 0.0f || (u + v) > 1.0f)
return -1.0f;
return dot(edge2, qvec) * det;
}
__device__ float3 getTriangleNormal(const int triangleIndex){
float4 edge1 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 2);
// cross product of two triangle edges yields a vector orthogonal to triangle plane
float3 trinormal = cross(make_float3(edge1.x, edge1.y, edge1.z), make_float3(edge2.x, edge2.y, edge2.z));
trinormal = normalize(trinormal);
return trinormal;
}
__device__ void intersectAllTriangles(const Ray& r, float& t_scene, int& triangle_id, const int number_of_triangles, int& geomtype){
for (int i = 0; i < number_of_triangles; i++)
{
// the triangles are packed into the 1D texture using three consecutive float4 structs for each triangle,
// first float4 contains the first vertex, second float4 contains the first precomputed edge, third float4 contains second precomputed edge like this:
// (float4(vertex.x,vertex.y,vertex.z, 0), float4 (egde1.x,egde1.y,egde1.z,0),float4 (egde2.x,egde2.y,egde2.z,0))
// i is triangle index, each triangle represented by 3 float4s in triangle_texture
float4 v0 = tex1Dfetch(triangle_texture, i * 3);
float4 edge1 = tex1Dfetch(triangle_texture, i * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, i * 3 + 2);
// intersect ray with reconstructed triangle
float t = RayTriangleIntersection(r,
make_float3(v0.x, v0.y, v0.z),
make_float3(edge1.x, edge1.y, edge1.z),
make_float3(edge2.x, edge2.y, edge2.z));
// keep track of closest distance and closest triangle
// if ray/tri intersection finds an intersection point that is closer than closest intersection found so far
if (t < t_scene && t > 0.001)
{
t_scene = t;
triangle_id = i;
geomtype = 3;
}
}
}
// AXIS ALIGNED BOXES
// helper functions
inline __device__ float3 minf3(float3 a, float3 b){ return make_float3(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z); }
inline __device__ float3 maxf3(float3 a, float3 b){ return make_float3(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z); }
inline __device__ float minf1(float a, float b){ return a < b ? a : b; }
inline __device__ float maxf1(float a, float b){ return a > b ? a : b; }
struct Box {
float3 min; // minimum bounds
float3 max; // maximum bounds
float3 emi; // emission
float3 col; // colour
Refl_t refl; // material type
// ray/box intersection
// for theoretical background of the algorithm see
// http://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
// optimised code from http://www.gamedev.net/topic/495636-raybox-collision-intersection-point/
__device__ float intersect(const Ray &r) const {
float epsilon = 0.001f; // required to prevent self intersection
float3 tmin = (min - r.orig) / r.dir;
float3 tmax = (max - r.orig) / r.dir;
float3 real_min = minf3(tmin, tmax);
float3 real_max = maxf3(tmin, tmax);
float minmax = minf1(minf1(real_max.x, real_max.y), real_max.z);
float maxmin = maxf1(maxf1(real_min.x, real_min.y), real_min.z);
if (minmax >= maxmin) { return maxmin > epsilon ? maxmin : 0; }
else return 0;
}
// calculate normal for point on axis aligned box
__device__ float3 normalAt(float3 &point) {
float3 normal = make_float3(0.f, 0.f, 0.f);
float min_distance = 1e8;
float distance;
float epsilon = 0.001f;
if (fabs(min.x - point.x) < epsilon) normal = make_float3(-1, 0, 0);
else if (fabs(max.x - point.x) < epsilon) normal = make_float3(1, 0, 0);
else if (fabs(min.y - point.y) < epsilon) normal = make_float3(0, -1, 0);
else if (fabs(max.y - point.y) < epsilon) normal = make_float3(0, 1, 0);
else if (fabs(min.z - point.z) < epsilon) normal = make_float3(0, 0, -1);
else normal = make_float3(0, 0, 1);
return normal;
}
};
// scene spheres: the active scene is the outdoor one below (7 spheres); the Cornell box variant is commented out
// small enough to fit in constant GPU memory
__constant__ Sphere spheres[] = {
// FORMAT: { float radius, float3 position, float3 emission, float3 colour, Refl_t material }
// cornell box
//{ 1e5f, { 1e5f + 1.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { 0.75f, 0.25f, 0.25f }, DIFF }, //Left 1e5f
//{ 1e5f, { -1e5f + 99.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .25f, .25f, .75f }, DIFF }, //Right
//{ 1e5f, { 50.0f, 40.8f, 1e5f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Back
//{ 1e5f, { 50.0f, 40.8f, -1e5f + 600.0f }, { 0.0f, 0.0f, 0.0f }, { 0.00f, 0.00f, 0.00f }, DIFF }, //Front
//{ 1e5f, { 50.0f, -1e5f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Bottom
//{ 1e5f, { 50.0f, -1e5f + 81.6f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Top
//{ 16.5f, { 27.0f, 16.5f, 47.0f }, { 0.0f, 0.0f, 0.0f }, { 0.99f, 0.99f, 0.99f }, SPEC }, // small sphere 1
//{ 16.5f, { 73.0f, 16.5f, 78.0f }, { 0.0f, 0.f, .0f }, { 0.09f, 0.49f, 0.3f }, REFR }, // small sphere 2
//{ 600.0f, { 50.0f, 681.6f - .5f, 81.6f }, { 3.0f, 2.5f, 2.0f }, { 0.0f, 0.0f, 0.0f }, DIFF } // Light 12, 10 ,8
//outdoor scene: radius, position, emission, color, material
//{ 1600, { 3000.0f, 10, 6000 }, { 37, 34, 30 }, { 0.f, 0.f, 0.f }, DIFF }, // 37, 34, 30 // sun
//{ 1560, { 3500.0f, 0, 7000 }, { 50, 25, 2.5 }, { 0.f, 0.f, 0.f }, DIFF }, // 150, 75, 7.5 // sun 2
{ 10000, { 50.0f, 40.8f, -1060 }, { 0.0003, 0.01, 0.15 }, { 0.175f, 0.175f, 0.25f }, DIFF }, // sky
{ 100000, { 50.0f, -100000, 0 }, { 0.0, 0.0, 0 }, { 0.8f, 0.2f, 0.f }, DIFF }, // ground
{ 110000, { 50.0f, -110048.5, 0 }, { 3.6, 2.0, 0.2 }, { 0.f, 0.f, 0.f }, DIFF }, // horizon brightener
{ 4e4, { 50.0f, -4e4 - 30, -3000 }, { 0, 0, 0 }, { 0.2f, 0.2f, 0.2f }, DIFF }, // mountains
{ 82.5, { 30.0f, 180.5, 42 }, { 16, 12, 6 }, { .6f, .6f, 0.6f }, DIFF }, // small sphere 1
{ 12, { 115.0f, 10, 105 }, { 0.0, 0.0, 0.0 }, { 0.9f, 0.9f, 0.9f }, REFR }, // small sphere 2
{ 22, { 65.0f, 22, 24 }, { 0, 0, 0 }, { 0.9f, 0.9f, 0.9f }, SPEC }, // small sphere 3
};
__constant__ Box boxes[] = {
// FORMAT: { float3 minbounds, float3 maxbounds, float3 emission, float3 colour, Refl_t }
{ { 5.0f, 0.0f, 70.0f }, { 45.0f, 11.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ {85.0f, 0.0f, 95.0f }, { 95.0f, 20.0f, 105.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ {75.0f, 20.0f, 85.0f}, { 105.0f, 22.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
};
__device__ inline bool intersect_scene(const Ray &r, float &t, int &sphere_id, int &box_id, int& triangle_id, const int number_of_triangles, int &geomtype, const float3& bbmin, const float3& bbmax){
float tmin = 1e20;
float tmax = -1e20;
float d = 1e21;
float k = 1e21;
float q = 1e21;
float inf = t = 1e20;
// SPHERES
// intersect all spheres in the scene
float numspheres = sizeof(spheres) / sizeof(Sphere);
for (int i = int(numspheres); i--;) // for all spheres in scene
// keep track of distance from origin to closest intersection point
if ((d = spheres[i].intersect(r)) && d < t){ t = d; sphere_id = i; geomtype = 1; }
// BOXES
// intersect all boxes in the scene
float numboxes = sizeof(boxes) / sizeof(Box);
for (int i = int(numboxes); i--;) // for all boxes in scene
if ((k = boxes[i].intersect(r)) && k < t){ t = k; box_id = i; geomtype = 2; }
// TRIANGLES
Box scene_bbox; // bounding box around triangle meshes
scene_bbox.min = bbmin;
scene_bbox.max = bbmax;
// if ray hits bounding box of triangle meshes, intersect ray with all triangles
if (scene_bbox.intersect(r)){
intersectAllTriangles(r, t, triangle_id, number_of_triangles, geomtype);
}
// t is distance to closest intersection of ray with all primitives in the scene (spheres, boxes and triangles)
return t<inf;
}
// hash function to calculate new seed for each frame
// see http://www.reedbeta.com/blog/2013/01/12/quick-and-easy-gpu-random-numbers-in-d3d11/
uint WangHash(uint a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
// radiance function
// compute path bounces in scene and accumulate returned color from each path segment
__device__ float3 radiance(Ray &r, curandState *randstate, const int totaltris, const float3& scene_aabb_min, const float3& scene_aabb_max){ // returns ray color
// colour mask
float3 mask = make_float3(1.0f, 1.0f, 1.0f);
// accumulated colour
float3 accucolor = make_float3(0.0f, 0.0f, 0.0f);
for (int bounces = 0; bounces < 5; bounces++){ // iterate up to 5 bounces (instead of recursion in the CPU code)
// reset scene intersection function parameters
float t = 100000; // distance to intersection
int sphere_id = -1;
int box_id = -1; // index of intersected box
int triangle_id = -1;
int geomtype = -1;
float3 f; // primitive colour
float3 emit; // primitive emission colour
float3 x; // intersection point
float3 n; // normal
float3 nl; // oriented normal
float3 d; // ray direction of next path segment
Refl_t refltype;
// intersect ray with scene
// intersect_scene keeps track of closest intersected primitive and distance to closest intersection point
if (!intersect_scene(r, t, sphere_id, box_id, triangle_id, totaltris, geomtype, scene_aabb_min, scene_aabb_max))
return make_float3(0.0f, 0.0f, 0.0f); // if miss, return black
// else: we've got a hit with a scene primitive
// determine geometry type of primitive: sphere/box/triangle
// if sphere:
if (geomtype == 1){
Sphere &sphere = spheres[sphere_id]; // hit object with closest intersection
x = r.orig + r.dir*t; // intersection point on object
n = normalize(x - sphere.pos); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = sphere.col; // object colour
refltype = sphere.refl;
emit = sphere.emi; // object emission
accucolor += (mask * emit);
}
// if box:
if (geomtype == 2){
Box &box = boxes[box_id];
x = r.orig + r.dir*t; // intersection point on object
n = normalize(box.normalAt(x)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = box.col; // box colour
refltype = box.refl;
emit = box.emi; // box emission
accucolor += (mask * emit);
}
// if triangle:
if (geomtype == 3){
int tri_index = triangle_id;
x = r.orig + r.dir*t; // intersection point
n = normalize(getTriangleNormal(tri_index)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
// colour, refltype and emit value are hardcoded and apply to all triangles
// no per triangle material support yet
f = make_float3(0.9f, 0.4f, 0.1f); // triangle colour
refltype = REFR;
emit = make_float3(0.0f, 0.0f, 0.0f);
accucolor += (mask * emit);
}
// SHADING: diffuse, specular or refractive
// ideal diffuse reflection (see "Realistic Ray Tracing", P. Shirley)
if (refltype == DIFF){
// create 2 random numbers
float r1 = 2 * M_PI * curand_uniform(randstate);
float r2 = curand_uniform(randstate);
float r2s = sqrtf(r2);
// compute orthonormal coordinate frame uvw with hitpoint as origin
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
// compute cosine weighted random ray direction on hemisphere
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
// offset origin next path segment to prevent self intersection
x += nl * 0.03;
// multiply mask with colour of object
mask *= f;
}
// ideal specular reflection (mirror)
if (refltype == SPEC){
// compute reflected ray direction according to the law of reflection
d = r.dir - 2.0f * n * dot(n, r.dir);
// offset origin next path segment to prevent self intersection
x += nl * 0.01f;
// multiply mask with colour of object
mask *= f;
}
// ideal refraction (based on smallpt code by Kevin Beason)
if (refltype == REFR){
bool into = dot(n, nl) > 0; // is ray entering or leaving refractive material?
float nc = 1.0f; // Index of Refraction air
float nt = 1.5f; // Index of Refraction glass/water
float nnt = into ? nc / nt : nt / nc; // IOR ratio of refractive materials
float ddn = dot(r.dir, nl);
float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);
if (cos2t < 0.0f) // total internal reflection
{
d = reflect(r.dir, n); //d = r.dir - 2.0f * n * dot(n, r.dir);
x += nl * 0.01f;
}
else // cos2t > 0
{
// compute direction of transmission ray
float3 tdir = normalize(r.dir * nnt - n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t))));
float R0 = ((nt - nc)*(nt - nc)) / ((nt + nc)*(nt + nc)); // Schlick's reflectance at normal incidence
float c = 1.f - (into ? -ddn : dot(tdir, n));
float Re = R0 + (1.f - R0) * c * c * c * c * c;
float Tr = 1 - Re; // Transmission
float P = .25f + .5f * Re;
float RP = Re / P;
float TP = Tr / (1.f - P);
// randomly choose reflection or transmission ray
if (curand_uniform(randstate) < 0.25) // reflection ray
{
mask *= RP;
d = reflect(r.dir, n);
x += nl * 0.02f;
}
else // transmission ray
{
mask *= TP;
d = tdir; //r = Ray(x, tdir);
x += nl * 0.0005f; // epsilon must be small to avoid artefacts
}
}
}
// set up origin and direction of next path segment
r.orig = x;
r.dir = d;
}
// add radiance up to a certain ray depth
// return accumulated ray colour after all bounces are computed
return accucolor;
}
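// Illustrative addition (not part of the original code): a quick numeric check of the Schlick
// terms used in the REFR branch above. For air to glass (nc = 1.0, nt = 1.5) the reflectance at
// normal incidence is R0 = ((nt - nc) / (nt + nc))^2 = (0.5 / 2.5)^2 = 0.04, i.e. about 4% of
// the light is reflected when the ray hits the surface head-on.
inline __host__ __device__ float schlickR0(float nc, float nt)
{
float r = (nt - nc) / (nt + nc);
return r * r;
}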
// required to convert colour to a format that OpenGL can display
union Colour // 4 bytes = 4 chars = 1 float
{
float c;
uchar4 components;
};
__global__ void render_kernel(float3 *output, float3* accumbuffer, const int numtriangles, int framenumber, uint hashedframenumber, float3 scene_bbmin, float3 scene_bbmax){ // float3 *gputexdata1, int *texoffsets
// assign a CUDA thread to every pixel by using the threadIndex
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// global threadId, see richiesams blogspot
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
// create random number generator, see RichieSams blogspot
curandState randState; // state of the random number generator, to prevent repetition
curand_init(hashedframenumber + threadId, 0, 0, &randState);
Ray cam(firstcamorig, normalize(make_float3(0, -0.042612, -1)));
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset along X-axis
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray dir offset along Y-axis, .5135 is FOV angle
float3 pixelcol; // final pixel color
int i = (height - y - 1)*width + x; // pixel index
pixelcol = make_float3(0.0f, 0.0f, 0.0f); // reset to zero for every pixel
for (int s = 0; s < samps; s++){
// compute primary ray direction
float3 d = cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5) + cam.dir;
// normalize primary ray direction
d = normalize(d);
// add accumulated colour from path bounces
pixelcol += radiance(Ray(cam.orig + d * 40, d), &randState, numtriangles, scene_bbmin, scene_bbmax)*(1. / samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
// add pixel colour to accumulation buffer (accumulates all samples)
accumbuffer[i] += pixelcol;
// averaged colour: divide colour by the number of calculated frames so far
float3 tempcol = accumbuffer[i] / framenumber;
Colour fcolour;
float3 colour = make_float3(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f));
// convert from 96-bit to 24-bit colour + perform gamma correction
fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / 2.2f) * 255), (unsigned char)(powf(colour.y, 1 / 2.2f) * 255), (unsigned char)(powf(colour.z, 1 / 2.2f) * 255), 1);
// store pixel coordinates and pixelcolour in OpenGL readable outputbuffer
output[i] = make_float3(x, y, fcolour.c);
}
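// Illustrative addition (not part of the original code): the gamma step used above, isolated.
// The 1/2.2 exponent is a simple gamma encode so the averaged linear radiance displays sensibly
// on an sRGB monitor; e.g. a linear value of 0.5 maps to roughly 0.73 before quantisation to 0-255.
inline __device__ unsigned char gammaEncode(float linear01)
{
return (unsigned char)(powf(clamp(linear01, 0.0f, 1.0f), 1.0f / 2.2f) * 255.0f);
}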
void Timer(int obsolete) {
glutPostRedisplay();
glutTimerFunc(30, Timer, 0);
}
__device__ float timer = 0.0f;
inline float clamp(float x){ return x<0 ? 0 : x>1 ? 1 : x; }
//inline int toInt(float x){ return int(pow(clamp(x), 1 / 2.2) * 255 + .5); } // RGB float in range [0,1] to int in range [0, 255]
// buffer for accumulating samples over several frames
float3* accumulatebuffer;
// output buffer
float3 *dptr;
void disp(void)
{
frames++;
cudaThreadSynchronize();
// map vertex buffer object for access by CUDA
cudaGLMapBufferObject((void**)&dptr, vbo);
//clear all pixels:
glClear(GL_COLOR_BUFFER_BIT);
// RAY TRACING:
// dim3 grid(WINDOW / block.x, WINDOW / block.y, 1);
// dim3 CUDA specific syntax, block and grid are required to schedule CUDA threads over streaming multiprocessors
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
// launch CUDA path tracing kernel, pass in a hashed seed based on number of frames
render_kernel <<< grid, block >>>(dptr, accumulatebuffer, total_number_of_triangles, frames, WangHash(frames), scene_aabbox_max, scene_aabbox_min); // launches CUDA render kernel from the host
cudaThreadSynchronize();
// unmap buffer
cudaGLUnmapBufferObject(vbo);
//glFlush();
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(2, GL_FLOAT, 12, 0);
glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glDrawArrays(GL_POINTS, 0, width * height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
//glutPostRedisplay();
}
// load triangle data in a CUDA texture
extern "C"
{
void bindTriangles(float *dev_triangle_p, unsigned int number_of_triangles)
{
triangle_texture.normalized = false; // use unnormalized (integer) texture coordinates
triangle_texture.filterMode = cudaFilterModePoint; // Point mode, so no filtering/interpolation
triangle_texture.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
size_t size = sizeof(float4)*number_of_triangles * 3;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
cudaBindTexture(0, triangle_texture, dev_triangle_p, channelDesc, size);
}
}
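// Illustrative addition (not part of the original code): the texture *reference* API used in
// bindTriangles() is deprecated in newer CUDA toolkits. A sketch of the equivalent bindless
// texture-object setup is shown below; this helper is hypothetical and is not called anywhere
// in this file.
cudaTextureObject_t createTriangleTextureObject(float *dev_triangle_p, unsigned int number_of_triangles)
{
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = dev_triangle_p;
resDesc.res.linear.desc = cudaCreateChannelDesc<float4>();
resDesc.res.linear.sizeInBytes = sizeof(float4) * number_of_triangles * 3;
cudaTextureDesc texDesc = {};
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
return texObj; // kernels would then fetch with tex1Dfetch<float4>(texObj, index)
}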
// helpers to load triangle data
struct TriangleFace
{
int v[3]; // vertex indices
};
struct TriangleMesh
{
std::vector<float3> verts;
std::vector<TriangleFace> faces;
float3 bounding_box[2];
};
TriangleMesh mesh1;
TriangleMesh mesh2;
float *dev_triangle_p; // the cuda device pointer that points to the uploaded triangles
void loadObj(const std::string filename, TriangleMesh &mesh); // forward declaration
// 1. load triangle mesh data from obj files
// 2. copy data to CPU memory (into vector<float4> triangles)
// 3. copy to CUDA global memory (allocated with dev_triangle_p pointer)
// 4. copy to CUDA texture memory with bindtriangles()
void initCUDAmemoryTriMesh()
{
loadObj("data/teapot.obj", mesh1);
loadObj("data/bunny.obj", mesh2);
// scalefactor and offset to position/scale triangle meshes
float scalefactor1 = 8;
float scalefactor2 = 300; // 300
float3 offset1 = make_float3(90, 22, 100);// (30, -2, 80);
float3 offset2 = make_float3(30, -2, 80);
std::vector<float4> triangles;
for (unsigned int i = 0; i < mesh1.faces.size(); i++)
{
// make a local copy of the triangle vertices
float3 v0 = mesh1.verts[mesh1.faces[i].v[0] - 1];
float3 v1 = mesh1.verts[mesh1.faces[i].v[1] - 1];
float3 v2 = mesh1.verts[mesh1.faces[i].v[2] - 1];
// scale
v0 *= scalefactor1;
v1 *= scalefactor1;
v2 *= scalefactor1;
// translate
v0 += offset1;
v1 += offset1;
v2 += offset1;
// store triangle data as float4
// store two edges per triangle instead of vertices, to save some calculations in the
// ray triangle intersection test
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 0));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
// compute bounding box of this mesh
mesh1.bounding_box[0] *= scalefactor1; mesh1.bounding_box[0] += offset1;
mesh1.bounding_box[1] *= scalefactor1; mesh1.bounding_box[1] += offset1;
for (unsigned int i = 0; i < mesh2.faces.size(); i++)
{
float3 v0 = mesh2.verts[mesh2.faces[i].v[0] - 1];
float3 v1 = mesh2.verts[mesh2.faces[i].v[1] - 1];
float3 v2 = mesh2.verts[mesh2.faces[i].v[2] - 1];
v0 *= scalefactor2;
v1 *= scalefactor2;
v2 *= scalefactor2;
v0 += offset2;
v1 += offset2;
v2 += offset2;
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 1));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
mesh2.bounding_box[0] *= scalefactor2; mesh2.bounding_box[0] += offset2;
mesh2.bounding_box[1] *= scalefactor2; mesh2.bounding_box[1] += offset2;
std::cout << "total number of triangles check:" << mesh1.faces.size() + mesh2.faces.size() << " == " << triangles.size() / 3 << std::endl;
// calculate total number of triangles in the scene
size_t triangle_size = triangles.size() * sizeof(float4);
int total_num_triangles = triangles.size() / 3;
total_number_of_triangles = total_num_triangles;
if (triangle_size > 0)
{
// allocate memory for the triangle meshes on the GPU
cudaMalloc((void **)&dev_triangle_p, triangle_size);
// copy triangle data to GPU
cudaMemcpy(dev_triangle_p, &triangles[0], triangle_size, cudaMemcpyHostToDevice);
// load triangle data into a CUDA texture
bindTriangles(dev_triangle_p, total_num_triangles);
}
// compute scene bounding box by merging bounding boxes of individual meshes
scene_aabbox_min = mesh2.bounding_box[0];
scene_aabbox_max = mesh2.bounding_box[1];
scene_aabbox_min = fminf(scene_aabbox_min, mesh1.bounding_box[0]);
scene_aabbox_max = fmaxf(scene_aabbox_max, mesh1.bounding_box[1]);
}
// read triangle data from obj file
void loadObj(const std::string filename, TriangleMesh &mesh)
{
std::ifstream in(filename.c_str());
if (!in.good())
{
std::cout << "ERROR: loading obj:(" << filename << ") file not found or not good" << "\n";
system("PAUSE");
exit(0);
}
char buffer[256], str[255];
float f1, f2, f3;
while (!in.getline(buffer, 255).eof())
{
buffer[255] = '\0';
sscanf_s(buffer, "%s", str, 255);
// reading a vertex
if (buffer[0] == 'v' && (buffer[1] == ' ' || buffer[1] == 32)){
if (sscanf(buffer, "v %f %f %f", &f1, &f2, &f3) == 3){
mesh.verts.push_back(make_float3(f1, f2, f3));
}
else{
std::cout << "ERROR: vertex not in wanted format in OBJLoader" << "\n";
exit(-1);
}
}
// reading faceMtls
else if (buffer[0] == 'f' && (buffer[1] == ' ' || buffer[1] == 32))
{
TriangleFace f;
int nt = sscanf(buffer, "f %d %d %d", &f.v[0], &f.v[1], &f.v[2]);
if (nt != 3){
std::cout << "ERROR: I don't know the format of that FaceMtl" << "\n";
exit(-1);
}
mesh.faces.push_back(f);
}
}
// calculate the bounding box of the mesh
mesh.bounding_box[0] = make_float3(1000000, 1000000, 1000000);
mesh.bounding_box[1] = make_float3(-1000000, -1000000, -1000000);
for (unsigned int i = 0; i < mesh.verts.size(); i++)
{
//update min and max value
mesh.bounding_box[0] = fminf(mesh.verts[i], mesh.bounding_box[0]);
mesh.bounding_box[1] = fmaxf(mesh.verts[i], mesh.bounding_box[1]);
}
std::cout << "obj file loaded: number of faces:" << mesh.faces.size() << " number of vertices:" << mesh.verts.size() << std::endl;
std::cout << "obj bounding box: min:(" << mesh.bounding_box[0].x << "," << mesh.bounding_box[0].y << "," << mesh.bounding_box[0].z << ") max:"
<< mesh.bounding_box[1].x << "," << mesh.bounding_box[1].y << "," << mesh.bounding_box[1].z << ")" << std::endl;
}
int main(int argc, char** argv){
// allocate memory for the accumulation buffer on the GPU
cudaMalloc(&accumulatebuffer, width * height * sizeof(float3));
// load triangle meshes in CUDA memory
initCUDAmemoryTriMesh();
// init glut for OpenGL viewport
glutInit(&argc, argv);
// specify the display mode to be RGB and single buffering
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
// specify the initial window position
glutInitWindowPosition(100, 100);
// specify the initial window size
glutInitWindowSize(width, height);
// create the window and set title
glutCreateWindow("Basic triangle mesh path tracer in CUDA");
// init OpenGL
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0.0, width, 0.0, height);
fprintf(stderr, "OpenGL initialized \n");
// register callback function to display graphics:
glutDisplayFunc(disp);
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 ")) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
exit(0);
}
fprintf(stderr, "glew initialized \n");
// call Timer():
Timer(0);
//create VBO (vertex buffer object)
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
//initialize VBO
unsigned int size = width * height * sizeof(float3); // 3 floats
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//register VBO with CUDA
cudaGLRegisterBufferObject(vbo);
fprintf(stderr, "VBO created \n");
// enter the main loop and process events
fprintf(stderr, "Entering glutMainLoop... \n");
glutMainLoop();
// free CUDA memory on exit
cudaFree(accumulatebuffer);
cudaFree(dev_triangle_p);
cudaFree(dptr);
}
|
582b6595b2fe8024a82d8dceed3047483a7fbbec.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nn.cu
* Nearest Neighbor
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <float.h>
#include <vector>
#include "hip/hip_runtime.h"
#define min( a, b ) a > b ? b : a
#define ceilDiv( a, b ) ( a + b - 1 ) / b
#define print( x ) printf( #x ": %lu\n", (unsigned long) x )
#define DEBUG false
#define DEFAULT_THREADS_PER_BLOCK 256
#define MAX_ARGS 10
#define REC_LENGTH 53 // size of a record in db
#define LATITUDE_POS 28 // character position of the latitude value in each record
#define OPEN 10000 // initial value of nearest neighbors
#ifndef KDTREE_DIM
#define KDTREE_DIM 3 // assumed dimensionality; the original snippet uses the macro without defining it
#endif
struct Point
{
float coords[KDTREE_DIM];
};
typedef struct latLong
{
float lat;
float lng;
} LatLong;
typedef struct record
{
char recString[REC_LENGTH];
float distance;
} Record;
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations);
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN);
void printUsage();
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d);
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
//int globalId = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x; // more efficient
LatLong *latLong = d_locations+globalId;
if (globalId < numRecords) {
float *dist=d_distances+globalId;
*dist = (float)sqrt((lat - latLong->lat)*(lat - latLong->lat)+(lng - latLong->lng)*(lng - latLong->lng));
}
}
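// Illustrative addition (not part of the original benchmark): a host-side reference of the
// per-record distance, handy for spot-checking a handful of GPU results after the copy back.
inline float euclidReference(const LatLong &p, float lat, float lng)
{
return sqrtf((lat - p.lat) * (lat - p.lat) + (lng - p.lng) * (lng - p.lng));
}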
/**
* This program finds the k-nearest neighbors
**/
int main(int argc, char* argv[])
{
int i=0;
float lat, lng;
int quiet=0,timing=0,platform=0,device=0;
std::vector<Record> records;
std::vector<LatLong> locations;
char filename[100];
int resultsCount=10;
// parse command line
if (parseCommandline(argc, argv, filename,&resultsCount,&lat,&lng,
&quiet, &timing, &platform, &device)) {
printUsage();
return 0;
}
int numRecords = loadData(filename,records,locations);
if (resultsCount > numRecords) resultsCount = numRecords;
//for(i=0;i<numRecords;i++)
// printf("%s, %f, %f\n",(records[i].recString),locations[i].lat,locations[i].lng);
//Pointers to host memory
float *distances;
//Pointers to device memory
LatLong *d_locations;
float *d_distances;
// Scaling calculations - added by Sam Kauffman
hipDeviceProp_t deviceProp;
hipGetDeviceProperties( &deviceProp, 0 );
hipDeviceSynchronize();
unsigned long maxGridX = deviceProp.maxGridSize[0];
unsigned long threadsPerBlock = min( deviceProp.maxThreadsPerBlock, DEFAULT_THREADS_PER_BLOCK );
size_t totalDeviceMemory;
size_t freeDeviceMemory;
hipMemGetInfo( &freeDeviceMemory, &totalDeviceMemory );
hipDeviceSynchronize();
unsigned long usableDeviceMemory = freeDeviceMemory * 85 / 100; // 85% arbitrary throttle to compensate for known CUDA bug
unsigned long maxThreads = usableDeviceMemory / 12; // 4 bytes in 3 vectors per thread
if ( numRecords > maxThreads )
{
fprintf( stderr, "Error: Input too large.\n" );
exit( 1 );
}
unsigned long blocks = ceilDiv( numRecords, threadsPerBlock ); // extra threads will do nothing
unsigned long gridY = ceilDiv( blocks, maxGridX );
unsigned long gridX = ceilDiv( blocks, gridY );
// There will be no more than (gridY - 1) extra blocks
dim3 gridDim( gridX, gridY );
if ( DEBUG )
{
print( totalDeviceMemory ); // 804454400
print( freeDeviceMemory );
print( usableDeviceMemory );
print( maxGridX ); // 65535
print( deviceProp.maxThreadsPerBlock ); // 1024
print( threadsPerBlock );
print( maxThreads );
print( blocks ); // 130933
print( gridY );
print( gridX );
}
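// Worked example of the sizing above (illustrative numbers, not from a specific run): with
// numRecords = 1,000,000 and threadsPerBlock = 256, blocks = ceilDiv(1000000, 256) = 3907,
// gridY = ceilDiv(3907, 65535) = 1 and gridX = ceilDiv(3907, 1) = 3907, so gridDim = (3907, 1)
// and the final block just has 3907 * 256 - 1000000 = 192 idle threads.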
/**
* Allocate memory on host and device
*/
distances = (float *)malloc(sizeof(float) * numRecords);
hipMalloc((void **) &d_locations,sizeof(LatLong) * numRecords);
hipMalloc((void **) &d_distances,sizeof(float) * numRecords);
/**
* Transfer data from host to device
*/
hipMemcpy( d_locations, &locations[0], sizeof(LatLong) * numRecords, hipMemcpyHostToDevice);
/**
* Execute kernel
*/
hipLaunchKernelGGL(( euclid), dim3(gridDim), dim3(threadsPerBlock) , 0, 0, d_locations,d_distances,numRecords,lat,lng);
hipDeviceSynchronize();
//Copy data from device memory to host memory
hipMemcpy( distances, d_distances, sizeof(float)*numRecords, hipMemcpyDeviceToHost );
// find the resultsCount least distances
findLowest(records,distances,numRecords,resultsCount);
// print out results
if (!quiet)
{
printf("year month date hr num name lat lon speed press\n");
for(i=0;i<resultsCount;i++)
{
printf("%s --> Distance=%f\n",records[i].recString,records[i].distance);
}
}
free(distances);
//Free memory
hipFree(d_locations);
hipFree(d_distances);
}
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations){
FILE *flist,*fp;
int i=0;
char dbname[64];
int recNum=0;
/**Main processing **/
flist = fopen(filename, "r");
while(!feof(flist)) {
/**
* Read in all records of length REC_LENGTH
* If this is the last file in the filelist, then done
* else open next file to be read next iteration
*/
if(fscanf(flist, "%s\n", dbname) != 1) {
fprintf(stderr, "error reading filelist\n");
exit(0);
}
fp = fopen(dbname, "r");
if(!fp) {
printf("error opening a db\n");
exit(1);
}
// read each record
while(!feof(fp)){
Record record;
LatLong latLong;
fgets(record.recString,49,fp);
fgetc(fp); // newline
if (feof(fp)) break;
// parse for lat and long
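// latitude occupies characters 28-32 and longitude characters 33-37 of each fixed-width record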
char substr[6];
for(i=0;i<5;i++) substr[i] = *(record.recString+i+28);
substr[5] = '\0';
latLong.lat = atof(substr);
for(i=0;i<5;i++) substr[i] = *(record.recString+i+33);
substr[5] = '\0';
latLong.lng = atof(substr);
locations.push_back(latLong);
records.push_back(record);
recNum++;
}
fclose(fp);
}
fclose(flist);
// for(i=0;i<rec_count*REC_LENGTH;i++) printf("%c",sandbox[i]);
return recNum;
}
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN){
int i,j;
float val;
int minLoc;
Record *tempRec;
float tempDist;
for(i=0;i<topN;i++) {
minLoc = i;
for(j=i;j<numRecords;j++) {
val = distances[j];
if (val < distances[minLoc]) minLoc = j;
}
// swap locations and distances
tempRec = &records[i];
records[i] = records[minLoc];
records[minLoc] = *tempRec;
tempDist = distances[i];
distances[i] = distances[minLoc];
distances[minLoc] = tempDist;
// add distance to the min we just found
records[i].distance = distances[i];
}
}
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d){
int i;
if (argc < 2) return 1; // error
strncpy(filename,argv[1],100);
char flag;
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 'r': // number of results
i++;
*r = atoi(argv[i]);
break;
case 'l': // lat or lng
if (argv[i][2]=='a') {//lat
*lat = atof(argv[i+1]);
}
else {//lng
*lng = atof(argv[i+1]);
}
i++;
break;
case 'h': // help
return 1;
case 'q': // quiet
*q = 1;
break;
case 't': // timing
*t = 1;
break;
case 'p': // platform
i++;
*p = atoi(argv[i]);
break;
case 'd': // device
i++;
*d = atoi(argv[i]);
break;
}
}
}
if ((*d >= 0 && *p<0) || (*p>=0 && *d<0)) // both p and d must be specified if either are specified
return 1;
return 0;
}
void printUsage()
{
printf("Nearest Neighbor Usage\n");
printf("\n");
printf("nearestNeighbor [filename] -r [int] -lat [float] -lng [float] [-hqt] [-p [int] -d [int]]\n");
printf("\n");
printf("example:\n");
printf("$ ./nearestNeighbor filelist.txt -r 5 -lat 30 -lng 90\n");
printf("\n");
printf("filename the filename that lists the data input files\n");
printf("-r [int] the number of records to return (default: 10)\n");
printf("-lat [float] the latitude for nearest neighbors (default: 0)\n");
printf("-lng [float] the longitude for nearest neighbors (default: 0)\n");
printf("\n");
printf("-h, --help Display the help file\n");
printf("-q Quiet mode. Suppress all text output.\n");
printf("-t Print timing information.\n");
printf("\n");
printf("-p [int] Choose the platform (must choose both platform and device)\n");
printf("-d [int] Choose the device (must choose both platform and device)\n");
printf("\n");
printf("\n");
printf("Notes: 1. The filename is required as the first parameter.\n");
printf(" 2. If you declare either the device or the platform,\n");
printf(" you must declare both.\n\n");
}
| 582b6595b2fe8024a82d8dceed3047483a7fbbec.cu | /*
* nn.cu
* Nearest Neighbor
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <float.h>
#include <vector>
#include "cuda.h"
#define min( a, b ) a > b ? b : a
#define ceilDiv( a, b ) ( a + b - 1 ) / b
#define print( x ) printf( #x ": %lu\n", (unsigned long) x )
#define DEBUG false
#define DEFAULT_THREADS_PER_BLOCK 256
#define MAX_ARGS 10
#define REC_LENGTH 53 // size of a record in db
#define LATITUDE_POS 28 // character position of the latitude value in each record
#define OPEN 10000 // initial value of nearest neighbors
struct Point
{
float coords[KDTREE_DIM];
};
typedef struct latLong
{
float lat;
float lng;
} LatLong;
typedef struct record
{
char recString[REC_LENGTH];
float distance;
} Record;
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations);
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN);
void printUsage();
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d);
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
//int globalId = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x; // more efficient
LatLong *latLong = d_locations+globalId;
if (globalId < numRecords) {
float *dist=d_distances+globalId;
*dist = (float)sqrt((lat - latLong->lat)*(lat - latLong->lat)+(lng - latLong->lng)*(lng - latLong->lng));
}
}
/**
* This program finds the k-nearest neighbors
**/
int main(int argc, char* argv[])
{
int i=0;
float lat, lng;
int quiet=0,timing=0,platform=0,device=0;
std::vector<Record> records;
std::vector<LatLong> locations;
char filename[100];
int resultsCount=10;
// parse command line
if (parseCommandline(argc, argv, filename,&resultsCount,&lat,&lng,
&quiet, &timing, &platform, &device)) {
printUsage();
return 0;
}
int numRecords = loadData(filename,records,locations);
if (resultsCount > numRecords) resultsCount = numRecords;
//for(i=0;i<numRecords;i++)
// printf("%s, %f, %f\n",(records[i].recString),locations[i].lat,locations[i].lng);
//Pointers to host memory
float *distances;
//Pointers to device memory
LatLong *d_locations;
float *d_distances;
// Scaling calculations - added by Sam Kauffman
cudaDeviceProp deviceProp;
cudaGetDeviceProperties( &deviceProp, 0 );
cudaThreadSynchronize();
unsigned long maxGridX = deviceProp.maxGridSize[0];
unsigned long threadsPerBlock = min( deviceProp.maxThreadsPerBlock, DEFAULT_THREADS_PER_BLOCK );
size_t totalDeviceMemory;
size_t freeDeviceMemory;
cudaMemGetInfo( &freeDeviceMemory, &totalDeviceMemory );
cudaThreadSynchronize();
unsigned long usableDeviceMemory = freeDeviceMemory * 85 / 100; // 85% arbitrary throttle to compensate for known CUDA bug
unsigned long maxThreads = usableDeviceMemory / 12; // 4 bytes in 3 vectors per thread
if ( numRecords > maxThreads )
{
fprintf( stderr, "Error: Input too large.\n" );
exit( 1 );
}
unsigned long blocks = ceilDiv( numRecords, threadsPerBlock ); // extra threads will do nothing
unsigned long gridY = ceilDiv( blocks, maxGridX );
unsigned long gridX = ceilDiv( blocks, gridY );
// There will be no more than (gridY - 1) extra blocks
dim3 gridDim( gridX, gridY );
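// The block count can exceed maxGridSize[0], so the launch folds it into a gridX x gridY
// grid; the kernel's flattened index (blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x)
// + threadIdx.x) must stay consistent with this layout.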
if ( DEBUG )
{
print( totalDeviceMemory ); // 804454400
print( freeDeviceMemory );
print( usableDeviceMemory );
print( maxGridX ); // 65535
print( deviceProp.maxThreadsPerBlock ); // 1024
print( threadsPerBlock );
print( maxThreads );
print( blocks ); // 130933
print( gridY );
print( gridX );
}
/**
* Allocate memory on host and device
*/
distances = (float *)malloc(sizeof(float) * numRecords);
cudaMalloc((void **) &d_locations,sizeof(LatLong) * numRecords);
cudaMalloc((void **) &d_distances,sizeof(float) * numRecords);
/**
* Transfer data from host to device
*/
cudaMemcpy( d_locations, &locations[0], sizeof(LatLong) * numRecords, cudaMemcpyHostToDevice);
/**
* Execute kernel
*/
euclid<<< gridDim, threadsPerBlock >>>(d_locations,d_distances,numRecords,lat,lng);
cudaThreadSynchronize();
//Copy data from device memory to host memory
cudaMemcpy( distances, d_distances, sizeof(float)*numRecords, cudaMemcpyDeviceToHost );
// find the resultsCount least distances
findLowest(records,distances,numRecords,resultsCount);
// print out results
if (!quiet)
{
printf("year month date hr num name lat lon speed press\n");
for(i=0;i<resultsCount;i++)
{
printf("%s --> Distance=%f\n",records[i].recString,records[i].distance);
}
}
free(distances);
//Free memory
cudaFree(d_locations);
cudaFree(d_distances);
}
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations){
FILE *flist,*fp;
int i=0;
char dbname[64];
int recNum=0;
/**Main processing **/
flist = fopen(filename, "r");
while(!feof(flist)) {
/**
* Read in all records of length REC_LENGTH
* If this is the last file in the filelist, then done
* else open next file to be read next iteration
*/
if(fscanf(flist, "%s\n", dbname) != 1) {
fprintf(stderr, "error reading filelist\n");
exit(0);
}
fp = fopen(dbname, "r");
if(!fp) {
printf("error opening a db\n");
exit(1);
}
// read each record
while(!feof(fp)){
Record record;
LatLong latLong;
fgets(record.recString,49,fp);
fgetc(fp); // newline
if (feof(fp)) break;
// parse for lat and long
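// latitude occupies characters 28-32 and longitude characters 33-37 of each fixed-width record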
char substr[6];
for(i=0;i<5;i++) substr[i] = *(record.recString+i+28);
substr[5] = '\0';
latLong.lat = atof(substr);
for(i=0;i<5;i++) substr[i] = *(record.recString+i+33);
substr[5] = '\0';
latLong.lng = atof(substr);
locations.push_back(latLong);
records.push_back(record);
recNum++;
}
fclose(fp);
}
fclose(flist);
// for(i=0;i<rec_count*REC_LENGTH;i++) printf("%c",sandbox[i]);
return recNum;
}
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN){
int i,j;
float val;
int minLoc;
Record *tempRec;
float tempDist;
for(i=0;i<topN;i++) {
minLoc = i;
for(j=i;j<numRecords;j++) {
val = distances[j];
if (val < distances[minLoc]) minLoc = j;
}
// swap locations and distances
tempRec = &records[i];
records[i] = records[minLoc];
records[minLoc] = *tempRec;
tempDist = distances[i];
distances[i] = distances[minLoc];
distances[minLoc] = tempDist;
// add distance to the min we just found
records[i].distance = distances[i];
}
}
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d){
int i;
if (argc < 2) return 1; // error
strncpy(filename,argv[1],100);
char flag;
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 'r': // number of results
i++;
*r = atoi(argv[i]);
break;
case 'l': // lat or lng
if (argv[i][2]=='a') {//lat
*lat = atof(argv[i+1]);
}
else {//lng
*lng = atof(argv[i+1]);
}
i++;
break;
case 'h': // help
return 1;
case 'q': // quiet
*q = 1;
break;
case 't': // timing
*t = 1;
break;
case 'p': // platform
i++;
*p = atoi(argv[i]);
break;
case 'd': // device
i++;
*d = atoi(argv[i]);
break;
}
}
}
if ((*d >= 0 && *p<0) || (*p>=0 && *d<0)) // both p and d must be specified if either are specified
return 1;
return 0;
}
void printUsage()
{
printf("Nearest Neighbor Usage\n");
printf("\n");
printf("nearestNeighbor [filename] -r [int] -lat [float] -lng [float] [-hqt] [-p [int] -d [int]]\n");
printf("\n");
printf("example:\n");
printf("$ ./nearestNeighbor filelist.txt -r 5 -lat 30 -lng 90\n");
printf("\n");
printf("filename the filename that lists the data input files\n");
printf("-r [int] the number of records to return (default: 10)\n");
printf("-lat [float] the latitude for nearest neighbors (default: 0)\n");
printf("-lng [float] the longitude for nearest neighbors (default: 0)\n");
printf("\n");
printf("-h, --help Display the help file\n");
printf("-q Quiet mode. Suppress all text output.\n");
printf("-t Print timing information.\n");
printf("\n");
printf("-p [int] Choose the platform (must choose both platform and device)\n");
printf("-d [int] Choose the device (must choose both platform and device)\n");
printf("\n");
printf("\n");
printf("Notes: 1. The filename is required as the first parameter.\n");
printf(" 2. If you declare either the device or the platform,\n");
printf(" you must declare both.\n\n");
}
|
40e7c836724774483ceb6a707133142865b67855.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits>
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/core/Array.h>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
namespace at { namespace native {
bool should_use_small_sort(const Tensor &self, int64_t dim) {
int64_t ndim = self.dim();
int64_t nsort = self.sizes()[dim];
int64_t threshold;
if (self.scalar_type() == kLong || self.scalar_type() == kDouble) {
threshold = 1024;
} else {
threshold = 2048;
}
return nsort <= threshold;
}
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim);
void fillSliceWithIndex(Tensor& t,int dim) {
if (t.numel()) {
auto sizes = DimVector(t.dim(), 1);
sizes[dim] = t.sizes()[dim];
auto range = at::arange(t.sizes()[dim], t.options());
auto rangeview = range.view(sizes);
t.copy_(rangeview);
}
}
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void sortKeyValueInplace(const Tensor& key,
const Tensor& value,
int dim, bool dir) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present");
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort");
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
GTOp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTOp<scalar_t, true>()); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} else { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
LTOp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTOp<scalar_t, true>()); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
TORCH_INTERNAL_ASSERT(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
}
namespace {
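// offset_t is the begin/end offset functor handed to the segmented sort below:
// operator[](i) returns stride * (begin + i), i.e. the boundary of segment i when
// stride == nsort (begin = 0 yields segment starts, begin = 1 yields segment ends).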
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
}
// We perform a segmented sort in cub with inputs that have
// more than 1024/2048 elements along the selected dimension.
// Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace).
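// (should_use_small_sort: the threshold is 1024 slice elements for 8-byte dtypes and 2048 otherwise.)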
std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
// this algorithm is always stable
TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value.");
TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3};
checkAllSameGPU(__func__, {self_arg, values_arg, indices_arg});
bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense();
int64_t numel = self.numel();
int64_t ndim = self.dim();
dim = maybe_wrap_dim(dim, ndim);
int64_t nsort = self.sizes()[dim];
TORCH_CHECK(nsort <= std::numeric_limits<int>::max(),
"The dimension being sorted can not have more than INT_MAX elements.");
const auto self_dtype = self.dtype();
// FIXME: remove this check once cub sort supports bool
TORCH_CHECK(self_dtype != ScalarType::Bool,
"Sort currently does not support bool dtype on CUDA.");
TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble,
"Sort currently does not support complex dtypes on CUDA.");
if (ndim == 0) {
if (!values.defined()) {
values = self.clone();
} else {
values.resize_as_(self);
values.copy_(self);
}
if (!indices.defined()) {
indices = at::zeros({}, self.options().dtype(kLong));
} else {
indices.resize_as_(self);
indices.zero_();
}
return std::forward_as_tuple(values, indices);
}
// use inplace algorithm for smaller input sizes without stable=True
if (should_use_small_sort(self, dim) && !stable.value()) {
// from thc: sorted->values, indices->indices, input->self
if (!values.defined()) {
values = at::empty_like(self);
}
if (!indices.defined()) {
indices = at::empty_like(self, self.options().dtype(kLong));
}
// Make sure sufficient output space is allocated
auto self_size = self.sizes();
at::native::resize_output(values, self_size);
at::native::resize_output(indices, self_size);
fillSliceWithIndex(indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
values.copy_(self);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
sortKeyValueInplace(values, indices, dim, descending);
return std::forward_as_tuple(values, indices);
}
Tensor self_;
bool newself = false;
if (is_non_overlapping_and_dense && self.stride(dim) == 1) {
self_ = self;
} else {
auto new_strides_unsort = infer_dense_strides_dim_last(self, dim);
self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options());
self_.copy_(self);
newself = true;
}
Tensor values_tmp, indices_tmp;
void *values_ptr_;
int64_t *indices_ptr;
if (!values.defined()) {
if (is_non_overlapping_and_dense) {
values = at::empty_strided(self.sizes(), self.strides(), self.options());
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
values = at::empty_strided(self.sizes(), strides, self.options());
}
} else {
TORCH_CHECK(self_.scalar_type() == values.scalar_type(),
"Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type());
values.resize_as_(self);
}
if (values.strides() == self_.strides() && (newself || get_overlap_status(self, values) == MemOverlapStatus::NO)) {
values_ptr_ = values.data_ptr();
} else {
values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options());
values_ptr_ = values_tmp.data_ptr();
}
if (!indices.defined()) {
if (is_non_overlapping_and_dense) {
indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong));
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong));
}
} else {
TORCH_CHECK(kLong == indices.scalar_type(),
"Unexpected dtype for values, expect torch.long, got ", indices.scalar_type());
indices.resize_as_(self);
}
if (indices.strides() != self_.strides()) {
indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong));
indices_ptr = indices_tmp.data_ptr<int64_t>();
} else {
indices_ptr = indices.data_ptr<int64_t>();
}
if (numel == 0) {
return std::forward_as_tuple(values, indices);
}
int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
#ifdef __HIP_PLATFORM_HCC__
constexpr bool is_rocm = true;
#else
constexpr bool is_rocm = false;
#endif
AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self_.scalar_type(), "sort", [&]{
c10::guts::if_constexpr<!(is_rocm && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){
const scalar_t *self_ptr = self_.data_ptr<scalar_t>();
auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_);
int64_t remaining = _(numel);
while (remaining > 0) {
int64_t n = ::min(remaining, nbatch);
int64_t nsegments = n / nsort;
auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous();
at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr,
reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments,
offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending);
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
}, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm"); });
});
if (values_tmp.defined()) {
values.copy_(values_tmp);
}
if (indices_tmp.defined()) {
indices.copy_(indices_tmp);
}
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
Tensor values, indices;
return sort_out_stable_cuda(self, stable, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) {
return sort_stable_cuda(self, /*stable=*/false, dim, descending);
}
}} // namespace at::native
| 40e7c836724774483ceb6a707133142865b67855.cu | #include <limits>
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
namespace at { namespace native {
bool should_use_small_sort(const Tensor &self, int64_t dim) {
int64_t ndim = self.dim();
int64_t nsort = self.sizes()[dim];
int64_t threshold;
if (self.scalar_type() == kLong || self.scalar_type() == kDouble) {
threshold = 1024;
} else {
threshold = 2048;
}
return nsort <= threshold;
}
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim);
void fillSliceWithIndex(Tensor& t,int dim) {
if (t.numel()) {
auto sizes = DimVector(t.dim(), 1);
sizes[dim] = t.sizes()[dim];
auto range = at::arange(t.sizes()[dim], t.options());
auto rangeview = range.view(sizes);
t.copy_(rangeview);
}
}
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void sortKeyValueInplace(const Tensor& key,
const Tensor& value,
int dim, bool dir) {
TORCH_CHECK(key.sizes() == value.sizes(),
"Key tensor must have same size as value tensor");
int dims = value.dim();
TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions");
// if key and value tensors have the same size, we do not need to check both
ptrdiff_t inElements = key.numel();
if (inElements == 0) {
return;
}
int64_t keySliceSize = key.size(dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present");
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort");
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
GTOp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTOp<scalar_t, true>()); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} else { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \
LTOp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTOp<scalar_t, true>()); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
TORCH_INTERNAL_ASSERT(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] {
if (at::cuda::detail::canUse32BitIndexMath(key)) {
at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key);
at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo =
at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key);
at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo =
at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
});
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
}
namespace {
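// offset_t is the begin/end offset functor handed to the segmented sort below:
// operator[](i) returns stride * (begin + i), i.e. the boundary of segment i when
// stride == nsort (begin = 0 yields segment starts, begin = 1 yields segment ends).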
struct offset_t {
int stride;
int begin;
__device__ int operator[](int i) {
return stride * (begin + i);
}
};
}
// We perform a segmented sort in cub with inputs that have
// more than 1024/2048 elements along the selected dimension.
// Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace).
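// (should_use_small_sort: the threshold is 1024 slice elements for 8-byte dtypes and 2048 otherwise.)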
std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
// this algorithm is always stable
TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value.");
TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3};
checkAllSameGPU(__func__, {self_arg, values_arg, indices_arg});
bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense();
int64_t numel = self.numel();
int64_t ndim = self.dim();
dim = maybe_wrap_dim(dim, ndim);
int64_t nsort = self.sizes()[dim];
TORCH_CHECK(nsort <= std::numeric_limits<int>::max(),
"The dimension being sorted can not have more than INT_MAX elements.");
const auto self_dtype = self.dtype();
// FIXME: remove this check once cub sort supports bool
TORCH_CHECK(self_dtype != ScalarType::Bool,
"Sort currently does not support bool dtype on CUDA.");
TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble,
"Sort currently does not support complex dtypes on CUDA.");
if (ndim == 0) {
if (!values.defined()) {
values = self.clone();
} else {
values.resize_as_(self);
values.copy_(self);
}
if (!indices.defined()) {
indices = at::zeros({}, self.options().dtype(kLong));
} else {
indices.resize_as_(self);
indices.zero_();
}
return std::forward_as_tuple(values, indices);
}
// use inplace algorithm for smaller input sizes without stable=True
if (should_use_small_sort(self, dim) && !stable.value()) {
// from thc: sorted->values, indices->indices, input->self
if (!values.defined()) {
values = at::empty_like(self);
}
if (!indices.defined()) {
indices = at::empty_like(self, self.options().dtype(kLong));
}
// Make sure sufficient output space is allocated
auto self_size = self.sizes();
at::native::resize_output(values, self_size);
at::native::resize_output(indices, self_size);
fillSliceWithIndex(indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
values.copy_(self);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
sortKeyValueInplace(values, indices, dim, descending);
return std::forward_as_tuple(values, indices);
}
Tensor self_;
bool newself = false;
if (is_non_overlapping_and_dense && self.stride(dim) == 1) {
self_ = self;
} else {
auto new_strides_unsort = infer_dense_strides_dim_last(self, dim);
self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options());
self_.copy_(self);
newself = true;
}
Tensor values_tmp, indices_tmp;
void *values_ptr_;
int64_t *indices_ptr;
if (!values.defined()) {
if (is_non_overlapping_and_dense) {
values = at::empty_strided(self.sizes(), self.strides(), self.options());
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
values = at::empty_strided(self.sizes(), strides, self.options());
}
} else {
TORCH_CHECK(self_.scalar_type() == values.scalar_type(),
"Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type());
values.resize_as_(self);
}
if (values.strides() == self_.strides() && (newself || get_overlap_status(self, values) == MemOverlapStatus::NO)) {
values_ptr_ = values.data_ptr();
} else {
values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options());
values_ptr_ = values_tmp.data_ptr();
}
if (!indices.defined()) {
if (is_non_overlapping_and_dense) {
indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong));
} else {
auto strides = at::infer_dense_strides(self.sizes(), self.strides());
indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong));
}
} else {
TORCH_CHECK(kLong == indices.scalar_type(),
"Unexpected dtype for values, expect torch.long, got ", indices.scalar_type());
indices.resize_as_(self);
}
if (indices.strides() != self_.strides()) {
indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong));
indices_ptr = indices_tmp.data_ptr<int64_t>();
} else {
indices_ptr = indices.data_ptr<int64_t>();
}
if (numel == 0) {
return std::forward_as_tuple(values, indices);
}
int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max()));
int64_t nbatch = (numel_or_intmax / nsort) * nsort;
#ifdef __HIP_PLATFORM_HCC__
constexpr bool is_rocm = true;
#else
constexpr bool is_rocm = false;
#endif
AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self_.scalar_type(), "sort", [&]{
c10::guts::if_constexpr<!(is_rocm && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){
const scalar_t *self_ptr = self_.data_ptr<scalar_t>();
auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_);
int64_t remaining = _(numel);
while (remaining > 0) {
int64_t n = std::min(remaining, nbatch);
int64_t nsegments = n / nsort;
auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous();
at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr,
reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments,
offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending);
remaining -= n;
self_ptr += n;
values_ptr += n;
indices_ptr += n;
}
}, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm"); });
});
if (values_tmp.defined()) {
values.copy_(values_tmp);
}
if (indices_tmp.defined()) {
indices.copy_(indices_tmp);
}
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
Tensor values, indices;
return sort_out_stable_cuda(self, stable, dim, descending, values, indices);
}
std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) {
return sort_stable_cuda(self, /*stable=*/false, dim, descending);
}
}} // namespace at::native
|
89944ed35ad145d992ce4b35f23f5f24629a55a3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cuml/cuml.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace rf {
struct RegParams {
DatasetParams data;
RegressionParams regression;
RF_params rf;
};
template <typename D>
struct RFRegressorModel {};
template <>
struct RFRegressorModel<float> {
ML::RandomForestRegressorF model;
};
template <>
struct RFRegressorModel<double> {
ML::RandomForestRegressorD model;
};
template <typename D>
class RFRegressor : public RegressionFixture<D> {
public:
RFRegressor(const std::string& name, const RegParams& p)
: RegressionFixture<D>(name, p.data, p.regression), rfParams(p.rf) {}
protected:
void runBenchmark(::benchmark::State& state) override {
using MLCommon::Bench::CudaEventTimer;
if (this->params.rowMajor) {
state.SkipWithError("RFRegressor only supports col-major inputs");
}
this->loopOnState(state, [this]() {
auto* mPtr = &model.model;
mPtr->trees = nullptr;
fit(*this->handle, mPtr, this->data.X, this->params.nrows,
this->params.ncols, this->data.y, rfParams);
CUDA_CHECK(hipStreamSynchronize(this->stream));
});
}
private:
RFRegressorModel<D> model;
RF_params rfParams;
};
template <typename D>
std::vector<RegParams> getInputs() {
struct DimInfo {
int nrows, ncols, n_informative;
};
struct std::vector<RegParams> out;
RegParams p;
p.data.rowMajor = false;
p.regression.shuffle = true; // better to shuffle when n_informative < ncols
p.regression.seed = 12345ULL;
p.regression.effective_rank = -1; // dataset generation will be faster
p.regression.bias = 4.5;
p.regression.tail_strength = 0.5; // unused when effective_rank = -1
p.regression.noise = 1.;
p.rf.bootstrap = true;
p.rf.rows_sample = 1.f;
p.rf.tree_params.max_leaves = 1 << 20;
p.rf.tree_params.min_rows_per_node = 3;
p.rf.tree_params.n_bins = 32;
p.rf.tree_params.bootstrap_features = true;
p.rf.tree_params.quantile_per_tree = false;
p.rf.tree_params.split_algo = 1;
p.rf.tree_params.split_criterion = ML::CRITERION::MSE;
p.rf.n_trees = 500;
p.rf.n_streams = 8;
std::vector<DimInfo> dim_info = {{500000, 500, 400}};
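// Each DimInfo entry is expanded into one benchmark case per max_depth value below, so
// this single 500k x 500 dataset (400 informative columns) yields three RegParams configs.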
for (auto& di : dim_info) {
// Let's run Bosch only for float type
if (!std::is_same<D, float>::value && di.ncols == 968) continue;
p.data.nrows = di.nrows;
p.data.ncols = di.ncols;
p.regression.n_informative = di.n_informative;
p.rf.tree_params.max_features = 1.f;
for (auto max_depth : std::vector<int>({8, 12, 16})) {
p.rf.tree_params.max_depth = max_depth;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(RegParams, RFRegressor<float>, "regression",
getInputs<float>());
ML_BENCH_REGISTER(RegParams, RFRegressor<double>, "regression",
getInputs<double>());
} // namespace rf
} // namespace Bench
} // namespace ML
| 89944ed35ad145d992ce4b35f23f5f24629a55a3.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cuml/cuml.hpp>
#include <cuml/ensemble/randomforest.hpp>
#include <utility>
#include "benchmark.cuh"
namespace ML {
namespace Bench {
namespace rf {
struct RegParams {
DatasetParams data;
RegressionParams regression;
RF_params rf;
};
template <typename D>
struct RFRegressorModel {};
template <>
struct RFRegressorModel<float> {
ML::RandomForestRegressorF model;
};
template <>
struct RFRegressorModel<double> {
ML::RandomForestRegressorD model;
};
template <typename D>
class RFRegressor : public RegressionFixture<D> {
public:
RFRegressor(const std::string& name, const RegParams& p)
: RegressionFixture<D>(name, p.data, p.regression), rfParams(p.rf) {}
protected:
void runBenchmark(::benchmark::State& state) override {
using MLCommon::Bench::CudaEventTimer;
if (this->params.rowMajor) {
state.SkipWithError("RFRegressor only supports col-major inputs");
}
this->loopOnState(state, [this]() {
auto* mPtr = &model.model;
mPtr->trees = nullptr;
fit(*this->handle, mPtr, this->data.X, this->params.nrows,
this->params.ncols, this->data.y, rfParams);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
});
}
private:
RFRegressorModel<D> model;
RF_params rfParams;
};
template <typename D>
std::vector<RegParams> getInputs() {
struct DimInfo {
int nrows, ncols, n_informative;
};
struct std::vector<RegParams> out;
RegParams p;
p.data.rowMajor = false;
p.regression.shuffle = true; // better to shuffle when n_informative < ncols
p.regression.seed = 12345ULL;
p.regression.effective_rank = -1; // dataset generation will be faster
p.regression.bias = 4.5;
p.regression.tail_strength = 0.5; // unused when effective_rank = -1
p.regression.noise = 1.;
p.rf.bootstrap = true;
p.rf.rows_sample = 1.f;
p.rf.tree_params.max_leaves = 1 << 20;
p.rf.tree_params.min_rows_per_node = 3;
p.rf.tree_params.n_bins = 32;
p.rf.tree_params.bootstrap_features = true;
p.rf.tree_params.quantile_per_tree = false;
p.rf.tree_params.split_algo = 1;
p.rf.tree_params.split_criterion = ML::CRITERION::MSE;
p.rf.n_trees = 500;
p.rf.n_streams = 8;
std::vector<DimInfo> dim_info = {{500000, 500, 400}};
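// Each DimInfo entry is expanded into one benchmark case per max_depth value below, so
// this single 500k x 500 dataset (400 informative columns) yields three RegParams configs.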
for (auto& di : dim_info) {
// Let's run Bosch only for float type
if (!std::is_same<D, float>::value && di.ncols == 968) continue;
p.data.nrows = di.nrows;
p.data.ncols = di.ncols;
p.regression.n_informative = di.n_informative;
p.rf.tree_params.max_features = 1.f;
for (auto max_depth : std::vector<int>({8, 12, 16})) {
p.rf.tree_params.max_depth = max_depth;
out.push_back(p);
}
}
return out;
}
ML_BENCH_REGISTER(RegParams, RFRegressor<float>, "regression",
getInputs<float>());
ML_BENCH_REGISTER(RegParams, RFRegressor<double>, "regression",
getInputs<double>());
} // namespace rf
} // namespace Bench
} // namespace ML
|
a14b0cc47fbda6fa53dc340c33f49e826419f793.hip | // !!! This is a file automatically generated by hipify!!!
/* /////////////// DISCLAIMER/////////////////////////////////
This software is provided by the author and
contributors ``as is'' and any express or implied
warranties, including, but not limited to, the
implied warranties of merchantability and
fitness for a particular purpose are dis-
claimed. In no event shall the author or con-
tributors be liable for any direct, indirect,
incidental, special, exemplary, or consequen-
tial damages (including, but not limited to,
procurement of substitute goods or services;
loss of use, data, or profits; or business
interruption) however caused and on any
theory of liability, whether in contract,
strict liability, or tort (including negligence
or otherwise) arising in any way out of the use
of this software, even if advised of the poss-
ibility of such damage.
//////////////////////////////////////////////////////*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "F5crypt.h"
#include "F5crypt.cu"
__device__ int device_strlen(char *str)
{
if (!str) {
return 0;
}
char *ptr = str;
while (*str) {
++str;
}
return str - ptr;
}
__device__ void device_decode(int idx, short *coeff, int coeff_len, int * perm_buffer, char * passwords, char * results, int max_msg_len, int max_pass, int max_decode)
{
char * pass;
int pass_len;
f5_rand_state rstate;
int * perms = &(perm_buffer[idx * coeff_len]);
pass = &(passwords[idx * max_pass]);
pass_len = device_strlen(pass);
// printf("%d perm: %p trying pass: \"%s\" len %d\n", idx, perms, pass, pass_len);
F5gen_rand_seed(pass, pass_len, &rstate);
/*
printf("initial rands: ");
for (int offset = 0; offset < 20; offset++) {
printf("%02x", rstate->output[offset] & 0xff);
}
printf("\n");
*/
F5permutation(&rstate, perms, coeff_len);
/*
for (int j = 0; j < 10; j++) {
printf("%d %s msg %p [%d] : %p = %d\n", idx, pass, perms, j, &(perms[j]), perms[j]);
}
*/
char *msg = &(results[idx * (max_decode+1)]);
int msg_len = 0;
// 4. Attempt extraction
// Return 1 on success, 0 on failure. On success message and message_len will be modified.
int ret = F5extract(coeff, coeff_len, perms, &rstate, max_msg_len, msg, &msg_len, max_decode);
msg[msg_len] = '\0';
// 5. Write results if password found
if (ret)
{
printf(">>>>> Password Hit: \"%s\" <<<<<\n", pass);
printf("%s == %s\n", pass, msg);
}
}
__global__ void decode(short *coeff, int coeff_len, int * perm_buffer, char * passwords, char * results, int max_msg_len, int max_pass, int max_decode)
{
device_decode(blockIdx.x * blockDim.x + threadIdx.x, coeff, coeff_len, perm_buffer, passwords, results, max_msg_len, max_pass, max_decode);
}
int getline(char line[256], size_t *n, FILE *stream);
int load_coeff(char* filename, short** coeffptr, int* coeff_len, int *max_msg_len);
#define CUDA_ERR_CHECK if (cudaStatus != hipSuccess) { fprintf(stderr, "%d cuda returned error code %d : %s!\n", __LINE__, cudaStatus, hipGetErrorString(cudaStatus)); return 1; }
void usage()
{
printf("--- BruthCrackF5CUDA ---\n");
printf("Reads a provided coefficent dump from a JPEG file (TODO: jpeglib) and tests it\n");
printf("against a seriesof passwords provided as a password file.\n");
printf("\nUsage: brutecrackf5 filename [OPTION]...\n\nOptions:\n");
printf(" --pass FILENAME Password list. Expected to be seperated by new-line charactors.\n");
printf(" --gpu number\n");
printf(" Default: 0\n");
printf(" --blocks count\n");
printf(" Default: 4\n");
printf(" --threads count\n");
printf(" Default: 4\n");
printf(" --max-pass max length of password\n");
printf(" Default: 8\n");
printf(" --max-decode max number of bytes matching PixelKnot header to decode\n");
printf(" Default: 4\n");
printf(" --suffix try all suffix of each password up to length\n");
printf(" Default: 0\n");
printf(" --skip skip lines of password file\n");
printf(" Default: 0\n");
exit(0);
}
int main(int argc, char** argv)
{
char * coeff_file = 0;
char * pass_file = 0;
int n_blocks = 32;
int n_threads = 32;
int max_pass = 8;
int max_decode = 4;
int suffix_length = 0;
int skip = 0;
int gpu_id = 0;
//Parse Args
for (int i = 1; i < argc; i++)
{
if (strcmp(argv[i], "--pass") == 0 || strcmp(argv[i], "-p") == 0)
{
if (i + 1 == argc) usage();
pass_file = argv[++i];
continue;
}
if (strcmp(argv[i], "--gpu") == 0 || strcmp(argv[i], "-g") == 0)
{
if (i + 1 == argc) usage();
gpu_id = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--skip") == 0 || strcmp(argv[i], "-sk") == 0)
{
if (i + 1 == argc) usage();
skip = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--blocks") == 0 || strcmp(argv[i], "-b") == 0)
{
if (i + 1 == argc) usage();
n_blocks = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--threads") == 0 || strcmp(argv[i], "-t") == 0)
{
if (i + 1 == argc) usage();
n_threads = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--max-pass") == 0 || strcmp(argv[i], "-mp") == 0)
{
if (i + 1 == argc) usage();
max_pass = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--max-decode") == 0 || strcmp(argv[i], "-md") == 0)
{
if (i + 1 == argc) usage();
max_decode = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--suffix") == 0 || strcmp(argv[i], "-s") == 0)
{
if (i + 1 == argc) usage();
suffix_length = strtol(argv[++i], NULL, 10);
continue;
}
//fall through
if (!coeff_file)
{
coeff_file = argv[i];
continue;
}
//anything else
usage();
}
if (!coeff_file)
usage();
if (!pass_file)
usage();
int max_batch = n_blocks * n_threads;
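// One candidate password is assigned to each GPU thread, so a single batch covers
// n_blocks * n_threads passwords per kernel launch.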
char * password_buffer;
char * result_buffer;
short * coeff_buffer;
int * perm_buffer;
short *coeff = 0; // coefficent dump from a JPEG file
int coeff_len;
int max_msg_len; // max legit message length
hipError_t cudaStatus;
clock_t start, end;
float seconds;
start = clock();
FILE * fp;
char line[256];
size_t len = 0;
int read;
printf("setting gpu to %d\n", gpu_id);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(gpu_id);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU and drivers installed?");
return 1;
}
// load coefs and copy to GPU
printf("loading coeff file %s\n", coeff_file);
load_coeff(coeff_file, &coeff, &coeff_len, &max_msg_len);
printf("allocating memory for batches of %d * (%d coeff + %d pass + %d decode)\n", max_batch, coeff_len, max_pass, max_decode + 1);
cudaStatus = hipMalloc(&coeff_buffer, (sizeof(short) * coeff_len)); CUDA_ERR_CHECK;
cudaStatus = hipMemcpy(coeff_buffer, coeff, coeff_len * sizeof(short), hipMemcpyHostToDevice); CUDA_ERR_CHECK;
// allocate room for passwords and results
cudaStatus = hipMalloc(&perm_buffer, (sizeof(int) * max_batch * coeff_len)); CUDA_ERR_CHECK;
cudaStatus = hipMalloc(&password_buffer, (sizeof(char) * max_batch * max_pass)); CUDA_ERR_CHECK;
cudaStatus = hipMalloc(&result_buffer, (sizeof(char) * max_batch * (max_decode + 1))); CUDA_ERR_CHECK;
char * passwords = (char *)malloc(max_batch * max_pass * sizeof(char));
char * results = (char *)malloc(max_batch * max_decode * sizeof(char));
printf("opening password file %s\n", pass_file);
fp = fopen(pass_file, "r");
if (fp == NULL)
{
printf("could not open %s", pass_file);
exit(EXIT_FAILURE);
}
int pass_count = 0;
int processed = 0;
if (skip > 0) {
printf("skipping %d lines\n", skip);
}
while (skip-- > 0 && getline(line, &len, fp)) { ; }
while ((read = getline(line, &len, fp)) != -1) {
// printf("Retrieved line of length %d :\n", read);
// printf("%s", line);
int pass_len = read;
int off = 0;
while (pass_len >= suffix_length) {
// printf("%s %d %d\n", &line[off], pass_len, max_pass);
if (pass_len < max_pass) {
memset(&(passwords[pass_count * max_pass]), '\0', max_pass);
strncpy(&(passwords[pass_count * max_pass]), &line[off], pass_len);
pass_count++;
if (pass_count >= max_batch) {
// printf("batch ready %d\n", pass_count);
// batch is ready for processing
cudaStatus = hipMemcpy(password_buffer, passwords, max_batch * max_pass, hipMemcpyHostToDevice); CUDA_ERR_CHECK;
decode << <n_blocks, n_threads >> > (coeff_buffer, coeff_len, perm_buffer, password_buffer, result_buffer, max_msg_len, max_pass, max_decode);
cudaStatus = hipGetLastError(); CUDA_ERR_CHECK;
cudaStatus = hipDeviceSynchronize(); CUDA_ERR_CHECK;
pass_count = 0;
processed += max_batch;
end = clock();
seconds = end - start; // time difference is now a float
seconds /= CLOCKS_PER_SEC; // this division is now floating point
printf("processed %d pass in %.02f seconds = %.02f pass/sec @ %s\n", processed, seconds, processed / seconds, line);
}
}
// else { printf("skipping %s,too long\n", &line[off]); }
if (suffix_length == 0) {
// 0 means no suffixing
pass_len = 0;
}
pass_len--;
off++;
}
}
if (pass_count > 0) {
processed += pass_count;
while (pass_count < max_batch) {
memset(&(passwords[pass_count * max_pass]), '\0', max_pass);
pass_count++;
}
cudaStatus = hipMemcpy(password_buffer, passwords, max_batch * max_pass, hipMemcpyHostToDevice); CUDA_ERR_CHECK;
decode << <n_blocks, n_threads >> > (coeff_buffer, coeff_len, perm_buffer, password_buffer, result_buffer, max_msg_len, max_pass, max_decode);
cudaStatus = hipGetLastError(); CUDA_ERR_CHECK;
cudaStatus = hipDeviceSynchronize(); CUDA_ERR_CHECK;
end = clock();
seconds = end - start; // time difference is now a float
seconds /= CLOCKS_PER_SEC; // this division is now floating point
printf("processed %d pass in %.02f seconds = %.02f pass/sec complete\n", processed, seconds, processed / seconds);
}
printf("done\n");
fclose(fp);
hipFree(coeff_buffer);
hipFree(perm_buffer);
hipFree(password_buffer);
hipFree(result_buffer);
return 1;
}
int load_coeff(char* filename, short** coeffptr, int* coeff_len, int *max_msg_len)
{
FILE *fp;
short *coeff = 0;
fp = fopen(filename, "rb");
if (!fp)
{
fputs("File not found\n", stderr);
return 1;
}
fseek(fp, 0, SEEK_END);
*coeff_len = ftell(fp) / 2;
rewind(fp);
printf("File: %s %i bytes.\n", filename, *coeff_len * 2);
coeff = (short *)malloc(*coeff_len * sizeof(short));
*coeffptr = coeff; //export the pointer
if (fread(coeff, 2, *coeff_len, fp) != *coeff_len)
{
fputs("File error\n", stderr);
return 1;
}
*max_msg_len = 0;
for (int i = 0; i < *coeff_len; i++)
if ((i % 64 != 0) & (coeff[i] != 0))
(*max_msg_len)++;
printf("Max theoretical message length: %i\n", *max_msg_len);
return 0;
}
int getline(char line[256], size_t *n, FILE *stream)
{
char *ptr;
size_t len;
if (ferror(stream))
return -1;
if (feof(stream))
return -1;
fgets(line, 256, stream);
ptr = strchr(line, '\n');
if (ptr)
*ptr = '\0';
len = strlen(line);
return((int)len);
}
| a14b0cc47fbda6fa53dc340c33f49e826419f793.cu | /* /////////////// DISCLAIMER/////////////////////////////////
This software is provided by the author and
contributors ``as is'' and any express or implied
warranties, including, but not limited to, the
implied warranties of merchantability and
fitness for a particular purpose are dis-
claimed. In no event shall the author or con-
tributors be liable for any direct, indirect,
incidental, special, exemplary, or consequen-
tial damages (including, but not limited to,
procurement of substitute goods or services;
loss of use, data, or profits; or business
interruption) however caused and on any
theory of liability, whether in contract,
strict liability, or tort (including negligence
or otherwise) arising in any way out of the use
of this software, even if advised of the poss-
ibility of such damage.
//////////////////////////////////////////////////////*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "F5crypt.h"
#include "F5crypt.cu"
__device__ int device_strlen(char *str)
{
if (!str) {
return 0;
}
char *ptr = str;
while (*str) {
++str;
}
return str - ptr;
}
__device__ void device_decode(int idx, short *coeff, int coeff_len, int * perm_buffer, char * passwords, char * results, int max_msg_len, int max_pass, int max_decode)
{
char * pass;
int pass_len;
f5_rand_state rstate;
int * perms = &(perm_buffer[idx * coeff_len]);
pass = &(passwords[idx * max_pass]);
pass_len = device_strlen(pass);
// printf("%d perm: %p trying pass: \"%s\" len %d\n", idx, perms, pass, pass_len);
F5gen_rand_seed(pass, pass_len, &rstate);
/*
printf("initial rands: ");
for (int offset = 0; offset < 20; offset++) {
printf("%02x", rstate->output[offset] & 0xff);
}
printf("\n");
*/
F5permutation(&rstate, perms, coeff_len);
/*
for (int j = 0; j < 10; j++) {
printf("%d %s msg %p [%d] : %p = %d\n", idx, pass, perms, j, &(perms[j]), perms[j]);
}
*/
char *msg = &(results[idx * (max_decode+1)]);
int msg_len = 0;
// 4. Attempt extraction
// Return 1 on success, 0 on failure. On success message and message_len will be modified.
int ret = F5extract(coeff, coeff_len, perms, &rstate, max_msg_len, msg, &msg_len, max_decode);
msg[msg_len] = '\0';
// 5. Write results if password found
if (ret)
{
printf(">>>>> Password Hit: \"%s\" <<<<<\n", pass);
printf("%s == %s\n", pass, msg);
}
}
__global__ void decode(short *coeff, int coeff_len, int * perm_buffer, char * passwords, char * results, int max_msg_len, int max_pass, int max_decode)
{
device_decode(blockIdx.x * blockDim.x + threadIdx.x, coeff, coeff_len, perm_buffer, passwords, results, max_msg_len, max_pass, max_decode);
}
int getline(char line[256], size_t *n, FILE *stream);
int load_coeff(char* filename, short** coeffptr, int* coeff_len, int *max_msg_len);
#define CUDA_ERR_CHECK if (cudaStatus != cudaSuccess) { fprintf(stderr, "%d cuda returned error code %d : %s!\n", __LINE__, cudaStatus, cudaGetErrorString(cudaStatus)); return 1; }
void usage()
{
printf("--- BruthCrackF5CUDA ---\n");
printf("Reads a provided coefficent dump from a JPEG file (TODO: jpeglib) and tests it\n");
printf("against a seriesof passwords provided as a password file.\n");
printf("\nUsage: brutecrackf5 filename [OPTION]...\n\nOptions:\n");
printf(" --pass FILENAME Password list. Expected to be seperated by new-line charactors.\n");
printf(" --gpu number\n");
printf(" Default: 0\n");
printf(" --blocks count\n");
printf(" Default: 4\n");
printf(" --threads count\n");
printf(" Default: 4\n");
printf(" --max-pass max length of password\n");
printf(" Default: 8\n");
printf(" --max-decode max number of bytes matching PixelKnot header to decode\n");
printf(" Default: 4\n");
printf(" --suffix try all suffix of each password up to length\n");
printf(" Default: 0\n");
printf(" --skip skip lines of password file\n");
printf(" Default: 0\n");
exit(0);
}
int main(int argc, char** argv)
{
char * coeff_file = 0;
char * pass_file = 0;
int n_blocks = 32;
int n_threads = 32;
int max_pass = 8;
int max_decode = 4;
int suffix_length = 0;
int skip = 0;
int gpu_id = 0;
//Parse Args
for (int i = 1; i < argc; i++)
{
if (strcmp(argv[i], "--pass") == 0 || strcmp(argv[i], "-p") == 0)
{
if (i + 1 == argc) usage();
pass_file = argv[++i];
continue;
}
if (strcmp(argv[i], "--gpu") == 0 || strcmp(argv[i], "-g") == 0)
{
if (i + 1 == argc) usage();
gpu_id = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--skip") == 0 || strcmp(argv[i], "-sk") == 0)
{
if (i + 1 == argc) usage();
skip = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--blocks") == 0 || strcmp(argv[i], "-b") == 0)
{
if (i + 1 == argc) usage();
n_blocks = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--threads") == 0 || strcmp(argv[i], "-t") == 0)
{
if (i + 1 == argc) usage();
n_threads = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--max-pass") == 0 || strcmp(argv[i], "-mp") == 0)
{
if (i + 1 == argc) usage();
max_pass = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--max-decode") == 0 || strcmp(argv[i], "-md") == 0)
{
if (i + 1 == argc) usage();
max_decode = strtol(argv[++i], NULL, 10);
continue;
}
if (strcmp(argv[i], "--suffix") == 0 || strcmp(argv[i], "-s") == 0)
{
if (i + 1 == argc) usage();
suffix_length = strtol(argv[++i], NULL, 10);
continue;
}
//fall through
if (!coeff_file)
{
coeff_file = argv[i];
continue;
}
//anything else
usage();
}
if (!coeff_file)
usage();
if (!pass_file)
usage();
int max_batch = n_blocks * n_threads;
char * password_buffer;
char * result_buffer;
short * coeff_buffer;
int * perm_buffer;
short *coeff = 0; // coefficient dump from a JPEG file
int coeff_len;
int max_msg_len; // max legit message length
cudaError_t cudaStatus;
clock_t start, end;
float seconds;
start = clock();
FILE * fp;
char line[256];
size_t len = 0;
int read;
printf("setting gpu to %d\n", gpu_id);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(gpu_id);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU and drivers installed?");
return 1;
}
// load coefs and copy to GPU
printf("loading coeff file %s\n", coeff_file);
load_coeff(coeff_file, &coeff, &coeff_len, &max_msg_len);
printf("allocating memory for batches of %d * (%d coeff + %d pass + %d decode)\n", max_batch, coeff_len, max_pass, max_decode + 1);
cudaStatus = cudaMalloc(&coeff_buffer, (sizeof(short) * coeff_len)); CUDA_ERR_CHECK;
cudaStatus = cudaMemcpy(coeff_buffer, coeff, coeff_len * sizeof(short), cudaMemcpyHostToDevice); CUDA_ERR_CHECK;
// allocate room for passwords and results
cudaStatus = cudaMalloc(&perm_buffer, (sizeof(int) * max_batch * coeff_len)); CUDA_ERR_CHECK;
cudaStatus = cudaMalloc(&password_buffer, (sizeof(char) * max_batch * max_pass)); CUDA_ERR_CHECK;
cudaStatus = cudaMalloc(&result_buffer, (sizeof(char) * max_batch * (max_decode + 1))); CUDA_ERR_CHECK;
char * passwords = (char *)malloc(max_batch * max_pass * sizeof(char));
char * results = (char *)malloc(max_batch * max_decode * sizeof(char));
printf("opening password file %s\n", pass_file);
fp = fopen(pass_file, "r");
if (fp == NULL)
{
printf("could not open %s", pass_file);
exit(EXIT_FAILURE);
}
int pass_count = 0;
int processed = 0;
if (skip > 0) {
printf("skipping %d lines\n", skip);
}
while (skip-- > 0 && getline(line, &len, fp)) { ; }
while ((read = getline(line, &len, fp)) != -1) {
// printf("Retrieved line of length %d :\n", read);
// printf("%s", line);
int pass_len = read;
int off = 0;
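// When --suffix is non-zero, every trailing substring of the line down to that length is also tried:
// 'off' advances the start of the candidate while 'pass_len' shrinks with it; suffix_length == 0 means only the full line is used.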
while (pass_len >= suffix_length) {
// printf("%s %d %d\n", &line[off], pass_len, max_pass);
if (pass_len < max_pass) {
memset(&(passwords[pass_count * max_pass]), '\0', max_pass);
strncpy(&(passwords[pass_count * max_pass]), &line[off], pass_len);
pass_count++;
if (pass_count >= max_batch) {
// printf("batch ready %d\n", pass_count);
// batch is ready for processing
cudaStatus = cudaMemcpy(password_buffer, passwords, max_batch * max_pass, cudaMemcpyHostToDevice); CUDA_ERR_CHECK;
decode << <n_blocks, n_threads >> > (coeff_buffer, coeff_len, perm_buffer, password_buffer, result_buffer, max_msg_len, max_pass, max_decode);
cudaStatus = cudaGetLastError(); CUDA_ERR_CHECK;
cudaStatus = cudaDeviceSynchronize(); CUDA_ERR_CHECK;
pass_count = 0;
processed += max_batch;
end = clock();
seconds = end - start; // time difference is now a float
seconds /= CLOCKS_PER_SEC; // this division is now floating point
printf("processed %d pass in %.02f seconds = %.02f pass/sec @ %s\n", processed, seconds, processed / seconds, line);
}
}
// else { printf("skipping %s,too long\n", &line[off]); }
if (suffix_length == 0) {
// 0 means no suffixing
pass_len = 0;
}
pass_len--;
off++;
}
}
if (pass_count > 0) {
processed += pass_count;
while (pass_count < max_batch) {
memset(&(passwords[pass_count * max_pass]), '\0', max_pass);
pass_count++;
}
cudaStatus = cudaMemcpy(password_buffer, passwords, max_batch * max_pass, cudaMemcpyHostToDevice); CUDA_ERR_CHECK;
decode << <n_blocks, n_threads >> > (coeff_buffer, coeff_len, perm_buffer, password_buffer, result_buffer, max_msg_len, max_pass, max_decode);
cudaStatus = cudaGetLastError(); CUDA_ERR_CHECK;
cudaStatus = cudaDeviceSynchronize(); CUDA_ERR_CHECK;
end = clock();
seconds = end - start; // time difference is now a float
seconds /= CLOCKS_PER_SEC; // this division is now floating point
printf("processed %d pass in %.02f seconds = %.02f pass/sec complete\n", processed, seconds, processed / seconds);
}
printf("done\n");
fclose(fp);
cudaFree(coeff_buffer);
cudaFree(perm_buffer);
cudaFree(password_buffer);
cudaFree(result_buffer);
return 1;
}
int load_coeff(char* filename, short** coeffptr, int* coeff_len, int *max_msg_len)
{
FILE *fp;
short *coeff = 0;
fp = fopen(filename, "rb");
if (!fp)
{
fputs("File not found\n", stderr);
return 1;
}
fseek(fp, 0, SEEK_END);
*coeff_len = ftell(fp) / 2;
rewind(fp);
printf("File: %s %i bytes.\n", filename, *coeff_len * 2);
coeff = (short *)malloc(*coeff_len * sizeof(short));
*coeffptr = coeff; //export the pointer
if (fread(coeff, 2, *coeff_len, fp) != *coeff_len)
{
fputs("File error\n", stderr);
return 1;
}
*max_msg_len = 0;
for (int i = 0; i < *coeff_len; i++)
if ((i % 64 != 0) & (coeff[i] != 0))
(*max_msg_len)++;
printf("Max theoretical message length: %i\n", *max_msg_len);
return 0;
}
int getline(char line[256], size_t *n, FILE *stream)
{
char *ptr;
size_t len;
if (ferror(stream))
return -1;
if (feof(stream))
return -1;
if (fgets(line, 256, stream) == NULL)
return -1;
ptr = strchr(line, '\n');
if (ptr)
*ptr = '\0';
len = strlen(line);
return((int)len);
}
|
c6153b190fb5198ddc981d612b0b80e9f77e56b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
struct squareupdateOutput_functor
{
__device__ void operator()(float* output, const float* input) const
{
*output = (*input) * (*input);
}
};
void THNN_CudaSquare_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, squareupdateOutput_functor());
}
struct squareupdateGradInput_functor
{
__device__ void operator()(float* gradInput, const float* input, const float* gradOutput) const
{
*gradInput = 2.0 * (*gradOutput) * (*input);
}
};
void THNN_CudaSquare_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput)
{
THAssert(THCudaTensor_checkGPU(state, 3, input, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, squareupdateGradInput_functor());
}
| c6153b190fb5198ddc981d612b0b80e9f77e56b6.cu | #include "THCUNN.h"
struct squareupdateOutput_functor
{
__device__ void operator()(float* output, const float* input) const
{
*output = (*input) * (*input);
}
};
void THNN_CudaSquare_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, squareupdateOutput_functor());
}
struct squareupdateGradInput_functor
{
__device__ void operator()(float* gradInput, const float* input, const float* gradOutput) const
{
*gradInput = 2.0 * (*gradOutput) * (*input);
}
};
void THNN_CudaSquare_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput)
{
THAssert(THCudaTensor_checkGPU(state, 3, input, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, squareupdateGradInput_functor());
}
|
a9353798cee9314072e52520fd43b13f69eb3235.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdio.h>
__global__ void add_vector(float *c, const float *a, const float *b, int n)
{
// one thread per element of the flat input/output arrays
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
//for an array of size, fill it with a random float in [0,1]
void random_floats(float *A, int size)
{
for (int i = 0; i < size; i++)
{
A[i] = ((float) rand()) / ((float) RAND_MAX);
}
}
float my_abs(float a)
{
if (a< 0)
return -1 * a;
return a;
}
//finds the machine epsilon for a float
float find_eps()
{
float machEps = (float) 1.0;
do {
machEps /= (float) 2.0;
}
while ((float)(1.0 + machEps) != 1.0);
return machEps;
}
int main()
{
srand(time(0));
const int SIZE = 1000;
size_t bytes = 100 * 100 * sizeof(float);
//initialize our pointers on host and device
float *A, *B, *C;
float *dA, *dB, *dC;
//allocate vectors on host
A = (float*)malloc(bytes);
random_floats(A, SIZE);
B = (float*)malloc(bytes);
random_floats(B, SIZE);
C = (float*)malloc(bytes);
//allocate vectors on device
hipMalloc((void**)&dA, bytes);
hipMalloc((void**)&dB, bytes);
hipMalloc((void**)&dC, bytes);
//copy the vector from host to device
hipMemcpy(dA, A, bytes, hipMemcpyHostToDevice);
hipMemcpy(dB, B, bytes, hipMemcpyHostToDevice);
//perform the addition
hipLaunchKernelGGL(( add_vector), dim3(1), dim3(SIZE) , 0, 0, dC, dA, dB, SIZE);
//copy our answer back to the cpu
hipMemcpy(C, dC, bytes, hipMemcpyDeviceToHost);
hipFree(dA);
hipFree(dB);
hipFree(dC);
//check for correctness
float eps = find_eps();
int worked = 0;
for (int i = 0; i < SIZE; i++)
{
float rel_error = my_abs( ( (A[i] + B[i]) - C[i]) /C[i]);
if ( rel_error > eps)
{
printf("messed up on index %d\n", i);
printf("Calculation did not work\n");
worked = 1;
break;
}
}
if (worked == 0)
printf("Congrats, everyting worked!\n");
//free up the host memory
free(A);
free(B);
free(C);
}
| a9353798cee9314072e52520fd43b13f69eb3235.cu |
#include <cuda_runtime.h>
#include <time.h>
#include <stdio.h>
__global__ void add_vector(float *c, const float *a, const float *b, int n)
{
// one thread per element of the flat input/output arrays
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
//for an array of size, fill it with a random float in [0,1]
void random_floats(float *A, int size)
{
for (int i = 0; i < size; i++)
{
A[i] = ((float) rand()) / ((float) RAND_MAX);
}
}
float my_abs(float a)
{
if (a< 0)
return -1 * a;
return a;
}
//finds the machine epsilon for a float
float find_eps()
{
float machEps = (float) 1.0;
do {
machEps /= (float) 2.0;
}
while ((float)(1.0 + machEps) != 1.0);
return machEps;
}
int main()
{
srand(time(0));
const int SIZE = 1000;
size_t bytes = 100 * 100 * sizeof(float);
//initialize our pointers on host and device
float *A, *B, *C;
float *dA, *dB, *dC;
//allocate vectors on host
A = (float*)malloc(bytes);
random_floats(A, SIZE);
B = (float*)malloc(bytes);
random_floats(B, SIZE);
C = (float*)malloc(bytes);
//allocate vectors on device
cudaMalloc((void**)&dA, bytes);
cudaMalloc((void**)&dB, bytes);
cudaMalloc((void**)&dC, bytes);
//copy the vector from host to device
cudaMemcpy(dA, A, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, bytes, cudaMemcpyHostToDevice);
//perform the addition
add_vector<<<1, SIZE >>>(dC, dA, dB, SIZE);
//copy our answer back to the cpu
cudaMemcpy(C, dC, bytes, cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
//check for correctness
float eps = find_eps();
int worked = 0;
for (int i = 0; i < SIZE; i++)
{
float rel_error = my_abs( ( (A[i] + B[i]) - C[i]) /C[i]);
if ( rel_error > eps)
{
printf("messed up on index %d\n", i);
printf("Calculation did not work\n");
worked = 1;
break;
}
}
if (worked == 0)
printf("Congrats, everyting worked!\n");
//free up the host memory
free(A);
free(B);
free(C);
}
|
7cfd6088816aeb2a163ba294675da5d490c361bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// SIAMESE RECURRENT ARCHITECTURE FOR VISUAL TRACKING
// Version 1.0, Copyright(c) July, 2017
// Xiaqing Xu, Bingpeng Ma, Hong Chang, Xilin Chen
// Written by Xiaqing Xu
// ------------------------------------------------------------------
#include <vector>
#include <iostream>
#include "caffe/filler.hpp"
#include "caffe/layers/spatial_irnn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe{
template <typename Dtype>
__global__ void ReLUForward(const int n, Dtype* in) {
CUDA_KERNEL_LOOP(index, n) {
in[index] = in[index] > 0 ? in[index] : Dtype(0.);
}
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, Dtype* out_diff,const Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = Dtype(1.) * (top_data[index] > 0);
}
}
template <typename Dtype>
void RNNLEFTLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = top[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_copy(count, bottom_data, top_data);
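// Left-directed IRNN sweep: columns are processed right to left so that h_i = ReLU(x_i + W * h_{i+1}),
// accumulated in place in top_data.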
for(int i = W_ - 1; i >= 0; i--){
if(i < W_ - 1){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, NH_, H_ * N_, NH_, Dtype(1.),
w, top_data + (i + 1) * NH_ * H_ * N_, Dtype(1.),
top_data + i * NH_ * H_ * N_);
}
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(NH_* H_ * N_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
NH_* H_ * N_, top_data + i * NH_ * H_ * N_);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void RNNLEFTLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const int count = bottom[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* w_diff = this->blobs_[0]->mutable_gpu_diff();
// dh
Dtype* h_diff = cache_.mutable_gpu_data();
// f'(h)
Dtype* f_diff = cache_.mutable_gpu_diff();
Dtype* hh_diff = hh_.mutable_gpu_diff();
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, f_diff, top_data);
CUDA_POST_KERNEL_CHECK;
caffe_copy(count, top_diff, h_diff);
for(int i = 0; i < W_; i++){
// dzdf
caffe_gpu_mul(NH_ * H_ * N_, h_diff + i * NH_ * H_ * N_,
f_diff + i * NH_ * H_ * N_, f_diff + i * NH_ * H_ * N_);
// dzdhh
caffe_gpu_gemm(CblasTrans, CblasNoTrans, NH_, H_ * N_, NH_, Dtype(1.),
w, f_diff + i * NH_ * H_ * N_, Dtype(0.), hh_diff);
if(i < W_ - 1){
caffe_gpu_add(NH_ * H_ * N_, hh_diff,
h_diff + (i + 1) * NH_ * H_ * N_,
h_diff + (i + 1) * NH_ * H_ * N_);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, NH_, NH_, H_ * N_, Dtype(1.),
f_diff + i * NH_ * H_ * N_ , top_data + (i + 1) * NH_ * H_ * N_,
Dtype(1.), w_diff);
}
}
if(propagate_down[0]){
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(bottom[0]->count(), f_diff, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RNNLEFTLayer);
} // namespace caffe
| 7cfd6088816aeb2a163ba294675da5d490c361bb.cu | // ------------------------------------------------------------------
// SIAMESE RECURRENT ARCHITECTURE FOR VISUAL TRACKING
// Version 1.0, Copyright(c) July, 2017
// Xiaqing Xu, Bingpeng Ma, Hong Chang, Xilin Chen
// Written by Xiaqing Xu
// ------------------------------------------------------------------
#include <vector>
#include <iostream>
#include "caffe/filler.hpp"
#include "caffe/layers/spatial_irnn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe{
template <typename Dtype>
__global__ void ReLUForward(const int n, Dtype* in) {
CUDA_KERNEL_LOOP(index, n) {
in[index] = in[index] > 0 ? in[index] : Dtype(0.);
}
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, Dtype* out_diff,const Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = Dtype(1.) * (top_data[index] > 0);
}
}
template <typename Dtype>
void RNNLEFTLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
const int count = top[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_copy(count, bottom_data, top_data);
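// Left-directed IRNN sweep: columns are processed right to left so that h_i = ReLU(x_i + W * h_{i+1}),
// accumulated in place in top_data.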
for(int i = W_ - 1; i >= 0; i--){
if(i < W_ - 1){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, NH_, H_ * N_, NH_, Dtype(1.),
w, top_data + (i + 1) * NH_ * H_ * N_, Dtype(1.),
top_data + i * NH_ * H_ * N_);
}
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(NH_* H_ * N_), CAFFE_CUDA_NUM_THREADS>>>(
NH_* H_ * N_, top_data + i * NH_ * H_ * N_);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void RNNLEFTLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const int count = bottom[0]->count();
const Dtype* w = this->blobs_[0]->gpu_data();
Dtype* w_diff = this->blobs_[0]->mutable_gpu_diff();
// dh
Dtype* h_diff = cache_.mutable_gpu_data();
// f'(h)
Dtype* f_diff = cache_.mutable_gpu_diff();
Dtype* hh_diff = hh_.mutable_gpu_diff();
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, f_diff, top_data);
CUDA_POST_KERNEL_CHECK;
caffe_copy(count, top_diff, h_diff);
for(int i = 0; i < W_; i++){
// dzdf
caffe_gpu_mul(NH_ * H_ * N_, h_diff + i * NH_ * H_ * N_,
f_diff + i * NH_ * H_ * N_, f_diff + i * NH_ * H_ * N_);
// dzdhh
caffe_gpu_gemm(CblasTrans, CblasNoTrans, NH_, H_ * N_, NH_, Dtype(1.),
w, f_diff + i * NH_ * H_ * N_, Dtype(0.), hh_diff);
if(i < W_ - 1){
caffe_gpu_add(NH_ * H_ * N_, hh_diff,
h_diff + (i + 1) * NH_ * H_ * N_,
h_diff + (i + 1) * NH_ * H_ * N_);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, NH_, NH_, H_ * N_, Dtype(1.),
f_diff + i * NH_ * H_ * N_ , top_data + (i + 1) * NH_ * H_ * N_,
Dtype(1.), w_diff);
}
}
if(propagate_down[0]){
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(bottom[0]->count(), f_diff, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RNNLEFTLayer);
} // namespace caffe
|
0624b4b06abdc5445702577f161d65075702b0a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FeatureExtractor.h"
#include "RawData.h"
#include <cmath>
#include <cstdlib>
#include <ctime>
#include "wtime.h"
#include "mathtool.h"
#include "ThreadPool.h"
#include "FeatureExtractorTool.h"
SP_RESULT FeatureExtractor::exFeatures(const RawData *data) {
return exFeatures(data, \
sampleRate,
preEmpFactor, \
winTime, \
stepTime, \
winFunc, \
minF, \
maxF, \
hz2melFunc, \
mel2hzFunc, \
nfilts, \
cepsNum);
}
SP_RESULT FeatureExtractor::exDoubleDeltaFeatures(const RawData *data) {
exFeatures(data);
doubleDelta(normalMelCeps);
return SP_SUCCESS;
}
void FeatureExtractor::doubleDelta(std::vector<Feature> & normalMelCeps) {
int idx, siz = normalMelCeps.size();
for(idx = 0; idx < siz; idx ++)
normalMelCeps[idx].fillDoubleDelta();
}
SP_RESULT FeatureExtractor::exFeatures(const RawData *data, \
int sampleRate, \
double preEmpFactor, \
double winTime, \
double stepTime, \
double (*winFunc)(int, int), \
double minF, \
double maxF, \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double), \
int nfilts, \
int cepsNum) {
//SP_RESULT res;
inital();
double startT, finishT, initializeTime;
double totalTime = 0;
//startT = wtime();
//preEmph(e_emp_data, data->getData(), data->getFrameNum(), preEmpFactor);
//finishT = wtime();
//double t_preemp = finishT-startT;
//totalTime += t_preemp;
//startT = wtime();
//windowing(e_windows, e_emp_data, winTime, stepTime, sampleRate, winFunc);
//finishT = wtime();
//double t_window = finishT-startT;
//totalTime += t_window;
startT = wtime();
initializeTime = preProcessing(e_windows, data->getData(), data->getFrameNum(), preEmpFactor, winTime, stepTime, sampleRate);
finishT = wtime();
double t_preProcessing = finishT-startT-initializeTime;
totalTime += t_preProcessing;
startT = wtime();
powSpectrum(e_powSpec, e_windows);
finishT = wtime();
double t_powSpec = finishT-startT;
totalTime += t_powSpec;
int nfft = (e_powFrameSize -1) << 1;
startT = wtime();
fft2MelLog(nfft, &e_melLogSpec, e_powSpec, nfilts, hz2melFunc, mel2hzFunc, minF, maxF, sampleRate);
finishT = wtime();
double t_mel = finishT-startT;
totalTime += t_mel;
startT = wtime();
melCepstrum(melCeps, e_melLogSpec, cepsNum);
finishT = wtime();
double t_dctCep = finishT-startT;
totalTime += t_dctCep;
startT = wtime();
time_t start = time(0);
normalization(normalMelCeps, melCeps);
finishT = wtime();
double t_norm = finishT-startT;
totalTime += t_norm;
doubleDelta(normalMelCeps);
std::cout << "CUDA Initialize Time: " << initializeTime << std::endl;
std::cout << "Total Time (Without InitializeTime) : " << totalTime << std::endl;
//std::cout << "PreEmp: " << t_preemp << " s , " << t_preemp*100/totalTime <<"%" <<std::endl;
//std::cout << "Windowing: " << t_window << " s , " << t_window*100/totalTime <<"%" << std::endl;
std::cout << "PreProcessing: " << t_preProcessing << " s , " << t_preProcessing*100/totalTime <<"%"<< std::endl;
std::cout << "FFT: " << t_powSpec << " s , " << t_powSpec*100/totalTime <<"%"<< std::endl;
std::cout << "MelFiltering: " << t_mel << " s , " << t_mel*100/totalTime <<"%"<< std::endl;
std::cout << "DCT Ceptrum: " << t_dctCep << " s , " << t_dctCep*100/totalTime <<"%"<< std::endl;
std::cout << "Normalization: " << t_norm << " s , " << t_norm*100/totalTime <<"%"<< std::endl;
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::normalization(std::vector<Feature> &normalMels, const std::vector<Feature> & melFes) {
normalMels.clear();
if(melFes.size() == 0) return SP_SUCCESS;
Feature means, variance;
int siz = melFes[0].size();
means.resize(siz);
variance.resize(siz);
for(int i = 0;i < siz;i++) {
means[i] = variance[i] = 0;
}
for(int i = 0;i < melFes.size(); i++) {
for(int j = 0;j < siz; j++) {
if(melFes[i].size() > j) {
means[j] += melFes[i][j];
variance[j] += melFes[i][j] * melFes[i][j];
}
}
}
for(int i = 0;i < siz;i++) {
means[i] /= melFes.size();
variance[i] /= melFes.size();
variance[i] = sqrt(variance[i]);
}
for(int i = 0;i < melFes.size();i++) {
normalMels.push_back(melFes[i]);
for(int j = 0;j < siz;j++) {
if(j < melFes[i].size()) {
normalMels[i][j] -= means[j];
normalMels[i][j] /= variance[j];
}
}
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::mel2dct(Feature & feature, std::vector<double> melLog, int cepsNum) {
int siz = melLog.size();
feature.resize(siz);
for(int i = 0;i < siz;i++)
feature[i] = melLog[i];
// dct(feature.rawData(), siz, 1);
dct2(feature.rawData(), siz);
feature.resize(cepsNum);
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::melCepstrum(std::vector<Feature> &cepstrums, \
FEATURE_DATA **melLogSpec, \
int cepsNum) {
cepstrums.clear();
int framePerBlock = 4;
int rowNum = nfilts,
colNum = e_frameNum;
int elementNum = rowNum * colNum;
size_t memSize = elementNum*sizeof(FEATURE_DATA);
FEATURE_DATA * r_melLogSpec_data = (FEATURE_DATA *) malloc(memSize);
FEATURE_DATA ** r_melLogSpec = (FEATURE_DATA **)malloc(colNum * sizeof(FEATURE_DATA *));
for(int i=0; i<colNum; i++){
r_melLogSpec[i] = &r_melLogSpec_data[i*rowNum];
}
reverseMatrix(r_melLogSpec, melLogSpec, rowNum, colNum);
FEATURE_DATA * d_melLogSpec_data;
hipMalloc((void **) &d_melLogSpec_data, memSize);
hipMemcpy(d_melLogSpec_data, r_melLogSpec_data, memSize, hipMemcpyHostToDevice);
int blockSize = framePerBlock*rowNum;
size_t sharedMem = blockSize*sizeof(FEATURE_DATA);
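// One block covers framePerBlock frames laid out side by side; mel2dct_kernel is assumed to compute
// each frame's DCT out of the shared-memory buffer sized above.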
dim3 dimGrid( ceil((double)elementNum/blockSize) );
dim3 dimBlock(blockSize);
hipLaunchKernelGGL(( mel2dct_kernel), dim3(dimGrid), dim3(dimBlock), sharedMem, 0, d_melLogSpec_data, rowNum, cepsNum);
hipMemcpy(r_melLogSpec_data, d_melLogSpec_data, memSize, hipMemcpyDeviceToHost);
for(int i=0; i<colNum; i++){
Feature tmpFeature;
tmpFeature.resize(cepsNum);
for(int j=0; j<cepsNum; j++){
tmpFeature[j] = r_melLogSpec[i][j];
}
cepstrums.push_back(tmpFeature);
}
//FEATURE_DATA* e_melCeps_data = (FEATURE_DATA *) malloc(colNum*cepsNum*sizeof(FEATURE_DATA));
//e_melCeps = (FEATURE_DATA **) malloc(colNum*sizeof(FEATURE_DATA *));
//size_t copyMemSize = cepsNum*sizeof(FEATURE_DATA);
//for(int i=0; i<colNum; i++){
// e_melCeps[i] = &e_melCeps_data[i*cepsNum];
// memcpy(e_melCeps[i], r_melLogSpec[i], copyMemSize);
//}
hipFree(d_melLogSpec_data);
free(r_melLogSpec_data);
free(r_melLogSpec);
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::reverseMatrix(FEATURE_DATA **outMatrix, FEATURE_DATA **inMatrix, int rowNum, int colNum){
for(int i=0; i<colNum; i++){
for(int j=0; j<rowNum; j++){
outMatrix[i][j] = inMatrix[j][i];
}
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::powSpectrum(FEATURE_DATA **powSpec, \
FEATURE_DATA **windows) {
int frameNum = e_frameNum,
frameSize = e_frameSize,
blockSize = e_frameSize,
elementNum = frameNum * frameSize,
selIdx = (int)(std::log2(frameSize))%2;
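// selIdx (parity of log2(frameSize)) is assumed to tell windowFFT_kernel which half of its
// double-buffered shared memory holds the final FFT stage.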
// Memory Size for whole data
size_t memSize = elementNum * 2 *sizeof(FEATURE_DATA);
// Shared memory size for the CUDA kernel
size_t sharedMem = 2 * blockSize * 2 * sizeof(FEATURE_DATA);
FEATURE_DATA *SpeechSignal_real = new FEATURE_DATA[elementNum*2],
*d_SpeechSignal_real,
*d_SpeechSignal_imag;
FEATURE_DATA *SpeechSignal_imag = &SpeechSignal_real[elementNum];
// Initialize the speech signal from the windows (imaginary parts are all zero)
memset(SpeechSignal_real, 0, memSize);
memcpy(SpeechSignal_real, windows[0], memSize/2);
hipMalloc( (void **) &d_SpeechSignal_real, memSize );
hipMemcpy( d_SpeechSignal_real, SpeechSignal_real, memSize, hipMemcpyHostToDevice);
d_SpeechSignal_imag = &d_SpeechSignal_real[elementNum];
//std::cout << "The select index is: " << selIdx << std::endl;
dim3 dimGrid( ceil( (double)elementNum/blockSize ) );
dim3 dimBlock(blockSize);
hipLaunchKernelGGL(( windowFFT_kernel), dim3(dimGrid), dim3(dimBlock), sharedMem , 0, d_SpeechSignal_real, d_SpeechSignal_imag, frameNum, frameSize, 1, selIdx);
hipMemcpy(SpeechSignal_real, d_SpeechSignal_real, memSize, hipMemcpyDeviceToHost);
// Calculate the Power Spectrum
int resSize=frameSize/2+1, frameOffset, finalOffset;
FEATURE_DATA realPart, imagPart;
e_powFrameSize = resSize;
e_powSpec = (FEATURE_DATA **) malloc(e_frameNum * sizeof(FEATURE_DATA *));
FEATURE_DATA *tmp_powSpec = (FEATURE_DATA *) malloc(e_frameNum * resSize * sizeof(FEATURE_DATA));
for(int i=0; i<frameNum; i++){
e_powSpec[i] = &tmp_powSpec[i*resSize];
frameOffset = i*frameSize;
for(int j=0; j<resSize; j++){
finalOffset = frameOffset + j;
realPart = SpeechSignal_real[finalOffset];
imagPart = SpeechSignal_imag[finalOffset];
e_powSpec[i][j] = realPart*realPart + imagPart*imagPart;
}
}
hipFree(d_SpeechSignal_real);
delete []SpeechSignal_real;
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::getWts(Matrix<double> &wts, \
int nfft, \
double minF, \
double maxF, \
int sampleRate, \
int nfilts, \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double)) {
int nfreqs = nfft / 2 + 1;
wts.clear();
std::vector<double> points;
double minmel = hz2melFunc(minF);
double maxmel = hz2melFunc(maxF);
double step = (maxmel - minmel) / (nfilts + 1);
for(int i = 0; i <= nfilts + 1; i++)
points.push_back(mel2hzFunc( minmel + step * i));
for(int i = 0; i <= nfilts + 1; i++) {
points[i] = ceil(points[i] / sampleRate * (nfft - 1));
}
for(int i = 0;i < nfilts;i++) {
wts.push_back(std::vector<double>());
std::vector<double> &filter = wts[i];
int lp = points[i], mp = points[i+1], rp = points[i+2];
double lf = 1.0 * points[i] / nfft * sampleRate;
double mf = 1.0 * points[i+1] / nfft * sampleRate;
double rf = 1.0 * points[i+2] / nfft * sampleRate;
while(filter.size() < lp)
filter.push_back(0.0);
for(int k = lp;k <= mp;k++)
filter.push_back((1.0*k/nfft * sampleRate - lf) / (mf - lf));
for(int k = mp+1;k <= rp;k++)
filter.push_back((rf - 1.0*k/nfft * sampleRate) / (rf - mf));
while(filter.size() < nfreqs)
filter.push_back(0.0);
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::getWts(FEATURE_DATA ***p_wts, \
int nfft, \
double minF, \
double maxF, \
int sampleRate, \
int nfilts, \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double)) {
int nfreqs = nfft / 2 + 1;
std::vector<double> points;
FEATURE_DATA ** wts;
wts = (FEATURE_DATA **) malloc(nfilts*sizeof(FEATURE_DATA *));
size_t memSize = nfilts * nfreqs * sizeof(FEATURE_DATA);
FEATURE_DATA * wtsData = (FEATURE_DATA *)malloc(memSize);
memset(wtsData,0, memSize);
double minmel = hz2melFunc(minF);
double maxmel = hz2melFunc(maxF);
double step = (maxmel - minmel) / (nfilts + 1);
for(int i = 0; i <= nfilts + 1; i++)
points.push_back(mel2hzFunc( minmel + step * i));
for(int i = 0; i <= nfilts + 1; i++) {
points[i] = ceil(points[i] / sampleRate * (nfft - 1));
}
for(int i = 0;i < nfilts;i++) {
wts[i] = &wtsData[i*nfreqs];
int lp = points[i], mp = points[i+1], rp = points[i+2];
double lf = 1.0 * points[i] / nfft * sampleRate;
double mf = 1.0 * points[i+1] / nfft * sampleRate;
double rf = 1.0 * points[i+2] / nfft * sampleRate;
for(int k = lp;k <= mp;k++){
wts[i][k] = (1.0*k/nfft * sampleRate - lf) / (mf - lf);
}
for(int k = mp+1;k <= rp;k++){
wts[i][k] = (rf - 1.0*k/nfft * sampleRate) / (rf - mf);
}
}
e_filterSize = nfreqs;
e_melWtsExist = true;
*p_wts = wts;
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::MatrixMul01(FEATURE_DATA ***p_melLog, \
FEATURE_DATA **wts, \
FEATURE_DATA **powSpec) {
FEATURE_DATA *h_melLog, *h_wts, *h_powSpec;
FEATURE_DATA *d_melLog, *d_wts, *d_powSpec;
FEATURE_DATA **melLog;
size_t memSize1 = nfilts * e_frameNum * sizeof(FEATURE_DATA),
memSize2 = nfilts * e_filterSize * sizeof(FEATURE_DATA),
memSize3 = e_frameNum * e_powFrameSize * sizeof(FEATURE_DATA);
h_melLog = (FEATURE_DATA *)malloc(memSize1);
h_wts = wts[0];
h_powSpec = powSpec[0];
double startT = wtime();
hipMalloc((void **)&d_melLog, memSize1);
hipMalloc((void **)&d_wts, memSize2);
hipMalloc((void **)&d_powSpec, memSize3);
hipMemcpy(d_wts, h_wts, memSize2, hipMemcpyHostToDevice);
hipMemcpy(d_powSpec, h_powSpec, memSize3, hipMemcpyHostToDevice);
int bucketNum = (((e_frameNum-1)/BLOCK_SIZE+1)-1)/COL_STEP+1;
int blockNum = (nfilts-1)/BLOCK_SIZE+1;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(bucketNum,blockNum);
int r = nfilts, c = e_frameNum;
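// melLog (nfilts x frameNum) is assumed to be wts (nfilts x filterSize) times powSpec transposed,
// i.e. the mel filterbank applied to every frame's power spectrum.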
hipLaunchKernelGGL(( matrix_mul_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_wts, d_powSpec, d_melLog, r, e_filterSize, c);
hipMemcpy(h_melLog, d_melLog, memSize1, hipMemcpyDeviceToHost);
double endT = wtime();
//printf("mel filtering calculation time %lf\n", endT-startT);
melLog = (FEATURE_DATA **) malloc(nfilts * sizeof(FEATURE_DATA*));
for(int i = 0;i < r;i++){
melLog[i] = &h_melLog[i*c];
}
*p_melLog = melLog;
hipFree(d_melLog);
hipFree(d_wts);
hipFree(d_powSpec);
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::fft2MelLog(int nfft, \
FEATURE_DATA ***p_melLog,
FEATURE_DATA **powSpec, \
int nfilts , \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double), \
double minF, \
double maxF, \
int sampleRate) {
if(!e_melWtsExist){
getWts(&e_melWts, nfft, minF, maxF, sampleRate, nfilts, hz2melFunc, mel2hzFunc);
}
MatrixMul01(p_melLog, e_melWts, powSpec);
//FEATURE_DATA **melLog = *p_melLog;
//startT = wtime();
//for(int i = 0;i < nfilts;i++)
// for(int j = 0;j < e_frameNum;j++){
// melLog[i][j] = log(0.0001+fabs(melLog[i][j]));
// }
//finishT = wtime();
//std::cout << "MelLog: "<<finishT-startT << std::endl;
return SP_SUCCESS;
}
double FeatureExtractor::preProcessing(FEATURE_DATA **out_windows, \
const SOUND_DATA *rd, \
int size, \
double factor, \
double winTime, \
double stepTime, \
int rate){
size_empData = size;
int samplePerWin = ceil(winTime * rate);
int stepPerWin = ceil(stepTime * rate);
int nfft = (1 << int(ceil(log(1.0 * samplePerWin)/log(2.0))));
e_frameSize = nfft;
e_frameNum = ceil((double)size_empData/stepPerWin);
size_t winsEleNum = nfft * e_frameNum;
//int paddedSize = nfft*ceil((float)size_empData/stepPerWin)*sizeof(FEATURE_DATA);
int paddedSize = winsEleNum*sizeof(FEATURE_DATA);
FEATURE_DATA *window_data = (FEATURE_DATA *)malloc(paddedSize);
memset(window_data, 0, paddedSize);
double startT, finishT, initializeTime;
startT = wtime();
FEATURE_DATA *d_window_data;
hipMalloc( (void **) &d_window_data, paddedSize );
hipMemcpy( d_window_data, window_data, paddedSize, hipMemcpyHostToDevice );
SOUND_DATA *d_rd;
size_t rdMemSize = size*sizeof(SOUND_DATA);
hipMalloc( (void **) &d_rd, rdMemSize );
hipMemcpy( d_rd, rd, rdMemSize, hipMemcpyHostToDevice );
finishT = wtime();
initializeTime = finishT - startT;
assert(nfft<=1024);
//std::cout << "nfft: " << nfft << std::endl;
//size_t sharedMem = nfft*sizeof(FEATURE_DATA);
dim3 dimGrid( ceil( (double)winsEleNum/nfft) );
dim3 dimBlock(nfft);
double arg_PI_factor = 2.0*PI/samplePerWin;
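// preProcessing_kernel is assumed to fuse pre-emphasis, framing and a cosine-based (Hamming/Hann-style)
// window; arg_PI_factor = 2*pi/samplePerWin is the window's angular step.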
hipLaunchKernelGGL(( preProcessing_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_rd, size, d_window_data, samplePerWin, stepPerWin, factor, arg_PI_factor);
hipMemcpy(window_data, d_window_data, paddedSize, hipMemcpyDeviceToHost);
e_frameNum = ceil((double)size_empData/stepPerWin);
e_windows = (FEATURE_DATA **)malloc( e_frameNum *sizeof(FEATURE_DATA *));
for(int i=0,j=0; i<e_frameNum; i++,j+=e_frameSize){
e_windows[i] = &window_data[j];
}
return initializeTime;
}
SP_RESULT FeatureExtractor::windowMul(FEATURE_DATA *window, \
int size, \
double (*winFunc)(int, int) ) {
for(int i = 0;i < size;i++) {
window[i] *= winFunc(i, size);
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::windowing(FEATURE_DATA **out_windows, \
const FEATURE_DATA *in, \
double winTime, \
double stepTime, \
int rate, \
double (*winFunc)(int, int)) {
int samplePerWin = ceil(winTime * rate);
int stepPerWin = ceil(stepTime * rate);
int nfft = (1 << int(ceil(log(1.0 * samplePerWin)/log(2.0))));
e_frameSize = nfft;
int paddedSize = nfft*ceil((float)size_empData/stepPerWin)*sizeof(FEATURE_DATA);
FEATURE_DATA *window_data = (FEATURE_DATA *)malloc(paddedSize);
memset(window_data, 0, paddedSize);
int cnt=0, i, j, k;
for(i = 0, k=0; i < size_empData; i += stepPerWin, k += nfft) {
cnt++;
for(j = 0;j < samplePerWin && i+j < size_empData; j++) {
window_data[k+j] = in[i+j];
}
windowMul(&window_data[k],samplePerWin,winFunc);
}
e_frameNum = cnt;
e_windows = (FEATURE_DATA **)malloc(cnt*sizeof(FEATURE_DATA *));
for(i=0,j=0; i<cnt; i++,j+=e_frameSize){
e_windows[i] = &window_data[j];
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::preEmph(/* out */FEATURE_DATA *outs, \
/*in*/const SOUND_DATA* rd, \
int size, \
double factor){
size_empData = size;
outs[0]=rd[0];
for(int i = 1;i<size;i++){
outs[i]=(1.0 * rd[i] - factor * rd[i-1]);
}
return SP_SUCCESS;
}
| 0624b4b06abdc5445702577f161d65075702b0a3.cu | #include "FeatureExtractor.h"
#include "RawData.h"
#include <cmath>
#include <cstdlib>
#include <ctime>
#include "wtime.h"
#include "mathtool.h"
#include "ThreadPool.h"
#include "FeatureExtractorTool.h"
SP_RESULT FeatureExtractor::exFeatures(const RawData *data) {
return exFeatures(data, \
sampleRate,
preEmpFactor, \
winTime, \
stepTime, \
winFunc, \
minF, \
maxF, \
hz2melFunc, \
mel2hzFunc, \
nfilts, \
cepsNum);
}
SP_RESULT FeatureExtractor::exDoubleDeltaFeatures(const RawData *data) {
exFeatures(data);
doubleDelta(normalMelCeps);
return SP_SUCCESS;
}
void FeatureExtractor::doubleDelta(std::vector<Feature> & normalMelCeps) {
int idx, siz = normalMelCeps.size();
for(idx = 0; idx < siz; idx ++)
normalMelCeps[idx].fillDoubleDelta();
}
SP_RESULT FeatureExtractor::exFeatures(const RawData *data, \
int sampleRate, \
double preEmpFactor, \
double winTime, \
double stepTime, \
double (*winFunc)(int, int), \
double minF, \
double maxF, \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double), \
int nfilts, \
int cepsNum) {
//SP_RESULT res;
inital();
double startT, finishT, initializeTime;
double totalTime = 0;
//startT = wtime();
//preEmph(e_emp_data, data->getData(), data->getFrameNum(), preEmpFactor);
//finishT = wtime();
//double t_preemp = finishT-startT;
//totalTime += t_preemp;
//startT = wtime();
//windowing(e_windows, e_emp_data, winTime, stepTime, sampleRate, winFunc);
//finishT = wtime();
//double t_window = finishT-startT;
//totalTime += t_window;
startT = wtime();
initializeTime = preProcessing(e_windows, data->getData(), data->getFrameNum(), preEmpFactor, winTime, stepTime, sampleRate);
finishT = wtime();
double t_preProcessing = finishT-startT-initializeTime;
totalTime += t_preProcessing;
startT = wtime();
powSpectrum(e_powSpec, e_windows);
finishT = wtime();
double t_powSpec = finishT-startT;
totalTime += t_powSpec;
int nfft = (e_powFrameSize -1) << 1;
startT = wtime();
fft2MelLog(nfft, &e_melLogSpec, e_powSpec, nfilts, hz2melFunc, mel2hzFunc, minF, maxF, sampleRate);
finishT = wtime();
double t_mel = finishT-startT;
totalTime += t_mel;
startT = wtime();
melCepstrum(melCeps, e_melLogSpec, cepsNum);
finishT = wtime();
double t_dctCep = finishT-startT;
totalTime += t_dctCep;
startT = wtime();
time_t start = time(0);
normalization(normalMelCeps, melCeps);
finishT = wtime();
double t_norm = finishT-startT;
totalTime += t_norm;
doubleDelta(normalMelCeps);
std::cout << "CUDA Initialize Time: " << initializeTime << std::endl;
std::cout << "Total Time (Without InitializeTime) : " << totalTime << std::endl;
//std::cout << "PreEmp: " << t_preemp << " s , " << t_preemp*100/totalTime <<"%" <<std::endl;
//std::cout << "Windowing: " << t_window << " s , " << t_window*100/totalTime <<"%" << std::endl;
std::cout << "PreProcessing: " << t_preProcessing << " s , " << t_preProcessing*100/totalTime <<"%"<< std::endl;
std::cout << "FFT: " << t_powSpec << " s , " << t_powSpec*100/totalTime <<"%"<< std::endl;
std::cout << "MelFiltering: " << t_mel << " s , " << t_mel*100/totalTime <<"%"<< std::endl;
std::cout << "DCT Ceptrum: " << t_dctCep << " s , " << t_dctCep*100/totalTime <<"%"<< std::endl;
std::cout << "Normalization: " << t_norm << " s , " << t_norm*100/totalTime <<"%"<< std::endl;
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::normalization(std::vector<Feature> &normalMels, const std::vector<Feature> & melFes) {
normalMels.clear();
if(melFes.size() == 0) return SP_SUCCESS;
Feature means, variance;
int siz = melFes[0].size();
means.resize(siz);
variance.resize(siz);
for(int i = 0;i < siz;i++) {
means[i] = variance[i] = 0;
}
for(int i = 0;i < melFes.size(); i++) {
for(int j = 0;j < siz; j++) {
if(melFes[i].size() > j) {
means[j] += melFes[i][j];
variance[j] += melFes[i][j] * melFes[i][j];
}
}
}
for(int i = 0;i < siz;i++) {
means[i] /= melFes.size();
variance[i] /= melFes.size();
variance[i] = sqrt(variance[i]);
}
for(int i = 0;i < melFes.size();i++) {
normalMels.push_back(melFes[i]);
for(int j = 0;j < siz;j++) {
if(j < melFes[i].size()) {
normalMels[i][j] -= means[j];
normalMels[i][j] /= variance[j];
}
}
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::mel2dct(Feature & feature, std::vector<double> melLog, int cepsNum) {
int siz = melLog.size();
feature.resize(siz);
for(int i = 0;i < siz;i++)
feature[i] = melLog[i];
// dct(feature.rawData(), siz, 1);
dct2(feature.rawData(), siz);
feature.resize(cepsNum);
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::melCepstrum(std::vector<Feature> &cepstrums, \
FEATURE_DATA **melLogSpec, \
int cepsNum) {
cepstrums.clear();
int framePerBlock = 4;
int rowNum = nfilts,
colNum = e_frameNum;
int elementNum = rowNum * colNum;
size_t memSize = elementNum*sizeof(FEATURE_DATA);
FEATURE_DATA * r_melLogSpec_data = (FEATURE_DATA *) malloc(memSize);
FEATURE_DATA ** r_melLogSpec = (FEATURE_DATA **)malloc(colNum * sizeof(FEATURE_DATA *));
for(int i=0; i<colNum; i++){
r_melLogSpec[i] = &r_melLogSpec_data[i*rowNum];
}
reverseMatrix(r_melLogSpec, melLogSpec, rowNum, colNum);
FEATURE_DATA * d_melLogSpec_data;
cudaMalloc((void **) &d_melLogSpec_data, memSize);
cudaMemcpy(d_melLogSpec_data, r_melLogSpec_data, memSize, cudaMemcpyHostToDevice);
int blockSize = framePerBlock*rowNum;
size_t sharedMem = blockSize*sizeof(FEATURE_DATA);
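// One block covers framePerBlock frames laid out side by side; mel2dct_kernel is assumed to compute
// each frame's DCT out of the shared-memory buffer sized above.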
dim3 dimGrid( ceil((double)elementNum/blockSize) );
dim3 dimBlock(blockSize);
mel2dct_kernel<<< dimGrid, dimBlock, sharedMem>>>(d_melLogSpec_data, rowNum, cepsNum);
cudaMemcpy(r_melLogSpec_data, d_melLogSpec_data, memSize, cudaMemcpyDeviceToHost);
for(int i=0; i<colNum; i++){
Feature tmpFeature;
tmpFeature.resize(cepsNum);
for(int j=0; j<cepsNum; j++){
tmpFeature[j] = r_melLogSpec[i][j];
}
cepstrums.push_back(tmpFeature);
}
//FEATURE_DATA* e_melCeps_data = (FEATURE_DATA *) malloc(colNum*cepsNum*sizeof(FEATURE_DATA));
//e_melCeps = (FEATURE_DATA **) malloc(colNum*sizeof(FEATURE_DATA *));
//size_t copyMemSize = cepsNum*sizeof(FEATURE_DATA);
//for(int i=0; i<colNum; i++){
// e_melCeps[i] = &e_melCeps_data[i*cepsNum];
// memcpy(e_melCeps[i], r_melLogSpec[i], copyMemSize);
//}
cudaFree(d_melLogSpec_data);
free(r_melLogSpec_data);
free(r_melLogSpec);
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::reverseMatrix(FEATURE_DATA **outMatrix, FEATURE_DATA **inMatrix, int rowNum, int colNum){
for(int i=0; i<colNum; i++){
for(int j=0; j<rowNum; j++){
outMatrix[i][j] = inMatrix[j][i];
}
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::powSpectrum(FEATURE_DATA **powSpec, \
FEATURE_DATA **windows) {
int frameNum = e_frameNum,
frameSize = e_frameSize,
blockSize = e_frameSize,
elementNum = frameNum * frameSize,
selIdx = (int)(std::log2(frameSize))%2;
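// selIdx (parity of log2(frameSize)) is assumed to tell windowFFT_kernel which half of its
// double-buffered shared memory holds the final FFT stage.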
// Memory Size for whole data
size_t memSize = elementNum * 2 *sizeof(FEATURE_DATA);
// Shared memory size for the CUDA kernel
size_t sharedMem = 2 * blockSize * 2 * sizeof(FEATURE_DATA);
FEATURE_DATA *SpeechSignal_real = new FEATURE_DATA[elementNum*2],
*d_SpeechSignal_real,
*d_SpeechSignal_imag;
FEATURE_DATA *SpeechSignal_imag = &SpeechSignal_real[elementNum];
// Initialize the speech signal from the windows (imaginary parts are all zero)
memset(SpeechSignal_real, 0, memSize);
memcpy(SpeechSignal_real, windows[0], memSize/2);
cudaMalloc( (void **) &d_SpeechSignal_real, memSize );
cudaMemcpy( d_SpeechSignal_real, SpeechSignal_real, memSize, cudaMemcpyHostToDevice);
d_SpeechSignal_imag = &d_SpeechSignal_real[elementNum];
//std::cout << "The select index is: " << selIdx << std::endl;
dim3 dimGrid( ceil( (double)elementNum/blockSize ) );
dim3 dimBlock(blockSize);
windowFFT_kernel<<< dimGrid, dimBlock, sharedMem >>>(d_SpeechSignal_real, d_SpeechSignal_imag, frameNum, frameSize, 1, selIdx);
cudaMemcpy(SpeechSignal_real, d_SpeechSignal_real, memSize, cudaMemcpyDeviceToHost);
// Calculate the Power Spectrum
int resSize=frameSize/2+1, frameOffset, finalOffset;
FEATURE_DATA realPart, imagPart;
e_powFrameSize = resSize;
e_powSpec = (FEATURE_DATA **) malloc(e_frameNum * sizeof(FEATURE_DATA *));
FEATURE_DATA *tmp_powSpec = (FEATURE_DATA *) malloc(e_frameNum * resSize * sizeof(FEATURE_DATA));
for(int i=0; i<frameNum; i++){
e_powSpec[i] = &tmp_powSpec[i*resSize];
frameOffset = i*frameSize;
for(int j=0; j<resSize; j++){
finalOffset = frameOffset + j;
realPart = SpeechSignal_real[finalOffset];
imagPart = SpeechSignal_imag[finalOffset];
e_powSpec[i][j] = realPart*realPart + imagPart*imagPart;
}
}
cudaFree(d_SpeechSignal_real);
delete []SpeechSignal_real;
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::getWts(Matrix<double> &wts, \
int nfft, \
double minF, \
double maxF, \
int sampleRate, \
int nfilts, \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double)) {
int nfreqs = nfft / 2 + 1;
wts.clear();
std::vector<double> points;
double minmel = hz2melFunc(minF);
double maxmel = hz2melFunc(maxF);
double step = (maxmel - minmel) / (nfilts + 1);
for(int i = 0; i <= nfilts + 1; i++)
points.push_back(mel2hzFunc( minmel + step * i));
for(int i = 0; i <= nfilts + 1; i++) {
points[i] = ceil(points[i] / sampleRate * (nfft - 1));
}
for(int i = 0;i < nfilts;i++) {
wts.push_back(std::vector<double>());
std::vector<double> &filter = wts[i];
int lp = points[i], mp = points[i+1], rp = points[i+2];
double lf = 1.0 * points[i] / nfft * sampleRate;
double mf = 1.0 * points[i+1] / nfft * sampleRate;
double rf = 1.0 * points[i+2] / nfft * sampleRate;
while(filter.size() < lp)
filter.push_back(0.0);
for(int k = lp;k <= mp;k++)
filter.push_back((1.0*k/nfft * sampleRate - lf) / (mf - lf));
for(int k = mp+1;k <= rp;k++)
filter.push_back((rf - 1.0*k/nfft * sampleRate) / (rf - mf));
while(filter.size() < nfreqs)
filter.push_back(0.0);
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::getWts(FEATURE_DATA ***p_wts, \
int nfft, \
double minF, \
double maxF, \
int sampleRate, \
int nfilts, \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double)) {
int nfreqs = nfft / 2 + 1;
std::vector<double> points;
FEATURE_DATA ** wts;
wts = (FEATURE_DATA **) malloc(nfilts*sizeof(FEATURE_DATA *));
size_t memSize = nfilts * nfreqs * sizeof(FEATURE_DATA);
FEATURE_DATA * wtsData = (FEATURE_DATA *)malloc(memSize);
memset(wtsData,0, memSize);
double minmel = hz2melFunc(minF);
double maxmel = hz2melFunc(maxF);
double step = (maxmel - minmel) / (nfilts + 1);
for(int i = 0; i <= nfilts + 1; i++)
points.push_back(mel2hzFunc( minmel + step * i));
for(int i = 0; i <= nfilts + 1; i++) {
points[i] = ceil(points[i] / sampleRate * (nfft - 1));
}
for(int i = 0;i < nfilts;i++) {
wts[i] = &wtsData[i*nfreqs];
int lp = points[i], mp = points[i+1], rp = points[i+2];
double lf = 1.0 * points[i] / nfft * sampleRate;
double mf = 1.0 * points[i+1] / nfft * sampleRate;
double rf = 1.0 * points[i+2] / nfft * sampleRate;
for(int k = lp;k <= mp;k++){
wts[i][k] = (1.0*k/nfft * sampleRate - lf) / (mf - lf);
}
for(int k = mp+1;k <= rp;k++){
wts[i][k] = (rf - 1.0*k/nfft * sampleRate) / (rf - mf);
}
}
e_filterSize = nfreqs;
e_melWtsExist = true;
*p_wts = wts;
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::MatrixMul01(FEATURE_DATA ***p_melLog, \
FEATURE_DATA **wts, \
FEATURE_DATA **powSpec) {
FEATURE_DATA *h_melLog, *h_wts, *h_powSpec;
FEATURE_DATA *d_melLog, *d_wts, *d_powSpec;
FEATURE_DATA **melLog;
size_t memSize1 = nfilts * e_frameNum * sizeof(FEATURE_DATA),
memSize2 = nfilts * e_filterSize * sizeof(FEATURE_DATA),
memSize3 = e_frameNum * e_powFrameSize * sizeof(FEATURE_DATA);
h_melLog = (FEATURE_DATA *)malloc(memSize1);
h_wts = wts[0];
h_powSpec = powSpec[0];
double startT = wtime();
cudaMalloc((void **)&d_melLog, memSize1);
cudaMalloc((void **)&d_wts, memSize2);
cudaMalloc((void **)&d_powSpec, memSize3);
cudaMemcpy(d_wts, h_wts, memSize2, cudaMemcpyHostToDevice);
cudaMemcpy(d_powSpec, h_powSpec, memSize3, cudaMemcpyHostToDevice);
int bucketNum = (((e_frameNum-1)/BLOCK_SIZE+1)-1)/COL_STEP+1;
int blockNum = (nfilts-1)/BLOCK_SIZE+1;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(bucketNum,blockNum);
int r = nfilts, c = e_frameNum;
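// melLog (nfilts x frameNum) is assumed to be wts (nfilts x filterSize) times powSpec transposed,
// i.e. the mel filterbank applied to every frame's power spectrum.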
matrix_mul_kernel<<<dimGrid,dimBlock>>>(d_wts, d_powSpec, d_melLog, r, e_filterSize, c);
cudaMemcpy(h_melLog, d_melLog, memSize1, cudaMemcpyDeviceToHost);
double endT = wtime();
//printf("mel filtering calculation time %lf\n", endT-startT);
melLog = (FEATURE_DATA **) malloc(nfilts * sizeof(FEATURE_DATA*));
for(int i = 0;i < r;i++){
melLog[i] = &h_melLog[i*c];
}
*p_melLog = melLog;
cudaFree(d_melLog);
cudaFree(d_wts);
cudaFree(d_powSpec);
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::fft2MelLog(int nfft, \
FEATURE_DATA ***p_melLog,
FEATURE_DATA **powSpec, \
int nfilts , \
double (*hz2melFunc)(double), \
double (*mel2hzFunc)(double), \
double minF, \
double maxF, \
int sampleRate) {
if(!e_melWtsExist){
getWts(&e_melWts, nfft, minF, maxF, sampleRate, nfilts, hz2melFunc, mel2hzFunc);
}
MatrixMul01(p_melLog, e_melWts, powSpec);
//FEATURE_DATA **melLog = *p_melLog;
//startT = wtime();
//for(int i = 0;i < nfilts;i++)
// for(int j = 0;j < e_frameNum;j++){
// melLog[i][j] = log(0.0001+fabs(melLog[i][j]));
// }
//finishT = wtime();
//std::cout << "MelLog: "<<finishT-startT << std::endl;
return SP_SUCCESS;
}
double FeatureExtractor::preProcessing(FEATURE_DATA **out_windows, \
const SOUND_DATA *rd, \
int size, \
double factor, \
double winTime, \
double stepTime, \
int rate){
size_empData = size;
int samplePerWin = ceil(winTime * rate);
int stepPerWin = ceil(stepTime * rate);
int nfft = (1 << int(ceil(log(1.0 * samplePerWin)/log(2.0))));
e_frameSize = nfft;
e_frameNum = ceil((double)size_empData/stepPerWin);
size_t winsEleNum = nfft * e_frameNum;
//int paddedSize = nfft*ceil((float)size_empData/stepPerWin)*sizeof(FEATURE_DATA);
int paddedSize = winsEleNum*sizeof(FEATURE_DATA);
FEATURE_DATA *window_data = (FEATURE_DATA *)malloc(paddedSize);
memset(window_data, 0, paddedSize);
double startT, finishT, initializeTime;
startT = wtime();
FEATURE_DATA *d_window_data;
cudaMalloc( (void **) &d_window_data, paddedSize );
cudaMemcpy( d_window_data, window_data, paddedSize, cudaMemcpyHostToDevice );
SOUND_DATA *d_rd;
size_t rdMemSize = size*sizeof(SOUND_DATA);
cudaMalloc( (void **) &d_rd, rdMemSize );
cudaMemcpy( d_rd, rd, rdMemSize, cudaMemcpyHostToDevice );
finishT = wtime();
initializeTime = finishT - startT;
assert(nfft<=1024);
//std::cout << "nfft: " << nfft << std::endl;
//size_t sharedMem = nfft*sizeof(FEATURE_DATA);
dim3 dimGrid( ceil( (double)winsEleNum/nfft) );
dim3 dimBlock(nfft);
double arg_PI_factor = 2.0*PI/samplePerWin;
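// preProcessing_kernel is assumed to fuse pre-emphasis, framing and a cosine-based (Hamming/Hann-style)
// window; arg_PI_factor = 2*pi/samplePerWin is the window's angular step.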
preProcessing_kernel<<< dimGrid, dimBlock>>>(d_rd, size, d_window_data, samplePerWin, stepPerWin, factor, arg_PI_factor);
cudaMemcpy(window_data, d_window_data, paddedSize, cudaMemcpyDeviceToHost);
e_frameNum = ceil((double)size_empData/stepPerWin);
e_windows = (FEATURE_DATA **)malloc( e_frameNum *sizeof(FEATURE_DATA *));
for(int i=0,j=0; i<e_frameNum; i++,j+=e_frameSize){
e_windows[i] = &window_data[j];
}
return initializeTime;
}
SP_RESULT FeatureExtractor::windowMul(FEATURE_DATA *window, \
int size, \
double (*winFunc)(int, int) ) {
for(int i = 0;i < size;i++) {
window[i] *= winFunc(i, size);
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::windowing(FEATURE_DATA **out_windows, \
const FEATURE_DATA *in, \
double winTime, \
double stepTime, \
int rate, \
double (*winFunc)(int, int)) {
int samplePerWin = ceil(winTime * rate);
int stepPerWin = ceil(stepTime * rate);
int nfft = (1 << int(ceil(log(1.0 * samplePerWin)/log(2.0))));
e_frameSize = nfft;
int paddedSize = nfft*ceil((float)size_empData/stepPerWin)*sizeof(FEATURE_DATA);
FEATURE_DATA *window_data = (FEATURE_DATA *)malloc(paddedSize);
memset(window_data, 0, paddedSize);
int cnt=0, i, j, k;
for(i = 0, k=0; i < size_empData; i += stepPerWin, k += nfft) {
cnt++;
for(j = 0;j < samplePerWin && i+j < size_empData; j++) {
window_data[k+j] = in[i+j];
}
windowMul(&window_data[k],samplePerWin,winFunc);
}
e_frameNum = cnt;
e_windows = (FEATURE_DATA **)malloc(cnt*sizeof(FEATURE_DATA *));
for(i=0,j=0; i<cnt; i++,j+=e_frameSize){
e_windows[i] = &window_data[j];
}
return SP_SUCCESS;
}
SP_RESULT FeatureExtractor::preEmph(/* out */FEATURE_DATA *outs, \
/*in*/const SOUND_DATA* rd, \
int size, \
double factor){
size_empData = size;
outs[0]=rd[0];
for(int i = 1;i<size;i++){
outs[i]=(1.0 * rd[i] - factor * rd[i-1]);
}
return SP_SUCCESS;
}
|
99aa4b527b3f1eb814ddcd476f74003fc58e6da2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
__global__ void update_array_gpu_two_tables(int i, int numberOfThreadsRequired, cellType *d_array, cellType *d_T )
{
long j=blockIdx.x *blockDim.x + threadIdx.x + 1;
if (j>= numberOfThreadsRequired || j < dependencyWidthLeft)
{}
else
{
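// d_array(i,j) and d_T(i,j) are assumed to be flattening macros from myHeaders.h
// (something like d_array[(i)*TOTAL_COLS+(j)]).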
d_array(i,j)= (d_array(i-1,j) + d_array(i-1,j-1)) * d_array(i-1,j-2) / (d_array(i-1,j-3) + d_array(i-1,j-4) +d_array(i-1,j-5) +d_array(i-1,j-6) +1);
d_T(i,j)= (d_T(i-1,j) + d_T(i-1,j-1)) * d_T(i-1,j-2) / (d_T(i-1,j-3) + d_T(i-1,j-4) +d_T(i-1,j-5) +d_T(i-1,j-6) +1);
}
}
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array, *h_T;
h_array = create_array_host();
h_T = create_array_host();
//Create array at device
cellType *d_array, *d_T;
hipMalloc((void**) &d_array, sizeof(cellType)*(nRows*TOTAL_COLS));
hipMalloc((void**) &d_T, sizeof(cellType)*(nRows*TOTAL_COLS));
//copy host array to device array, if needed
copy_host_to_device(h_array, d_array);
copy_host_to_device(h_T, d_T);
//configure kernel
configure_kernal(TOTAL_COLS);
GpuTimer phase1;
phase1.Start();
//execute on GPU, row by row
for (int i = 1; i < nRows; ++i)
{
hipLaunchKernelGGL(( update_array_gpu_two_tables), dim3(dim3(g,1,1)), dim3(dim3(x,1,1)), 0, 0, i, TOTAL_COLS, d_array, d_T);
}
phase1.Stop();
cout <<"Time (basic GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
copy_device_to_host(h_T, d_T);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
//write_array_file(h_array, "../files_output/output_s.txt");
return 0;
}
| 99aa4b527b3f1eb814ddcd476f74003fc58e6da2.cu |
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
__global__ void update_array_gpu_two_tables(int i, int numberOfThreadsRequired, cellType *d_array, cellType *d_T )
{
long j=blockIdx.x *blockDim.x + threadIdx.x + 1;
if (j>= numberOfThreadsRequired || j < dependencyWidthLeft)
{}
else
{
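// d_array(i,j) and d_T(i,j) are assumed to be flattening macros from myHeaders.h
// (something like d_array[(i)*TOTAL_COLS+(j)]).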
d_array(i,j)= (d_array(i-1,j) + d_array(i-1,j-1)) * d_array(i-1,j-2) / (d_array(i-1,j-3) + d_array(i-1,j-4) +d_array(i-1,j-5) +d_array(i-1,j-6) +1);
d_T(i,j)= (d_T(i-1,j) + d_T(i-1,j-1)) * d_T(i-1,j-2) / (d_T(i-1,j-3) + d_T(i-1,j-4) +d_T(i-1,j-5) +d_T(i-1,j-6) +1);
}
}
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array, *h_T;
h_array = create_array_host();
h_T = create_array_host();
//Create array at device
cellType *d_array, *d_T;
cudaMalloc((void**) &d_array, sizeof(cellType)*(nRows*TOTAL_COLS));
cudaMalloc((void**) &d_T, sizeof(cellType)*(nRows*TOTAL_COLS));
//copy host array to device array, if needed
copy_host_to_device(h_array, d_array);
copy_host_to_device(h_T, d_T);
//configure kernel
configure_kernal(TOTAL_COLS);
GpuTimer phase1;
phase1.Start();
//execute on GPU, row by row
for (int i = 1; i < nRows; ++i)
{
update_array_gpu_two_tables<<<dim3(g,1,1), dim3(x,1,1)>>>(i, TOTAL_COLS, d_array, d_T);
}
phase1.Stop();
cout <<"Time (basic GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
copy_device_to_host(h_T, d_T);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
//write_array_file(h_array, "../files_output/output_s.txt");
return 0;
}
|
a5f858f275abd3368060c34cd8b232a457602c4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "Arnoldi_Driver.h"
/*
Input: A - an n by n matrix
V - an n by k orthogonal matrix
H - a k by k upper Hessenberg matrix
f - an nonzero n vector
with AV = VH + fe_k' (if k > 1)
k - a positive integer (k << n assumed)
m - a positive integer (k < m << n assumed)
Output: V - an n by m orthogonal matrix
H - an m by m upper Hessenberg matrix
f - an n vector
with AV = VH + fe_m'
Leading k columns of V agree with input V (!)
*/
void Arnoldi_driver(hipblasHandle_t handle, int N, user_map_vector Axb, void *user_struct, real *V_d, real *H, real *vec_f_d, int k, int m, real *vec_v_d, real *vec_w_d, real *vec_c_d, real *vec_h_d, real *vec_h){
real tolerance=5.0e-12;
#ifdef real_float
tolerance=5.0e-8;
#endif
if(k==0){
real beta = Arnoldi::vector_norm2_GPU(handle, N, vec_f_d);
if(beta<1.0e-15){
printf( "Norm of the input vector ||f||<1e-15! Quit!\n");
exit(-1);
}
Arnoldi::vector_copy_GPU(handle, N, vec_f_d, vec_v_d);
Arnoldi::normalize_vector_GPU(handle, N, vec_v_d);
// Call matrix_vector value on GPU
		// Using user defined function via structure
real vvpp = Arnoldi::vector_norm2_GPU(handle, N, vec_v_d);
Arnoldi::check_for_nans("\nFirst vector to Axb\n", N, vec_v_d);
Axb(user_struct, vec_v_d, vec_w_d);
Arnoldi::check_for_nans("\nFirst call of Axb\n", N, vec_w_d);
// if(use_matrix==1)
// call_vector_map_GPU(handle, N, A_d, vec_v_d, vec_w_d);
// else
// call_vector_map_GPU(N, vec_v_d, vec_w_d); //change this for abstract function call!!!
//
real alpha=Arnoldi::vector_dot_product_GPU(handle, N, vec_v_d, vec_w_d); //GG
Arnoldi::vector_copy_GPU(handle, N, vec_w_d, vec_f_d); //vec_w -> vec_f
Arnoldi::vectors_add_GPU(handle, N, -alpha, vec_v_d, vec_f_d); //f = w - v*alpha; % orthogonalization once. //GG
real c=1.0;
int it=0;
while(c>tolerance){
it++;
c=Arnoldi::vector_dot_product_GPU(handle, N, vec_v_d, vec_f_d); //(vec_v,vec_f)
Arnoldi::vectors_add_GPU(handle, N, -c, vec_v_d, vec_f_d); //vec_f=vec_f-c*vec_v
alpha+=c;
if(it>12){
printf("\nArnoldi orthogonalization failed at k==0: %.05e\n", c);
break;
}
Arnoldi::check_for_nans("\nFirst reorthogonalization\n", N, vec_f_d);
}
H[I2(0,0,m)]=alpha; //set H in HOST!
Arnoldi::set_matrix_colomn_GPU(N, m, V_d, vec_v_d, 0); //V(:,0)=vec_v
}
for(int j=k+1;j<m;j++){
real beta = Arnoldi::vector_norm2_GPU( handle, N, vec_f_d);
Arnoldi::vector_copy_GPU(handle, N, vec_f_d, vec_v_d);
Arnoldi::normalize_vector_GPU(handle, N, vec_v_d);
H[I2(j,j-1,m)] = beta; //set H in HOST!
Arnoldi::set_matrix_colomn_GPU(N, m, V_d, vec_v_d, j); //V(:,j)=vec_v
// Call matrix_vector value on GPU
// if(use_matrix==1)
// call_vector_map_GPU(handle, N, A_d, vec_v_d, vec_w_d);
// else
// call_vector_map_GPU(N, vec_v_d, vec_w_d);
Arnoldi::check_for_nans("\nOther vector to Axb\n", N, vec_v_d);
Axb(user_struct, vec_v_d, vec_w_d);
Arnoldi::check_for_nans("\nOther call of Axb\n", N, vec_w_d);
Arnoldi::set_vector_value_GPU(m, 0.0, vec_h_d); //set all to 0
Arnoldi::set_vector_value_GPU(m, 0.0, vec_c_d);
Arnoldi::matrixDotVector_part_GPU(handle, N, V_d, m, 1.0, vec_w_d, j+1, 0.0, vec_h_d); //from 0 to j STRICT!
Arnoldi::vector_copy_GPU(handle, N, vec_w_d, vec_f_d); //vec_w -> vec_f
Arnoldi::matrixMultVector_part_GPU(handle, N, V_d, m, -1.0, vec_h_d, j+1, 1.0, vec_f_d);
//matrixMultVector_part(N, V, m, 0, j, -1.0, vec_h, 1.0, vec_w, vec_f); //from 0 to j strict!
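		// At this point f = w - V(:,0:j) * (V(:,0:j)' * w), i.e. the new Krylov vector
		// A*v_j projected against the current basis (classical Gram-Schmidt). The loop
		// below repeats the projection until the correction norm c falls below tolerance
		// (iterated CGS reorthogonalization).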
real c=1.0;
int it=0;
while(c>tolerance){
it++;
Arnoldi::matrixDotVector_part_GPU(handle, N, V_d, m, 1.0, vec_f_d, j+1, 0.0, vec_c_d);
Arnoldi::matrixMultVector_part_GPU(handle, N, V_d, m, -1.0, vec_c_d, j+1, 1.0, vec_f_d);
Arnoldi::vectors_add_GPU(handle, m, 1.0, vec_c_d, vec_h_d);
c=Arnoldi::vector_norm2_GPU(handle, m, vec_c_d);
if(it>12){
printf("\nArnoldi orthogonalization failed: %.05e at %i\n", c, j);
break;
}
Arnoldi::check_for_nans("\nOther reorthogonalization\n", N, vec_f_d);
}
Arnoldi::to_host_from_device_real_cpy(vec_h, vec_h_d, m, 1,1); //vec_h_d -> vec_h
set_matrix_colomn(m, m, H, vec_h, j); //set H on HOST
}
}
| a5f858f275abd3368060c34cd8b232a457602c4a.cu | #include "Arnoldi_Driver.h"
/*
Input: A - an n by n matrix
V - an n by k orthogonal matrix
H - a k by k upper Hessenberg matrix
f - an nonzero n vector
with AV = VH + fe_k' (if k > 1)
k - a positive integer (k << n assumed)
m - a positive integer (k < m << n assumed)
Output: V - an n by m orthogonal matrix
H - an m by m upper Hessenberg matrix
f - an n vector
with AV = VH + fe_m'
Leading k columns of V agree with input V (!)
*/
void Arnoldi_driver(cublasHandle_t handle, int N, user_map_vector Axb, void *user_struct, real *V_d, real *H, real *vec_f_d, int k, int m, real *vec_v_d, real *vec_w_d, real *vec_c_d, real *vec_h_d, real *vec_h){
real tolerance=5.0e-12;
#ifdef real_float
tolerance=5.0e-8;
#endif
if(k==0){
real beta = Arnoldi::vector_norm2_GPU(handle, N, vec_f_d);
if(beta<1.0e-15){
printf( "Norm of the input vector ||f||<1e-15! Quit!\n");
exit(-1);
}
Arnoldi::vector_copy_GPU(handle, N, vec_f_d, vec_v_d);
Arnoldi::normalize_vector_GPU(handle, N, vec_v_d);
// Call matrix_vector value on GPU
		// Using user defined function via structure
real vvpp = Arnoldi::vector_norm2_GPU(handle, N, vec_v_d);
Arnoldi::check_for_nans("\nFirst vector to Axb\n", N, vec_v_d);
Axb(user_struct, vec_v_d, vec_w_d);
Arnoldi::check_for_nans("\nFirst call of Axb\n", N, vec_w_d);
// if(use_matrix==1)
// call_vector_map_GPU(handle, N, A_d, vec_v_d, vec_w_d);
// else
// call_vector_map_GPU(N, vec_v_d, vec_w_d); //change this for abstract function call!!!
//
real alpha=Arnoldi::vector_dot_product_GPU(handle, N, vec_v_d, vec_w_d); //GG
Arnoldi::vector_copy_GPU(handle, N, vec_w_d, vec_f_d); //vec_w -> vec_f
Arnoldi::vectors_add_GPU(handle, N, -alpha, vec_v_d, vec_f_d); //f = w - v*alpha; % orthogonalization once. //GG
real c=1.0;
int it=0;
while(c>tolerance){
it++;
c=Arnoldi::vector_dot_product_GPU(handle, N, vec_v_d, vec_f_d); //(vec_v,vec_f)
Arnoldi::vectors_add_GPU(handle, N, -c, vec_v_d, vec_f_d); //vec_f=vec_f-c*vec_v
alpha+=c;
if(it>12){
printf("\nArnoldi orthogonalization failed at k==0: %.05e\n", c);
break;
}
Arnoldi::check_for_nans("\nFirst reorthogonalization\n", N, vec_f_d);
}
H[I2(0,0,m)]=alpha; //set H in HOST!
Arnoldi::set_matrix_colomn_GPU(N, m, V_d, vec_v_d, 0); //V(:,0)=vec_v
}
for(int j=k+1;j<m;j++){
real beta = Arnoldi::vector_norm2_GPU( handle, N, vec_f_d);
Arnoldi::vector_copy_GPU(handle, N, vec_f_d, vec_v_d);
Arnoldi::normalize_vector_GPU(handle, N, vec_v_d);
H[I2(j,j-1,m)] = beta; //set H in HOST!
Arnoldi::set_matrix_colomn_GPU(N, m, V_d, vec_v_d, j); //V(:,j)=vec_v
// Call matrix_vector value on GPU
// if(use_matrix==1)
// call_vector_map_GPU(handle, N, A_d, vec_v_d, vec_w_d);
// else
// call_vector_map_GPU(N, vec_v_d, vec_w_d);
Arnoldi::check_for_nans("\nOther vector to Axb\n", N, vec_v_d);
Axb(user_struct, vec_v_d, vec_w_d);
Arnoldi::check_for_nans("\nOther call of Axb\n", N, vec_w_d);
Arnoldi::set_vector_value_GPU(m, 0.0, vec_h_d); //set all to 0
Arnoldi::set_vector_value_GPU(m, 0.0, vec_c_d);
Arnoldi::matrixDotVector_part_GPU(handle, N, V_d, m, 1.0, vec_w_d, j+1, 0.0, vec_h_d); //from 0 to j STRICT!
Arnoldi::vector_copy_GPU(handle, N, vec_w_d, vec_f_d); //vec_w -> vec_f
Arnoldi::matrixMultVector_part_GPU(handle, N, V_d, m, -1.0, vec_h_d, j+1, 1.0, vec_f_d);
//matrixMultVector_part(N, V, m, 0, j, -1.0, vec_h, 1.0, vec_w, vec_f); //from 0 to j strict!
real c=1.0;
int it=0;
while(c>tolerance){
it++;
Arnoldi::matrixDotVector_part_GPU(handle, N, V_d, m, 1.0, vec_f_d, j+1, 0.0, vec_c_d);
Arnoldi::matrixMultVector_part_GPU(handle, N, V_d, m, -1.0, vec_c_d, j+1, 1.0, vec_f_d);
Arnoldi::vectors_add_GPU(handle, m, 1.0, vec_c_d, vec_h_d);
c=Arnoldi::vector_norm2_GPU(handle, m, vec_c_d);
if(it>12){
printf("\nArnoldi orthogonalization failed: %.05e at %i\n", c, j);
break;
}
Arnoldi::check_for_nans("\nOther reorthogonalization\n", N, vec_f_d);
}
Arnoldi::to_host_from_device_real_cpy(vec_h, vec_h_d, m, 1,1); //vec_h_d -> vec_h
set_matrix_colomn(m, m, H, vec_h, j); //set H on HOST
}
}
|
247224a3a4b5d1a5ef0283ef8295c958a32859e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define N_BINS 64
#define N_THREADS 128
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename)
{
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
__global__ void _k_histogram(PPMPixel *image_data, float *h, int n_pixels)
{
int pixel_id = threadIdx.x,
this_bin = blockIdx.x;
// Initialize all bins. Notice I only have to check if threadIdx is zero,
// because the number of blocks follows the number of bins.
if (threadIdx.x == 0) h[this_bin] = 0;
__syncthreads();
while (pixel_id < n_pixels)
{
// Maps a pixel value to a unique bin in the 64-length array.
int should_be_at = image_data[pixel_id].red * 16 +
image_data[pixel_id].green * 4 +
image_data[pixel_id].blue;
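        // Each channel was pre-quantized on the host to 0..3, so this is a base-4 packing:
        // e.g. (r,g,b) = (2,1,3) lands in bin 2*16 + 1*4 + 3 = 39 of the 64 bins.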
if (should_be_at == this_bin)
atomicAdd(&h[this_bin], 1);
// Translate per number of threads. The other threads will take care
// of the rest of the data that wasn't covered by this one.
pixel_id += N_THREADS;
}
__syncthreads();
// Normalize all bins.
if (threadIdx.x == 0) h[this_bin] /= n_pixels;
}
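/*
 * Launch layout used below: one block per bin (N_BINS blocks of N_THREADS threads),
 * and every block collectively scans all n_pixels, so the total traffic is
 * N_BINS * n_pixels reads and only matching pixels pay for an atomicAdd. This keeps
 * the kernel simple at the cost of redundant reads; a privatized shared-memory
 * histogram per block with a final reduction is the usual alternative when memory
 * bandwidth becomes the bottleneck.
 */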
void parallel_histogram(PPMImage *image, float *h)
{
int i,
n_pixels = image->y * image->x;
for (i = 0; i < n_pixels; i++)
{
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
PPMPixel *dimage_data;
float *dh;
int size_of_image = n_pixels * sizeof(PPMPixel),
size_of_bins = N_BINS * sizeof(float);
double t_start = rtclock();
hipMalloc((void **)&dimage_data, size_of_image);
hipMalloc((void **)&dh, size_of_bins);
double t_end = rtclock();
// fprintf(stdout, "\nBuffer creating time: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
hipMemcpy(dimage_data, image->data, size_of_image,
hipMemcpyHostToDevice);
t_end = rtclock();
// fprintf(stdout, "\nHtD memory copy time: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
hipLaunchKernelGGL(( _k_histogram), dim3(N_BINS), dim3(N_THREADS), 0, 0, dimage_data, dh, n_pixels);
hipDeviceSynchronize();
t_end = rtclock();
// fprintf(stdout, "\nKernel time: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
hipMemcpy(h, dh, size_of_bins, hipMemcpyDeviceToHost);
t_end = rtclock();
// fprintf(stdout, "\nKernel time: %0.6lfs\n", t_end - t_start);
hipFree(dimage_data); hipFree(dh);
}
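/*
 * Illustrative sketch, not part of the original program: a CPU reference that
 * recomputes the same normalized 64-bin histogram so the GPU result can be checked.
 * The name histogram_cpu_reference is made up here, and it assumes it is handed the
 * image *before* parallel_histogram quantizes image->data in place.
 */
void histogram_cpu_reference(const PPMImage *image, float *h)
{
    int i;
    int n_pixels = image->y * image->x;
    for (i = 0; i < N_BINS; i++) h[i] = 0.0f;
    for (i = 0; i < n_pixels; i++)
    {
        int r = (image->data[i].red * 4) / 256; // quantize each channel to 0..3
        int g = (image->data[i].green * 4) / 256;
        int b = (image->data[i].blue * 4) / 256;
        h[r * 16 + g * 4 + b] += 1.0f; // same base-4 packing as the kernel
    }
    for (i = 0; i < N_BINS; i++) h[i] /= n_pixels; // normalize to frequencies
}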
int main(int argc, char *argv[])
{
	if( argc != 2 ) printf("Too many or too few arguments supplied.\n");
char *filename = argv[1];
PPMImage *image = readPPM(filename);
float *h = (float*)malloc(sizeof(float) * N_BINS);
double t_start = rtclock();
parallel_histogram(image, h);
double t_end = rtclock();
int i;
for (i = 0; i < 64; i++) printf("%.3f ", h[i]);
fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(h);
}
/*
* # Report Table
*
* # | File | ST | BCT | HtDT | KT | DtHT | TT | S
* --------------------------------------------------------------------------------------------------
* 1 | arq1.in | 0.205821s | 0.035295s | 0.000437s | 0.014306s | 0.000018s | 0.069355s | 2.967644726s
* 2 | arq2.in | 0.376651s | 0.038361s | 0.001041s | 0.035484s | 0.000017s | 0.178696s | 2.107775216s
* 3 | arq3.in | 1.367025s | 0.035133s | 0.003970s | 0.141030s | 0.000019s | 0.339280s | 4.029194176s
*
* Legend:
* * F : file
* * ST : Serial Time
* * BCT : Buffer Creation Time
* * HtDT : Host to Device Offload Time
* * KT : Kernel Time
* * DtHT : Device to Host Offload Time
* * TT : Total Time
* * S : Speedup
*/
| 247224a3a4b5d1a5ef0283ef8295c958a32859e2.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define N_BINS 64
#define N_THREADS 128
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename)
{
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
__global__ void _k_histogram(PPMPixel *image_data, float *h, int n_pixels)
{
int pixel_id = threadIdx.x,
this_bin = blockIdx.x;
// Initialize all bins. Notice I only have to check if threadIdx is zero,
// because the number of blocks follows the number of bins.
if (threadIdx.x == 0) h[this_bin] = 0;
__syncthreads();
while (pixel_id < n_pixels)
{
// Maps a pixel value to a unique bin in the 64-length array.
int should_be_at = image_data[pixel_id].red * 16 +
image_data[pixel_id].green * 4 +
image_data[pixel_id].blue;
if (should_be_at == this_bin)
atomicAdd(&h[this_bin], 1);
// Translate per number of threads. The other threads will take care
// of the rest of the data that wasn't covered by this one.
pixel_id += N_THREADS;
}
__syncthreads();
// Normalize all bins.
if (threadIdx.x == 0) h[this_bin] /= n_pixels;
}
void parallel_histogram(PPMImage *image, float *h)
{
int i,
n_pixels = image->y * image->x;
for (i = 0; i < n_pixels; i++)
{
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
PPMPixel *dimage_data;
float *dh;
int size_of_image = n_pixels * sizeof(PPMPixel),
size_of_bins = N_BINS * sizeof(float);
double t_start = rtclock();
cudaMalloc((void **)&dimage_data, size_of_image);
cudaMalloc((void **)&dh, size_of_bins);
double t_end = rtclock();
// fprintf(stdout, "\nBuffer creating time: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
cudaMemcpy(dimage_data, image->data, size_of_image,
cudaMemcpyHostToDevice);
t_end = rtclock();
// fprintf(stdout, "\nHtD memory copy time: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
_k_histogram<<<N_BINS, N_THREADS>>>(dimage_data, dh, n_pixels);
cudaDeviceSynchronize();
t_end = rtclock();
// fprintf(stdout, "\nKernel time: %0.6lfs\n", t_end - t_start);
t_start = rtclock();
cudaMemcpy(h, dh, size_of_bins, cudaMemcpyDeviceToHost);
t_end = rtclock();
// fprintf(stdout, "\nKernel time: %0.6lfs\n", t_end - t_start);
cudaFree(dimage_data); cudaFree(dh);
}
int main(int argc, char *argv[])
{
	if( argc != 2 ) printf("Too many or too few arguments supplied.\n");
char *filename = argv[1];
PPMImage *image = readPPM(filename);
float *h = (float*)malloc(sizeof(float) * N_BINS);
double t_start = rtclock();
parallel_histogram(image, h);
double t_end = rtclock();
int i;
for (i = 0; i < 64; i++) printf("%.3f ", h[i]);
fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(h);
}
/*
* # Report Table
*
* # | File | ST | BCT | HtDT | KT | DtHT | TT | S
* --------------------------------------------------------------------------------------------------
* 1 | arq1.in | 0.205821s | 0.035295s | 0.000437s | 0.014306s | 0.000018s | 0.069355s | 2.967644726s
* 2 | arq2.in | 0.376651s | 0.038361s | 0.001041s | 0.035484s | 0.000017s | 0.178696s | 2.107775216s
* 3 | arq3.in | 1.367025s | 0.035133s | 0.003970s | 0.141030s | 0.000019s | 0.339280s | 4.029194176s
*
* Legend:
* * F : file
* * ST : Serial Time
* * BCT : Buffer Creation Time
* * HtDT : Host to Device Offload Time
* * KT : Kernel Time
* * DtHT : Device to Host Offload Time
* * TT : Total Time
* * S : Speedup
*/
|
84c1bf700faac0ff5ca6baf91db0304795296f35.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/dictionary/update_keys.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/detail/concatenate.cuh>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/search.hpp>
#include <rmm/thrust_rmm_allocator.h>
namespace cudf
{
namespace dictionary
{
namespace detail
{
/**
* @brief Create a new dictionary column by adding the new keys elements
* to the existing dictionary_column.
*
* ```
* Example:
* d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}}
* d2 = add_keys( d1, [d, b, e] )
* d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]}
* ```
*
*/
std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS( !new_keys.has_nulls(), "Keys must not have nulls" );
auto old_keys = dictionary_column.keys(); // [a,b,c,d,f]
CUDF_EXPECTS( new_keys.type()==old_keys.type(), "Keys must be the same type");
// first, concatenate the keys together
// [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e]
auto combined_keys = cudf::detail::concatenate(std::vector<column_view>{old_keys, new_keys}, mr, stream);
// sort and remove any duplicates from the combined keys
// drop_duplicates([a,b,c,d,f,d,b,e]) = [a,b,c,d,e,f]
auto table_keys = experimental::detail::drop_duplicates( table_view{{*combined_keys}},
std::vector<size_type>{0}, // only one key column
experimental::duplicate_keep_option::KEEP_FIRST,
true, mr, stream )->release();
std::unique_ptr<column> keys_column(std::move(table_keys.front()));
// create a map for the indices
// lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5]
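    // This works because table_keys is sorted and every old key is, by construction,
    // present in the combined key set, so lower_bound returns the exact new position
    // of each old key; gathering map_indices with the old dictionary indices below
    // then remaps every index in a single pass.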
auto map_indices = cudf::experimental::detail::lower_bound( table_view{{keys_column->view()}},
table_view{{old_keys}},
std::vector<order>{order::ASCENDING},
std::vector<null_order>{null_order::AFTER}, // should be no nulls here
mr, stream);
// now create the indices column -- map old values to the new ones
// gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0]
column_view indices_view( data_type{INT32}, dictionary_column.size(),
dictionary_column.indices().data<int32_t>(),
nullptr, 0, dictionary_column.offset() );
auto table_indices = cudf::experimental::detail::gather( table_view{{map_indices->view()}},
indices_view, false, false, false,
mr, stream )->release();
std::unique_ptr<column> indices_column(std::move(table_indices.front()));
//
CUDF_EXPECTS( indices_column->type().id()==cudf::type_id::INT32, "expecting INT32 indices");
// create new dictionary column with keys_column and indices_column
return make_dictionary_column( std::move(keys_column), std::move(indices_column),
copy_bitmask( dictionary_column.parent(), stream, mr), // nulls have
dictionary_column.null_count() ); // not changed
}
} // namespace detail
std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column,
column_view const& keys,
rmm::mr::device_memory_resource* mr)
{
return detail::add_keys(dictionary_column, keys,mr);
}
} // namespace dictionary
} // namespace cudf
| 84c1bf700faac0ff5ca6baf91db0304795296f35.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/dictionary/update_keys.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/detail/concatenate.cuh>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/search.hpp>
#include <rmm/thrust_rmm_allocator.h>
namespace cudf
{
namespace dictionary
{
namespace detail
{
/**
* @brief Create a new dictionary column by adding the new keys elements
* to the existing dictionary_column.
*
* ```
* Example:
* d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}}
* d2 = add_keys( d1, [d, b, e] )
* d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]}
* ```
*
*/
std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column,
column_view const& new_keys,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS( !new_keys.has_nulls(), "Keys must not have nulls" );
auto old_keys = dictionary_column.keys(); // [a,b,c,d,f]
CUDF_EXPECTS( new_keys.type()==old_keys.type(), "Keys must be the same type");
// first, concatenate the keys together
// [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e]
auto combined_keys = cudf::detail::concatenate(std::vector<column_view>{old_keys, new_keys}, mr, stream);
// sort and remove any duplicates from the combined keys
// drop_duplicates([a,b,c,d,f,d,b,e]) = [a,b,c,d,e,f]
auto table_keys = experimental::detail::drop_duplicates( table_view{{*combined_keys}},
std::vector<size_type>{0}, // only one key column
experimental::duplicate_keep_option::KEEP_FIRST,
true, mr, stream )->release();
std::unique_ptr<column> keys_column(std::move(table_keys.front()));
// create a map for the indices
// lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5]
auto map_indices = cudf::experimental::detail::lower_bound( table_view{{keys_column->view()}},
table_view{{old_keys}},
std::vector<order>{order::ASCENDING},
std::vector<null_order>{null_order::AFTER}, // should be no nulls here
mr, stream);
// now create the indices column -- map old values to the new ones
// gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0]
column_view indices_view( data_type{INT32}, dictionary_column.size(),
dictionary_column.indices().data<int32_t>(),
nullptr, 0, dictionary_column.offset() );
auto table_indices = cudf::experimental::detail::gather( table_view{{map_indices->view()}},
indices_view, false, false, false,
mr, stream )->release();
std::unique_ptr<column> indices_column(std::move(table_indices.front()));
//
CUDF_EXPECTS( indices_column->type().id()==cudf::type_id::INT32, "expecting INT32 indices");
// create new dictionary column with keys_column and indices_column
return make_dictionary_column( std::move(keys_column), std::move(indices_column),
copy_bitmask( dictionary_column.parent(), stream, mr), // nulls have
dictionary_column.null_count() ); // not changed
}
} // namespace detail
std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column,
column_view const& keys,
rmm::mr::device_memory_resource* mr)
{
return detail::add_keys(dictionary_column, keys,mr);
}
} // namespace dictionary
} // namespace cudf
|
b2367e4f77c58e9de34de92f80a3f011dae9830e.hip | // !!! This is a file automatically generated by hipify!!!
#include "convolution3Dfft.h"
#include "book.h"
#include "hip/hip_runtime.h"
#include "hipfft.h"
#include <iostream>
#include <cmath>
#include <algorithm>
//__device__ static const float PI_2 = 6.28318530717958620f;
//__device__ static const float PI_1 = 3.14159265358979310f;
////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
//Adapted from CUDA SDK examples
__device__ void mulAndScale(hipfftComplex& a, const hipfftComplex& b, const float& c)
{
hipfftComplex t = {c * (a.x * b.x - a.y * b.y), c * (a.y * b.x + a.x * b.y)};
a = t;
};
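// mulAndScale is the complex product (a.x + i*a.y) * (b.x + i*b.y) scaled by c.
// Callers pass c = 1/(number of image elements) because the FFT is unnormalized:
// a forward transform followed by an inverse transform scales the data by that count.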
__global__ void __launch_bounds__(MAX_THREADS_CUDA) modulateAndNormalize_kernel(hipfftComplex *d_Dst, hipfftComplex *d_Src, long long int dataSize,float c)
{
long long int i = (long long int)blockDim.x * (long long int)blockIdx.x + (long long int)threadIdx.x;
long long int offset = (long long int)blockDim.x * (long long int)gridDim.x;
while(i < dataSize)
{
hipfftComplex a = d_Src[i];
hipfftComplex b = d_Dst[i];
mulAndScale(a, b, c);
d_Dst[i] = a;
i += offset;
}
};
//we use nearest neighbor interpolation to access FFT coefficients in the kernel
__global__ void __launch_bounds__(MAX_THREADS_CUDA) modulateAndNormalizeSubsampled_kernel(hipfftComplex *d_Dst, hipfftComplex *d_Src,int kernelDim_0,int kernelDim_1,int kernelDim_2,int imDim_0,int imDim_1,int imDim_2,long long int datasize,float c)
{
float r_0 = ((float)kernelDim_0) / ((float)imDim_0); //ratio between image size and kernel size to calculate access
float r_1 = ((float)kernelDim_1) / ((float)imDim_1);
float r_2 = ((float)kernelDim_2) / ((float)imDim_2);
long long int i = (long long int)blockDim.x * (long long int)blockIdx.x + (long long int)threadIdx.x;
long long int offset = (long long int)blockDim.x * (long long int)gridDim.x;
int k_0,k_1,k_2;
int aux;
// float auxExp, auxSin,auxCos;
while(i < datasize)
{
//for each dimension we need to access k_i*r_i i=0, 1, 2
aux = 1 + imDim_2/2;
k_2 = i % aux;
aux = (i - k_2) / aux;
k_1 = aux % imDim_1;
k_0 = (aux - k_1) / imDim_1;
hipfftComplex b = d_Dst[i];
//apply shift in fourier domain since we did not apply fftshift to kernel (so we could use the trick of assuming the kernel is padded with zeros and then just subsample FFT)
/* This is how we would do it in Matlab (linear phase change)
auxExp = k_0 * r_0;
auxExp += k_1 * r_1;
auxExp += k_2 * r_2;
auxExp *= PI_1;
auxSin = sin(auxExp);
auxCos = cos(auxExp);
auxExp = b.x * auxCos - b.y * auxSin;
b.y = b.x * auxSin + b.y * auxCos;
b.x = auxExp;
*/
//add the ratio to each dimension and apply nearest neighbor interpolation
		//k_2 = min((int)(r_2*(float)k_2 + 0.5f),kernelDim_2-1);//the very end points need to be interpolated as "ceiling" instead of round or we can get out of bounds access
//k_1 = min((int)(r_1*(float)k_1 + 0.5f),kernelDim_1-1);
//k_0 = min((int)(r_0*(float)k_0 + 0.5f),kernelDim_0-1);
		k_2 = ((int)(r_2*(float)k_2 + 0.5f)) % kernelDim_2;//the very end points need to be interpolated as "ceiling" instead of round or we can get out of bounds access
k_1 = ((int)(r_1*(float)k_1 + 0.5f)) % kernelDim_1;
k_0 = ((int)(r_0*(float)k_0 + 0.5f)) % kernelDim_0;
//calculate new coordinate relative to kernel size
aux = 1 + kernelDim_2/2;
hipfftComplex a = d_Src[k_2 + aux *(k_1 + kernelDim_1 * k_0)];
		if( (k_0 + k_1 + k_2) % 2 == 1 )//after much debugging it seems the phase shift is 0 or Pi (nothing in between). In Matlab it is a nice linear change as programmed above
{
a.x = -a.x;
a.y = -a.y;
}
mulAndScale(a, b, c);
//__syncthreads();//this actually slows down the code by a lot (0.1 sec for 512x512x512)
d_Dst[i] = a;
i += offset;
}
};
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
__global__ void __launch_bounds__(MAX_THREADS_CUDA) fftShiftKernel(imageType* kernelCUDA,imageType* kernelPaddedCUDA,int kernelDim_0,int kernelDim_1,int kernelDim_2,int imDim_0,int imDim_1,int imDim_2)
{
int kernelSize = kernelDim_0 * kernelDim_1 * kernelDim_2;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid<kernelSize)
{
//find coordinates
long long int x,y,z,aux;
z = tid % kernelDim_2;
aux = (tid - z)/kernelDim_2;
y = aux % kernelDim_1;
x = (aux - y)/kernelDim_1;
//center coordinates
x -= kernelDim_0/2;
y -= kernelDim_1/2;
z -= kernelDim_2/2;
//circular shift if necessary
if(x<0) x += imDim_0;
if(y<0) y += imDim_1;
if(z<0) z += imDim_2;
//calculate position in padded kernel
aux = z + imDim_2 * (y + imDim_1 * x);
//copy value
		kernelPaddedCUDA[aux] = kernelCUDA[tid];//for the most part it should be a coalesced access in both places
}
}
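// Worked example of the index math above: with a 3x3x3 kernel padded into an 8x8x8
// volume, the center tap (x,y,z) = (1,1,1) maps to (0,0,0), i.e. element 0 of
// kernelPaddedCUDA, while the corner (0,0,0) maps to (-1,-1,-1) -> (7,7,7), element
// 7 + 8*(7 + 8*7) = 511. This circular wrap makes the zero-padded kernel equivalent
// to an fftshift'ed kernel for the FFT-based convolution below.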
//=====================================================================
//-------------to debug elements--------------------------------------
void writeOutCUDAfft(char* filename,imageType* fftCUDA,int* fftCUDAdims)
{
int fftSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
fftSize *= fftCUDAdims[ii];
}
//copy FFT from CUDA
imageType* fftHOST = new imageType[2*fftSize];//complex format
HANDLE_ERROR(hipMemcpy(fftHOST,fftCUDA,2*sizeof(imageType)*fftSize,hipMemcpyDeviceToHost));
//calculate module
/*
int count = 0;
for(int ii=0;ii<fftSize;ii++)
{
fftHOST[ii] = sqrt(fftHOST[count]*fftHOST[count] + fftHOST[count+1]*fftHOST[count+1]);
count += 2;
}
*/
FILE* fid = fopen(filename,"wb");
if(fid == NULL)
{
printf("ERROR: at writeOutCUDAfft opening file %s\n",filename);
exit(2);
}else{
printf("DEBUGGING: Writing FFT (real part first,imaginary second) from CUDA of dimensions %d x %d x %d in file %s\n",fftCUDAdims[2],fftCUDAdims[1],fftCUDAdims[0],filename);
}
//fwrite(fftHOST,sizeof(imageType),fftSize,fid);
for(int ii=0;ii<2*fftSize;ii+=2)
fwrite(&(fftHOST[ii]),sizeof(imageType),1,fid);
for(int ii=1;ii<2*fftSize;ii+=2)
fwrite(&(fftHOST[ii]),sizeof(imageType),1,fid);
fclose(fid);
delete[] fftHOST;
}
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
imageType* convolution3DfftCUDA_test(imageType* im,
int* imDim,
imageType* kernel,
int devCUDA)
{
imageType* convResult = NULL;
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
hipfftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( hipSetDevice( devCUDA ) );
long long int imSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
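	// The R2C transform of an N0 x N1 x N2 volume produces N0*N1*(N2/2 + 1) complex
	// values; stored in place that takes N0*N1*(N2 + 2) real elements for even N2,
	// which is exactly imSize + 2*imDim[0]*imDim[1] (imDim[2] being the fastest index).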
//allocate memory for output result
convResult = new imageType[imSize];
	//allocate memory in GPU
HANDLE_ERROR( hipMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( hipMalloc( (void**)&(kernelCUDA), imSizeFFT*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
	//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( hipMemcpy( kernelCUDA, kernel, imSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( imCUDA, im, imSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
hipfftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], HIPFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
hipfftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], HIPFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
	//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
	//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
	//NOTE: from CUFFT manual: inplace output data xy(z/2 + 1) with fcomplex. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
hipfftExecR2C(fftPlanFwd, imCUDA, (hipfftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming image
hipfftExecR2C(fftPlanFwd, kernelCUDA, (hipfftComplex *)kernelCUDA);HANDLE_ERROR_KERNEL;
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
	int numThreads=::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
int numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(numBlocks),dim3(numThreads), 0, 0, (hipfftComplex *)imCUDA, (hipfftComplex *)kernelCUDA, imSizeFFT/2,1.0f/(float)(imSize));//last parameter is the size of the FFT
//inverse FFT
hipfftExecC2R(fftPlanInv, (hipfftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host
HANDLE_ERROR(hipMemcpy(convResult,imCUDA,sizeof(imageType)*imSize,hipMemcpyDeviceToHost));
//release memory
( hipfftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
( hipfftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( imCUDA));
HANDLE_ERROR( hipFree( kernelCUDA));
return convResult;
}
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
//NOTE: to avoid transferring a large padded kernel, since memcpy is a limiting factor
imageType* convolution3DfftCUDA(imageType* im,
int* imDim,
imageType* kernel,
int* kernelDim,
int devCUDA)
{
imageType* convResult = NULL;
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
imageType* kernelPaddedCUDA = NULL;
hipfftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( hipSetDevice( devCUDA ) );
long long int imSize = 1;
long long int kernelSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
kernelSize *= (long long int) (kernelDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
//allocate memory for output result
convResult = new imageType[imSize];
	//allocate memory in GPU
HANDLE_ERROR( hipMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( hipMalloc( (void**)&(kernelCUDA), (kernelSize)*sizeof(imageType) ) );
HANDLE_ERROR( hipMalloc( (void**)&(kernelPaddedCUDA), imSizeFFT*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
	//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( hipMemcpy( kernelCUDA, kernel, kernelSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( imCUDA, im, imSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
//apply ffshift to kernel and pad it with zeros so we can calculate convolution with FFT
HANDLE_ERROR( hipMemset( kernelPaddedCUDA, 0, imSizeFFT*sizeof(imageType) ));
int numThreads=::min((long long int)MAX_THREADS_CUDA,kernelSize);
int numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(kernelSize+(long long int)(numThreads-1))/((long long int)numThreads));
hipLaunchKernelGGL(( fftShiftKernel), dim3(numBlocks),dim3(numThreads), 0, 0, kernelCUDA,kernelPaddedCUDA,kernelDim[0],kernelDim[1],kernelDim[2],imDim[0],imDim[1],imDim[2]);HANDLE_ERROR_KERNEL;
//make sure GPU finishes before we launch two different streams
HANDLE_ERROR(hipDeviceSynchronize());
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
hipfftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], HIPFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
hipfftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], HIPFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
	//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
	//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
	//NOTE: from CUFFT manual: inplace output data xy(z/2 + 1) with fcomplex. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
hipfftExecR2C(fftPlanFwd, imCUDA, (hipfftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming image
hipfftExecR2C(fftPlanFwd, kernelPaddedCUDA, (hipfftComplex *)kernelPaddedCUDA);HANDLE_ERROR_KERNEL;
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
numThreads=::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(numBlocks),dim3(numThreads), 0, 0, (hipfftComplex *)imCUDA, (hipfftComplex *)kernelPaddedCUDA, imSizeFFT/2,1.0f/(float)(imSize));//last parameter is the size of the FFT
//inverse FFT
hipfftExecC2R(fftPlanInv, (hipfftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host
HANDLE_ERROR(hipMemcpy(convResult,imCUDA,sizeof(imageType)*imSize,hipMemcpyDeviceToHost));
//release memory
( hipfftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
( hipfftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( imCUDA));
HANDLE_ERROR( hipFree( kernelCUDA));
HANDLE_ERROR( hipFree( kernelPaddedCUDA));
return convResult;
}
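//=====================================================================
/*
 * Minimal usage sketch, not part of the original source: the sizes and the name
 * convolution3DfftCUDA_usage_example are made up for illustration, and it assumes
 * dimsImage == 3 and that imageType behaves like float (both taken from how they are
 * used above). Data must be laid out with the last dimension running fastest, as the
 * WARNING above requires.
 */
void convolution3DfftCUDA_usage_example()
{
	int imDim[dimsImage] = {64, 64, 64};
	int kernelDim[dimsImage] = {5, 5, 5};
	long long int imSize = 64 * 64 * 64, kernelSize = 5 * 5 * 5;
	imageType* im = new imageType[imSize];
	imageType* kernel = new imageType[kernelSize];
	for (long long int ii = 0; ii < imSize; ii++) im[ii] = 1.0f; // constant test image
	for (long long int ii = 0; ii < kernelSize; ii++) kernel[ii] = 0.0f;
	kernel[kernelSize / 2] = 1.0f; // centered delta kernel -> output should reproduce the input (up to FFT round-off)
	imageType* out = convolution3DfftCUDA(im, imDim, kernel, kernelDim, 0);
	// the returned buffer has the same size and memory order as im; the caller owns it
	delete[] out;
	delete[] kernel;
	delete[] im;
}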
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
//NOTE: to avoid transferring a large padded kernel, since memcpy is a limiting factor
void convolution3DfftCUDAInPlace(imageType* im,int* imDim,imageType* kernel,int* kernelDim,int devCUDA)
{
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
imageType* kernelPaddedCUDA = NULL;
hipfftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( hipSetDevice( devCUDA ) );
long long int imSize = 1;
long long int kernelSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
kernelSize *= (long long int) (kernelDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
	//allocate memory in GPU
HANDLE_ERROR( hipMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( hipMalloc( (void**)&(kernelCUDA), (kernelSize)*sizeof(imageType) ) );
HANDLE_ERROR( hipMalloc( (void**)&(kernelPaddedCUDA), imSizeFFT*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
	//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( hipMemcpy( kernelCUDA, kernel, kernelSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( imCUDA, im, imSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
//apply ffshift to kernel and pad it with zeros so we can calculate convolution with FFT
HANDLE_ERROR( hipMemset( kernelPaddedCUDA, 0, imSizeFFT*sizeof(imageType) ));
int numThreads=::min((long long int)MAX_THREADS_CUDA,kernelSize);
int numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(kernelSize+(long long int)(numThreads-1))/((long long int)numThreads));
hipLaunchKernelGGL(( fftShiftKernel), dim3(numBlocks),dim3(numThreads), 0, 0, kernelCUDA,kernelPaddedCUDA,kernelDim[0],kernelDim[1],kernelDim[2],imDim[0],imDim[1],imDim[2]);HANDLE_ERROR_KERNEL;
//make sure GPU finishes
HANDLE_ERROR(hipDeviceSynchronize());
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
hipfftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], HIPFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
//hipfftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], HIPFFT_C2R);HANDLE_ERROR_KERNEL;//we wait to conserve memory
//cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
	//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
	//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
	//NOTE: from CUFFT manual: inplace output data xy(z/2 + 1) with fcomplex. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
hipfftExecR2C(fftPlanFwd, imCUDA, (hipfftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming image
hipfftExecR2C(fftPlanFwd, kernelPaddedCUDA, (hipfftComplex *)kernelPaddedCUDA);HANDLE_ERROR_KERNEL;
//int fftCUDAdims[dimsImage] ={imDim[0],imDim[1],1+imDim[2]/2};
//writeOutCUDAfft("E:/temp/fftCUDAgood.bin",kernelPaddedCUDA,fftCUDAdims);
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
numThreads=::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(numBlocks),dim3(numThreads), 0, 0, (hipfftComplex *)imCUDA, (hipfftComplex *)kernelPaddedCUDA, imSizeFFT/2,1.0f/(float)(imSize));HANDLE_ERROR_KERNEL;//last parameter is the size of the FFT
//we destroy memory first so we can perform larger block convoutions
( hipfftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( kernelCUDA));
HANDLE_ERROR( hipFree( kernelPaddedCUDA));
//inverse FFT
hipfftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], HIPFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
hipfftExecC2R(fftPlanInv, (hipfftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host and overwrite image
HANDLE_ERROR(hipMemcpy(im,imCUDA,sizeof(imageType)*imSize,hipMemcpyDeviceToHost));
//release memory
( hipfftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
//( hipfftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( imCUDA));
//HANDLE_ERROR( hipFree( kernelCUDA));
//HANDLE_ERROR( hipFree( kernelPaddedCUDA));
}
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
//NOTE: to avoid transferring a large padded kernel, since memcpy is a limiting factor
/*
void convolution3DfftCUDAInPlaceSaveMemory(imageType* im,int* imDim,imageType* kernel,int* kernelDim,int devCUDA)
{
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
//imageType* kernelFFTCUDA = NULL; //I need a duplicate in order to perform FFTshift (I can not do it in place)
hipfftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( hipSetDevice( devCUDA ) );
long long int imSize = 1;
long long int kernelSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
kernelSize *= (long long int) (kernelDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
long long int kernelSizeFFT = kernelSize + (long long int)(2*kernelDim[0]*kernelDim[1]);
//allocat ememory in GPU
HANDLE_ERROR( hipMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( hipMalloc( (void**)&(kernelCUDA), (kernelSizeFFT)*sizeof(imageType) ) );
//HANDLE_ERROR( hipMalloc( (void**)&(kernelFFTCUDA), (kernelSizeFFT)*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
	//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( hipMemcpy( kernelCUDA, kernel, kernelSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( imCUDA, im, imSize*sizeof(imageType) , hipMemcpyHostToDevice ) );
//apply ffshift to kernel and pad it with zeros so we can calculate convolution with FFT
//HANDLE_ERROR( hipMemset( kernelFFTCUDA, 0, kernelSizeFFT*sizeof(imageType) ));
//int numThreads=::min((long long int)MAX_THREADS_CUDA,kernelSize);
//int numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(kernelSize+(long long int)(numThreads-1))/((long long int)numThreads));
//fftShiftKernel<<<numBlocks,numThreads>>>(kernelCUDA,kernelFFTCUDA,kernelDim[0],kernelDim[1],kernelDim[2],kernelDim[0],kernelDim[1],kernelDim[2]);HANDLE_ERROR_KERNEL;
//make sure GPU finishes
HANDLE_ERROR(hipDeviceSynchronize());
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
hipfftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], HIPFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
	//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
	//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
	//NOTE: from CUFFT manual: inplace output data xy(z/2 + 1) with fcomplex. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
hipfftExecR2C(fftPlanFwd, imCUDA, (hipfftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming kernel
hipfftDestroy(fftPlanFwd); HANDLE_ERROR_KERNEL;
hipfftPlan3d(&fftPlanFwd, kernelDim[0], kernelDim[1], kernelDim[2], HIPFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
hipfftExecR2C(fftPlanFwd, kernelCUDA, (hipfftComplex *)kernelCUDA);HANDLE_ERROR_KERNEL;
//int fftCUDAdims[dimsImage] ={kernelDim[0],kernelDim[1],1+kernelDim[2]/2};
//writeOutCUDAfft("E:/temp/fftCUDAbad.bin",kernelCUDA,fftCUDAdims);
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
int numThreads=::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
int numBlocks=::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
//we multiply two FFT of different sizes (but one was just padded with zeros)
modulateAndNormalizeSubsampled_kernel<<<numBlocks,numThreads>>>((hipfftComplex *)imCUDA, (hipfftComplex *)kernelCUDA, kernelDim[0],kernelDim[1],kernelDim[2],imDim[0],imDim[1],imDim[2], imSizeFFT/2,1.0f/(float)(imSize));HANDLE_ERROR_KERNEL;
//we destroy memory first so we can perform larger block convoutions
hipfftDestroy(fftPlanFwd) ;HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( kernelCUDA));
//HANDLE_ERROR( hipFree( kernelFFTCUDA));
//inverse FFT
hipfftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], HIPFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
hipfftExecC2R(fftPlanInv, (hipfftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host and overwrite image
HANDLE_ERROR(hipMemcpy(im,imCUDA,sizeof(imageType)*imSize,hipMemcpyDeviceToHost));
//release memory
( hipfftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( imCUDA));
}
*/
| b2367e4f77c58e9de34de92f80a3f011dae9830e.cu | #include "convolution3Dfft.h"
#include "book.h"
#include "cuda.h"
#include "cufft.h"
#include <iostream>
#include <cmath>
#include <algorithm>
//__device__ static const float PI_2 = 6.28318530717958620f;
//__device__ static const float PI_1 = 3.14159265358979310f;
////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
//Adapted from CUDA SDK examples
__device__ void mulAndScale(cufftComplex& a, const cufftComplex& b, const float& c)
{
cufftComplex t = {c * (a.x * b.x - a.y * b.y), c * (a.y * b.x + a.x * b.y)};
a = t;
};
__global__ void __launch_bounds__(MAX_THREADS_CUDA) modulateAndNormalize_kernel(cufftComplex *d_Dst, cufftComplex *d_Src, long long int dataSize,float c)
{
long long int i = (long long int)blockDim.x * (long long int)blockIdx.x + (long long int)threadIdx.x;
long long int offset = (long long int)blockDim.x * (long long int)gridDim.x;
while(i < dataSize)
{
cufftComplex a = d_Src[i];
cufftComplex b = d_Dst[i];
mulAndScale(a, b, c);
d_Dst[i] = a;
i += offset;
}
};
//we use nearest neighbor interpolation to access FFT coefficients in the kernel
__global__ void __launch_bounds__(MAX_THREADS_CUDA) modulateAndNormalizeSubsampled_kernel(cufftComplex *d_Dst, cufftComplex *d_Src,int kernelDim_0,int kernelDim_1,int kernelDim_2,int imDim_0,int imDim_1,int imDim_2,long long int datasize,float c)
{
float r_0 = ((float)kernelDim_0) / ((float)imDim_0); //ratio between image size and kernel size to calculate access
float r_1 = ((float)kernelDim_1) / ((float)imDim_1);
float r_2 = ((float)kernelDim_2) / ((float)imDim_2);
long long int i = (long long int)blockDim.x * (long long int)blockIdx.x + (long long int)threadIdx.x;
long long int offset = (long long int)blockDim.x * (long long int)gridDim.x;
int k_0,k_1,k_2;
int aux;
// float auxExp, auxSin,auxCos;
while(i < datasize)
{
//for each dimension we need to access k_i*r_i i=0, 1, 2
aux = 1 + imDim_2/2;
k_2 = i % aux;
aux = (i - k_2) / aux;
k_1 = aux % imDim_1;
k_0 = (aux - k_1) / imDim_1;
cufftComplex b = d_Dst[i];
//apply shift in fourier domain since we did not apply fftshift to kernel (so we could use the trick of assuming the kernel is padded with zeros and then just subsample FFT)
/* This is how we would do it in Matlab (linear phase change)
auxExp = k_0 * r_0;
auxExp += k_1 * r_1;
auxExp += k_2 * r_2;
auxExp *= PI_1;
auxSin = sin(auxExp);
auxCos = cos(auxExp);
auxExp = b.x * auxCos - b.y * auxSin;
b.y = b.x * auxSin + b.y * auxCos;
b.x = auxExp;
*/
//add the ratio to each dimension and apply nearest neighbor interpolation
		//k_2 = min((int)(r_2*(float)k_2 + 0.5f),kernelDim_2-1);//the very end points need to be interpolated as "ceiling" instead of round or we can get out of bounds access
//k_1 = min((int)(r_1*(float)k_1 + 0.5f),kernelDim_1-1);
//k_0 = min((int)(r_0*(float)k_0 + 0.5f),kernelDim_0-1);
		k_2 = ((int)(r_2*(float)k_2 + 0.5f)) % kernelDim_2;//the very end points need to be interpolated as "ceiling" instead of round or we can get out of bounds access
k_1 = ((int)(r_1*(float)k_1 + 0.5f)) % kernelDim_1;
k_0 = ((int)(r_0*(float)k_0 + 0.5f)) % kernelDim_0;
//calculate new coordinate relative to kernel size
aux = 1 + kernelDim_2/2;
cufftComplex a = d_Src[k_2 + aux *(k_1 + kernelDim_1 * k_0)];
		if( (k_0 + k_1 + k_2) % 2 == 1 )//after much debugging it seems the phase shift is 0 or Pi (nothing in between). In Matlab it is a nice linear change as programmed above
{
a.x = -a.x;
a.y = -a.y;
}
mulAndScale(a, b, c);
//__syncthreads();//this actually slows down the code by a lot (0.1 sec for 512x512x512)
d_Dst[i] = a;
i += offset;
}
};
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
__global__ void __launch_bounds__(MAX_THREADS_CUDA) fftShiftKernel(imageType* kernelCUDA,imageType* kernelPaddedCUDA,int kernelDim_0,int kernelDim_1,int kernelDim_2,int imDim_0,int imDim_1,int imDim_2)
{
int kernelSize = kernelDim_0 * kernelDim_1 * kernelDim_2;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid<kernelSize)
{
//find coordinates
long long int x,y,z,aux;
z = tid % kernelDim_2;
aux = (tid - z)/kernelDim_2;
y = aux % kernelDim_1;
x = (aux - y)/kernelDim_1;
//center coordinates
x -= kernelDim_0/2;
y -= kernelDim_1/2;
z -= kernelDim_2/2;
//circular shift if necessary
if(x<0) x += imDim_0;
if(y<0) y += imDim_1;
if(z<0) z += imDim_2;
//calculate position in padded kernel
aux = z + imDim_2 * (y + imDim_1 * x);
//copy value
		kernelPaddedCUDA[aux] = kernelCUDA[tid];//for the most part it should be a coalesced access in both places
}
}
//=====================================================================
//-------------to debug elements--------------------------------------
void writeOutCUDAfft(char* filename,imageType* fftCUDA,int* fftCUDAdims)
{
int fftSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
fftSize *= fftCUDAdims[ii];
}
//copy FFT from CUDA
imageType* fftHOST = new imageType[2*fftSize];//complex format
HANDLE_ERROR(cudaMemcpy(fftHOST,fftCUDA,2*sizeof(imageType)*fftSize,cudaMemcpyDeviceToHost));
//calculate module
/*
int count = 0;
for(int ii=0;ii<fftSize;ii++)
{
fftHOST[ii] = sqrt(fftHOST[count]*fftHOST[count] + fftHOST[count+1]*fftHOST[count+1]);
count += 2;
}
*/
FILE* fid = fopen(filename,"wb");
if(fid == NULL)
{
printf("ERROR: at writeOutCUDAfft opening file %s\n",filename);
exit(2);
}else{
printf("DEBUGGING: Writing FFT (real part first,imaginary second) from CUDA of dimensions %d x %d x %d in file %s\n",fftCUDAdims[2],fftCUDAdims[1],fftCUDAdims[0],filename);
}
//fwrite(fftHOST,sizeof(imageType),fftSize,fid);
for(int ii=0;ii<2*fftSize;ii+=2)
fwrite(&(fftHOST[ii]),sizeof(imageType),1,fid);
for(int ii=1;ii<2*fftSize;ii+=2)
fwrite(&(fftHOST[ii]),sizeof(imageType),1,fid);
fclose(fid);
delete[] fftHOST;
}
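//Illustrative (hypothetical) call, mirroring the commented-out debug line inside convolution3DfftCUDAInPlace below:
//after an in-place R2C transform the non-redundant spectrum is imDim[0] x imDim[1] x (1 + imDim[2]/2), so
// int fftCUDAdims[dimsImage] = {imDim[0], imDim[1], 1 + imDim[2]/2};
// writeOutCUDAfft("fftDebug.bin", kernelPaddedCUDA, fftCUDAdims); //file name is just a placeholder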
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
imageType* convolution3DfftCUDA_test(imageType* im,
int* imDim,
imageType* kernel,
int devCUDA)
{
imageType* convResult = NULL;
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
cufftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( cudaSetDevice( devCUDA ) );
long long int imSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
//allocate memory for output result
convResult = new imageType[imSize];
//allocate memory in GPU
HANDLE_ERROR( cudaMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( cudaMalloc( (void**)&(kernelCUDA), imSizeFFT*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory limit in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( cudaMemcpy( kernelCUDA, kernel, imSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( imCUDA, im, imSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
cufftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], CUFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
cufftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], CUFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
//NOTE: from CUFFT manual: in-place output data is xy(z/2 + 1) fcomplex (float complex) elements. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
cufftExecR2C(fftPlanFwd, imCUDA, (cufftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming image
cufftExecR2C(fftPlanFwd, kernelCUDA, (cufftComplex *)kernelCUDA);HANDLE_ERROR_KERNEL;
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
int numThreads=std::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
int numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
modulateAndNormalize_kernel<<<numBlocks,numThreads>>>((cufftComplex *)imCUDA, (cufftComplex *)kernelCUDA, imSizeFFT/2,1.0f/(float)(imSize));//last parameter is the size of the FFT
//inverse FFT
cufftExecC2R(fftPlanInv, (cufftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host
HANDLE_ERROR(cudaMemcpy(convResult,imCUDA,sizeof(imageType)*imSize,cudaMemcpyDeviceToHost));
//release memory
( cufftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
( cufftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( imCUDA));
HANDLE_ERROR( cudaFree( kernelCUDA));
return convResult;
}
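//Usage sketch (hypothetical sizes, not from the original sources): this _test variant presumably expects a kernel
//buffer already padded to the full image size (no fftshift is applied here), e.g.
// int imDim[3] = {64, 64, 64};
// imageType* out = convolution3DfftCUDA_test(im, imDim, kernelPaddedToImageSize, 0); //device 0
// ...use out...
// delete[] out; //caller owns the returned buffer (allocated with new[])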
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
//NOTE: the kernel is padded on the GPU to avoid transferring a large padded kernel, since memcpy is a limiting factor
imageType* convolution3DfftCUDA(imageType* im,
int* imDim,
imageType* kernel,
int* kernelDim,
int devCUDA)
{
imageType* convResult = NULL;
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
imageType* kernelPaddedCUDA = NULL;
cufftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( cudaSetDevice( devCUDA ) );
long long int imSize = 1;
long long int kernelSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
kernelSize *= (long long int) (kernelDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
//allocate memory for output result
convResult = new imageType[imSize];
//allocate memory in GPU
HANDLE_ERROR( cudaMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( cudaMalloc( (void**)&(kernelCUDA), (kernelSize)*sizeof(imageType) ) );
HANDLE_ERROR( cudaMalloc( (void**)&(kernelPaddedCUDA), imSizeFFT*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory limit in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( cudaMemcpy( kernelCUDA, kernel, kernelSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( imCUDA, im, imSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
//apply fftshift to kernel and pad it with zeros so we can calculate convolution with FFT
HANDLE_ERROR( cudaMemset( kernelPaddedCUDA, 0, imSizeFFT*sizeof(imageType) ));
int numThreads=std::min((long long int)MAX_THREADS_CUDA,kernelSize);
int numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(kernelSize+(long long int)(numThreads-1))/((long long int)numThreads));
fftShiftKernel<<<numBlocks,numThreads>>>(kernelCUDA,kernelPaddedCUDA,kernelDim[0],kernelDim[1],kernelDim[2],imDim[0],imDim[1],imDim[2]);HANDLE_ERROR_KERNEL;
//make sure GPU finishes before we launch two different streams
HANDLE_ERROR(cudaDeviceSynchronize());
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
cufftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], CUFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
cufftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], CUFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
//NOTE: from CUFFT manual: in-place output data is xy(z/2 + 1) fcomplex (float complex) elements. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
cufftExecR2C(fftPlanFwd, imCUDA, (cufftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming image
cufftExecR2C(fftPlanFwd, kernelPaddedCUDA, (cufftComplex *)kernelPaddedCUDA);HANDLE_ERROR_KERNEL;
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
numThreads=std::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
modulateAndNormalize_kernel<<<numBlocks,numThreads>>>((cufftComplex *)imCUDA, (cufftComplex *)kernelPaddedCUDA, imSizeFFT/2,1.0f/(float)(imSize));//last parameter is the size of the FFT
//inverse FFT
cufftExecC2R(fftPlanInv, (cufftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host
HANDLE_ERROR(cudaMemcpy(convResult,imCUDA,sizeof(imageType)*imSize,cudaMemcpyDeviceToHost));
//release memory
( cufftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
( cufftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( imCUDA));
HANDLE_ERROR( cudaFree( kernelCUDA));
HANDLE_ERROR( cudaFree( kernelPaddedCUDA));
return convResult;
}
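//Usage sketch (hypothetical sizes, error handling omitted):
// int imDim[3] = {128, 128, 128};
// int kernelDim[3] = {5, 5, 5};
// imageType* out = convolution3DfftCUDA(im, imDim, kernel, kernelDim, 0); //device 0
// ...use out...
// delete[] out; //caller owns the returned buffer (allocated with new[])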
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
//NOTE: the kernel is padded on the GPU to avoid transferring a large padded kernel, since memcpy is a limiting factor
void convolution3DfftCUDAInPlace(imageType* im,int* imDim,imageType* kernel,int* kernelDim,int devCUDA)
{
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
imageType* kernelPaddedCUDA = NULL;
cufftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( cudaSetDevice( devCUDA ) );
long long int imSize = 1;
long long int kernelSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
kernelSize *= (long long int) (kernelDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
//allocate memory in GPU
HANDLE_ERROR( cudaMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( cudaMalloc( (void**)&(kernelCUDA), (kernelSize)*sizeof(imageType) ) );
HANDLE_ERROR( cudaMalloc( (void**)&(kernelPaddedCUDA), imSizeFFT*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory limit in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( cudaMemcpy( kernelCUDA, kernel, kernelSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( imCUDA, im, imSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
//apply fftshift to kernel and pad it with zeros so we can calculate convolution with FFT
HANDLE_ERROR( cudaMemset( kernelPaddedCUDA, 0, imSizeFFT*sizeof(imageType) ));
int numThreads=std::min((long long int)MAX_THREADS_CUDA,kernelSize);
int numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(kernelSize+(long long int)(numThreads-1))/((long long int)numThreads));
fftShiftKernel<<<numBlocks,numThreads>>>(kernelCUDA,kernelPaddedCUDA,kernelDim[0],kernelDim[1],kernelDim[2],imDim[0],imDim[1],imDim[2]);HANDLE_ERROR_KERNEL;
//make sure GPU finishes
HANDLE_ERROR(cudaDeviceSynchronize());
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
cufftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], CUFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
//cufftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], CUFFT_C2R);HANDLE_ERROR_KERNEL;//we wait to conserve memory
//cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
//NOTE: from CUFFT manual: in-place output data is xy(z/2 + 1) fcomplex (float complex) elements. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
cufftExecR2C(fftPlanFwd, imCUDA, (cufftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming image
cufftExecR2C(fftPlanFwd, kernelPaddedCUDA, (cufftComplex *)kernelPaddedCUDA);HANDLE_ERROR_KERNEL;
//int fftCUDAdims[dimsImage] ={imDim[0],imDim[1],1+imDim[2]/2};
//writeOutCUDAfft("E:/temp/fftCUDAgood.bin",kernelPaddedCUDA,fftCUDAdims);
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
numThreads=std::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
modulateAndNormalize_kernel<<<numBlocks,numThreads>>>((cufftComplex *)imCUDA, (cufftComplex *)kernelPaddedCUDA, imSizeFFT/2,1.0f/(float)(imSize));HANDLE_ERROR_KERNEL;//last parameter is the size of the FFT
//we free memory first so we can perform larger block convolutions
( cufftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( kernelCUDA));
HANDLE_ERROR( cudaFree( kernelPaddedCUDA));
//inverse FFT
cufftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], CUFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
cufftExecC2R(fftPlanInv, (cufftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host and overwrite image
HANDLE_ERROR(cudaMemcpy(im,imCUDA,sizeof(imageType)*imSize,cudaMemcpyDeviceToHost));
//release memory
( cufftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
//( cufftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( imCUDA));
//HANDLE_ERROR( cudaFree( kernelCUDA));
//HANDLE_ERROR( cudaFree( kernelPaddedCUDA));
}
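//Usage sketch (hypothetical sizes): unlike the function above, this version overwrites the input image with the
//convolution result and returns nothing:
// int imDim[3] = {512, 512, 64};
// int kernelDim[3] = {15, 15, 15};
// convolution3DfftCUDAInPlace(im, imDim, kernel, kernelDim, 0); //im now holds the convolved image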
//=====================================================================
//WARNING: for cuFFT the fastest running index is z direction!!! so pos = z + imDim[2] * (y + imDim[1] * x)
//NOTE: here the small kernel is transformed at its own size and subsampled in Fourier space to avoid transferring a large padded kernel, since memcpy is a limiting factor
/*
void convolution3DfftCUDAInPlaceSaveMemory(imageType* im,int* imDim,imageType* kernel,int* kernelDim,int devCUDA)
{
imageType* imCUDA = NULL;
imageType* kernelCUDA = NULL;
//imageType* kernelFFTCUDA = NULL; //I need a duplicate in order to perform FFTshift (I can not do it in place)
cufftHandle fftPlanFwd, fftPlanInv;
HANDLE_ERROR( cudaSetDevice( devCUDA ) );
long long int imSize = 1;
long long int kernelSize = 1;
for(int ii=0;ii<dimsImage;ii++)
{
imSize *= (long long int) (imDim[ii]);
kernelSize *= (long long int) (kernelDim[ii]);
}
long long int imSizeFFT = imSize+(long long int)(2*imDim[0]*imDim[1]); //size of the R2C transform in cuFFTComplex
long long int kernelSizeFFT = kernelSize + (long long int)(2*kernelDim[0]*kernelDim[1]);
//allocate memory in GPU
HANDLE_ERROR( cudaMalloc( (void**)&(imCUDA), imSizeFFT*sizeof(imageType) ) );//a little bit larger to allow in-place FFT
HANDLE_ERROR( cudaMalloc( (void**)&(kernelCUDA), (kernelSizeFFT)*sizeof(imageType) ) );
//HANDLE_ERROR( cudaMalloc( (void**)&(kernelFFTCUDA), (kernelSizeFFT)*sizeof(imageType) ) );
//TODO: pad image to a power of 2 size in all dimensions (use whatever boundary conditions you want to apply)
//TODO: pad kernel to image size
//TODO: pad kernel and image to xy(z/2 + 1) for in-place transform
//NOTE: in the example for 2D convolution using FFT in the Nvidia SDK they do the padding in the GPU, but it might be pushing the memory limit in the GPU for large images.
//printf("Copying memory (kernel and image) to GPU\n");
HANDLE_ERROR( cudaMemcpy( kernelCUDA, kernel, kernelSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( imCUDA, im, imSize*sizeof(imageType) , cudaMemcpyHostToDevice ) );
//apply fftshift to kernel and pad it with zeros so we can calculate convolution with FFT
//HANDLE_ERROR( cudaMemset( kernelFFTCUDA, 0, kernelSizeFFT*sizeof(imageType) ));
//int numThreads=std::min((long long int)MAX_THREADS_CUDA,kernelSize);
//int numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(kernelSize+(long long int)(numThreads-1))/((long long int)numThreads));
//fftShiftKernel<<<numBlocks,numThreads>>>(kernelCUDA,kernelFFTCUDA,kernelDim[0],kernelDim[1],kernelDim[2],kernelDim[0],kernelDim[1],kernelDim[2]);HANDLE_ERROR_KERNEL;
//make sure GPU finishes
HANDLE_ERROR(cudaDeviceSynchronize());
//printf("Creating R2C & C2R FFT plans for size %i x %i x %i\n",imDim[0],imDim[1],imDim[2]);
cufftPlan3d(&fftPlanFwd, imDim[0], imDim[1], imDim[2], CUFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
//transforming convolution kernel; TODO: if I do multiple convolutions with the same kernel I could reuse the results at the expense of using out-of-place memory (and then the layout of the data is different!!!! so imCUDAfft should also be out of place)
//NOTE: from CUFFT manual: If idata and odata are the same, this method does an in-place transform.
//NOTE: from CUFFT manual: in-place output data is xy(z/2 + 1) fcomplex (float complex) elements. Therefore, in order to perform an in-place FFT, the user has to pad the input array in the last dimension to Nn/2 + 1 complex elements interleaved. Note that the real-to-complex transform is implicitly forward.
cufftExecR2C(fftPlanFwd, imCUDA, (cufftComplex *)imCUDA);HANDLE_ERROR_KERNEL;
//transforming kernel
cufftDestroy(fftPlanFwd); HANDLE_ERROR_KERNEL;
cufftPlan3d(&fftPlanFwd, kernelDim[0], kernelDim[1], kernelDim[2], CUFFT_R2C);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL; //for highest performance since we do not need FFTW compatibility
cufftExecR2C(fftPlanFwd, kernelCUDA, (cufftComplex *)kernelCUDA);HANDLE_ERROR_KERNEL;
//int fftCUDAdims[dimsImage] ={kernelDim[0],kernelDim[1],1+kernelDim[2]/2};
//writeOutCUDAfft("E:/temp/fftCUDAbad.bin",kernelCUDA,fftCUDAdims);
//multiply image and kernel in fourier space (and normalize)
//NOTE: from CUFFT manual: CUFFT performs un-normalized FFTs; that is, performing a forward FFT on an input data set followed by an inverse FFT on the resulting set yields data that is equal to the input scaled by the number of elements.
int numThreads=std::min((long long int)MAX_THREADS_CUDA,imSizeFFT/2);//we are using complex numbers
int numBlocks=std::min((long long int)MAX_BLOCKS_CUDA,(long long int)(imSizeFFT/2+(long long int)(numThreads-1))/((long long int)numThreads));
//we multiply two FFT of different sizes (but one was just padded with zeros)
modulateAndNormalizeSubsampled_kernel<<<numBlocks,numThreads>>>((cufftComplex *)imCUDA, (cufftComplex *)kernelCUDA, kernelDim[0],kernelDim[1],kernelDim[2],imDim[0],imDim[1],imDim[2], imSizeFFT/2,1.0f/(float)(imSize));HANDLE_ERROR_KERNEL;
//we free memory first so we can perform larger block convolutions
cufftDestroy(fftPlanFwd) ;HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( kernelCUDA));
//HANDLE_ERROR( cudaFree( kernelFFTCUDA));
//inverse FFT
cufftPlan3d(&fftPlanInv, imDim[0], imDim[1], imDim[2], CUFFT_C2R);HANDLE_ERROR_KERNEL;
cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_NATIVE);HANDLE_ERROR_KERNEL;
cufftExecC2R(fftPlanInv, (cufftComplex *)imCUDA, imCUDA);HANDLE_ERROR_KERNEL;
//copy result to host and overwrite image
HANDLE_ERROR(cudaMemcpy(im,imCUDA,sizeof(imageType)*imSize,cudaMemcpyDeviceToHost));
//release memory
( cufftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( imCUDA));
}
*/
|
c1d7ac6b90e07cafe5062b6cfd864038af498d61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "IO.h"
#include <cutil_inline.h>
//Device Code
__global__ void SuavizaCuda(float *Tratada, float *Original, unsigned int nDimX, unsigned int nDimY)
{
unsigned int
UpIzq, UpCen, UpDer,
Izqda, Centr, Derch,
DoIzq, DoCen, DoDer,
UltPriFil, PriUltFil, UltUltFil,
PriCol, UltCol;
float
fUpIzq, fUpCen, fUpDer,
fIzqda, fCentr, fDerch,
fDoIzq, fDoCen, fDoDer,
Divisor;
int general = 0;
// Compute the thread index
int i = blockDim.x * blockIdx.x + threadIdx.x;
UltPriFil=nDimX-1;
PriUltFil=(nDimY-1)*nDimX;
UltUltFil=nDimY*nDimX-1;
if( i <= (nDimX * nDimY-1) ){
UpCen=i-nDimX; UpIzq=UpCen-1; UpDer=UpCen+1;
Centr=i; Izqda=Centr-1; Derch=Centr+1;
DoCen=i+nDimX; DoIzq=DoCen-1; DoDer=DoCen+1;
PriCol=i % nDimX;
UltCol=(i+1) % nDimX;
if (i == 0) /* element (0,0) */
{
fUpIzq=.0; fUpCen=.0; fUpDer=.0;
fIzqda=.0; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=4.0;
}
else
{
if (i == UltPriFil) /* element (0, nDimX-1) */
{
fUpIzq=.0; fUpCen=.0; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=.0;
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=.0;
Divisor=4.0;
}
else
{
if (i == PriUltFil) /* element (nDimY-1, 0) */
{
fUpIzq=.0; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=.0; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=.0; fDoDer=.0;
Divisor=4.0;
}
else
{
if (i == UltUltFil) /* element (nDimY-1, nDimX-1) */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=.0;
fDoIzq=.0; fDoCen=.0; fDoDer=.0;
Divisor=4.0;
}
else
{
if (i < UltPriFil) /* interior elements of the first row */
{
fUpIzq=.0; fUpCen=.0; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=6.0;
}
else
{
if (i > PriUltFil) /* interior elements of the last row */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=.0; fDoDer=.0;
Divisor=6.0;
}
else
{
if (PriCol == 0) /* first-column elements not handled above */
{
fUpIzq=.0; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=.0; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=6.0;
}
else
{
if (UltCol == 0) /* last-column elements not handled above */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=.0;
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=.0;
Divisor=6.0;
}
else /* general case */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=9.0;
general = 1;
}
}
}
}
}
}
}
}
if(general == 1){
Tratada[i] = ( (fUpIzq / 16) + (fUpCen / 8) + (fUpDer / 16) + (fIzqda / 8) + (fCentr / 4)+ (fDerch / 8) + (fDoIzq / 16) + (fDoCen / 8)+ (fDoDer / 16));
general = 0;
}
else{
Tratada[i] = (fUpIzq+fUpCen+fUpDer + fIzqda+fCentr+fDerch + fDoIzq+fDoCen+fDoDer) / Divisor;
}
} //End of the bounds-check if
}
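//NOTE: in the general (interior) case the weights form the separable binomial kernel
// 1/16 1/8 1/16
// 1/8 1/4 1/8
// 1/16 1/8 1/16
//which sums to 4*(1/16) + 4*(1/8) + 1/4 = 1, so the image mean is preserved; at corners and edges the code
//falls back to a plain average of the available neighbours (Divisor = 4 or 6).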
int main(int argc, char* argv[]) {
unsigned char *Cabecera=NULL;
char *Entrada=NULL;
char *Salida=NULL;
unsigned int nDimX, nDimY, CabeceraSize, rc, Itera, i;
unsigned short BytePorPixel;
int HilosPorBloque=0, BloquesPorGrid=0;
float *ImagenTratada=NULL, *ImagenOriginal=NULL;
float *GPUTratada=NULL, *GPUOriginal=NULL;
float time=.0;
hipEvent_t start, stop;
if (argc != 5) {
printf("Uso: Suaviza-Secuencial <Fichero-Entrada> <Fichero-Salida> <Iteraciones> <Threads per Block>\n");
return -1;
}
Entrada = strdup(argv[1]);
Salida = strdup(argv[2]);
Itera = atoi(argv[3]);
HilosPorBloque = atoi(argv[4]);
/* Get the image characteristics */
rc = nGetSize(Entrada, &nDimX, &nDimY, &BytePorPixel, &CabeceraSize);
assert(!rc);
/* Make sure it is 1 byte per pixel */
if (BytePorPixel != 1) {
printf("Lo sentimos pero solo para 1 Byte por pixel\n");
return -1;
}
/* For the image header */
Cabecera = (unsigned char *)calloc(CabeceraSize, sizeof(unsigned char));
assert(Cabecera);
/* Holds the image being updated (t+1) */
ImagenTratada = (float *)calloc(BytePorPixel*nDimX*nDimY, sizeof(float));
assert(ImagenTratada);
/* Holds the image used to update (t+1), i.e., the image at time (t) */
ImagenOriginal = (float *)calloc(BytePorPixel*nDimX*nDimY, sizeof(float));
assert(ImagenOriginal);
/* Read the image */
rc = nLoad3(Entrada, nDimX, nDimY, ImagenOriginal, Cabecera, CabeceraSize);
assert(!rc);
/* Do not make CUDA calls before calling setdevice. */
//cutilSafeCall(hipSetDevice(0));
hipEventCreate(&start);
hipEventCreate(&stop);
// Allocating memory for the vectors on the device
// cutilSafeCall( );
hipMalloc((void**)&GPUOriginal, nDimX*nDimY*sizeof(float));
hipMalloc((void**)&GPUTratada, nDimX*nDimY*sizeof(float));
//cutilSafeCall(
hipMemcpy(GPUOriginal, ImagenOriginal, nDimX*nDimY*sizeof(float), hipMemcpyHostToDevice);
BloquesPorGrid= (nDimX*nDimY + HilosPorBloque -1) / HilosPorBloque;
hipEventRecord(start,0);
for (i=1; i<=Itera; i++)
{
if ((i % 2) != 0)
hipLaunchKernelGGL(( SuavizaCuda), dim3(BloquesPorGrid), dim3(HilosPorBloque), 0, 0, GPUTratada, GPUOriginal, nDimX, nDimY);
else
hipLaunchKernelGGL(( SuavizaCuda), dim3(BloquesPorGrid), dim3(HilosPorBloque), 0, 0, GPUOriginal, GPUTratada, nDimX, nDimY);
}
//cutilCheckMsg("kernel launch failure");
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time,start,stop);
printf("tiempo %f\n", time);
if ((Itera % 2) != 0)
// cutilSafeCall();
hipMemcpy(ImagenTratada, GPUTratada, nDimX*nDimY*sizeof(float), hipMemcpyDeviceToHost);
else
// cutilSafeCall();
hipMemcpy(ImagenTratada, GPUOriginal, nDimX*nDimY*sizeof(float), hipMemcpyDeviceToHost);
/* Save the image */
rc = nSave3(Salida, nDimX, nDimY, ImagenTratada, Cabecera, CabeceraSize);
assert(!rc);
free(Cabecera);
free(ImagenTratada);
free(ImagenOriginal);
// cutilSafeCall();
hipFree(GPUTratada);
hipFree(GPUOriginal);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| c1d7ac6b90e07cafe5062b6cfd864038af498d61.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "IO.h"
#include <cutil_inline.h>
//Device Code
__global__ void SuavizaCuda(float *Tratada, float *Original, unsigned int nDimX, unsigned int nDimY)
{
unsigned int
UpIzq, UpCen, UpDer,
Izqda, Centr, Derch,
DoIzq, DoCen, DoDer,
UltPriFil, PriUltFil, UltUltFil,
PriCol, UltCol;
float
fUpIzq, fUpCen, fUpDer,
fIzqda, fCentr, fDerch,
fDoIzq, fDoCen, fDoDer,
Divisor;
int general = 0;
// Compute the thread index
int i = blockDim.x * blockIdx.x + threadIdx.x;
UltPriFil=nDimX-1;
PriUltFil=(nDimY-1)*nDimX;
UltUltFil=nDimY*nDimX-1;
if( i <= (nDimX * nDimY-1) ){
UpCen=i-nDimX; UpIzq=UpCen-1; UpDer=UpCen+1;
Centr=i; Izqda=Centr-1; Derch=Centr+1;
DoCen=i+nDimX; DoIzq=DoCen-1; DoDer=DoCen+1;
PriCol=i % nDimX;
UltCol=(i+1) % nDimX;
if (i == 0) /* element (0,0) */
{
fUpIzq=.0; fUpCen=.0; fUpDer=.0;
fIzqda=.0; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=4.0;
}
else
{
if (i == UltPriFil) /* element (0, nDimX-1) */
{
fUpIzq=.0; fUpCen=.0; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=.0;
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=.0;
Divisor=4.0;
}
else
{
if (i == PriUltFil) /* element (nDimY-1, 0) */
{
fUpIzq=.0; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=.0; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=.0; fDoDer=.0;
Divisor=4.0;
}
else
{
if (i == UltUltFil) /* element (nDimY-1, nDimX-1) */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=.0;
fDoIzq=.0; fDoCen=.0; fDoDer=.0;
Divisor=4.0;
}
else
{
if (i < UltPriFil) /* interior elements of the first row */
{
fUpIzq=.0; fUpCen=.0; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=6.0;
}
else
{
if (i > PriUltFil) /* interior elements of the last row */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=.0; fDoDer=.0;
Divisor=6.0;
}
else
{
if (PriCol == 0) /* first-column elements not handled above */
{
fUpIzq=.0; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=.0; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=.0; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=6.0;
}
else
{
if (UltCol == 0) /* last-column elements not handled above */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=.0;
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=.0;
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=.0;
Divisor=6.0;
}
else /* general case */
{
fUpIzq=Original[UpIzq]; fUpCen=Original[UpCen]; fUpDer=Original[UpDer];
fIzqda=Original[Izqda]; fCentr=Original[Centr]; fDerch=Original[Derch];
fDoIzq=Original[DoIzq]; fDoCen=Original[DoCen]; fDoDer=Original[DoDer];
Divisor=9.0;
general = 1;
}
}
}
}
}
}
}
}
if(general == 1){
Tratada[i] = ( (fUpIzq / 16) + (fUpCen / 8) + (fUpDer / 16) + (fIzqda / 8) + (fCentr / 4)+ (fDerch / 8) + (fDoIzq / 16) + (fDoCen / 8)+ (fDoDer / 16));
general = 0;
}
else{
Tratada[i] = (fUpIzq+fUpCen+fUpDer + fIzqda+fCentr+fDerch + fDoIzq+fDoCen+fDoDer) / Divisor;
}
} //End of the bounds-check if
}
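//NOTE: in the general (interior) case the weights form the separable binomial kernel
// 1/16 1/8 1/16
// 1/8 1/4 1/8
// 1/16 1/8 1/16
//which sums to 4*(1/16) + 4*(1/8) + 1/4 = 1, so the image mean is preserved; at corners and edges the code
//falls back to a plain average of the available neighbours (Divisor = 4 or 6).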
int main(int argc, char* argv[]) {
unsigned char *Cabecera=NULL;
char *Entrada=NULL;
char *Salida=NULL;
unsigned int nDimX, nDimY, CabeceraSize, rc, Itera, i;
unsigned short BytePorPixel;
int HilosPorBloque=0, BloquesPorGrid=0;
float *ImagenTratada=NULL, *ImagenOriginal=NULL;
float *GPUTratada=NULL, *GPUOriginal=NULL;
float time=.0;
cudaEvent_t start, stop;
if (argc != 5) {
printf("Uso: Suaviza-Secuencial <Fichero-Entrada> <Fichero-Salida> <Iteraciones> <Threads per Block>\n");
return -1;
}
Entrada = strdup(argv[1]);
Salida = strdup(argv[2]);
Itera = atoi(argv[3]);
HilosPorBloque = atoi(argv[4]);
/* Get the image characteristics */
rc = nGetSize(Entrada, &nDimX, &nDimY, &BytePorPixel, &CabeceraSize);
assert(!rc);
/* Make sure it is 1 byte per pixel */
if (BytePorPixel != 1) {
printf("Lo sentimos pero solo para 1 Byte por pixel\n");
return -1;
}
/* For the image header */
Cabecera = (unsigned char *)calloc(CabeceraSize, sizeof(unsigned char));
assert(Cabecera);
/* Holds the image being updated (t+1) */
ImagenTratada = (float *)calloc(BytePorPixel*nDimX*nDimY, sizeof(float));
assert(ImagenTratada);
/* Holds the image used to update (t+1), i.e., the image at time (t) */
ImagenOriginal = (float *)calloc(BytePorPixel*nDimX*nDimY, sizeof(float));
assert(ImagenOriginal);
/* Read the image */
rc = nLoad3(Entrada, nDimX, nDimY, ImagenOriginal, Cabecera, CabeceraSize);
assert(!rc);
/* Do not make CUDA calls before calling setdevice. */
//cutilSafeCall(cudaSetDevice(0));
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Allocating memory for the vectors on the device
// cutilSafeCall( );
cudaMalloc((void**)&GPUOriginal, nDimX*nDimY*sizeof(float));
cudaMalloc((void**)&GPUTratada, nDimX*nDimY*sizeof(float));
//cutilSafeCall(
cudaMemcpy(GPUOriginal, ImagenOriginal, nDimX*nDimY*sizeof(float), cudaMemcpyHostToDevice);
BloquesPorGrid= (nDimX*nDimY + HilosPorBloque -1) / HilosPorBloque;
cudaEventRecord(start,0);
for (i=1; i<=Itera; i++)
{
if ((i % 2) != 0)
SuavizaCuda<<<BloquesPorGrid, HilosPorBloque>>>(GPUTratada, GPUOriginal, nDimX, nDimY);
else
SuavizaCuda<<<BloquesPorGrid, HilosPorBloque>>>(GPUOriginal, GPUTratada, nDimX, nDimY);
}
//cutilCheckMsg("kernel launch failure");
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
printf("tiempo %f\n", time);
if ((Itera % 2) != 0)
// cutilSafeCall();
cudaMemcpy(ImagenTratada, GPUTratada, nDimX*nDimY*sizeof(float), cudaMemcpyDeviceToHost);
else
// cutilSafeCall();
cudaMemcpy(ImagenTratada, GPUOriginal, nDimX*nDimY*sizeof(float), cudaMemcpyDeviceToHost);
/* Save the image */
rc = nSave3(Salida, nDimX, nDimY, ImagenTratada, Cabecera, CabeceraSize);
assert(!rc);
free(Cabecera);
free(ImagenTratada);
free(ImagenOriginal);
// cutilSafeCall();
cudaFree(GPUTratada);
cudaFree(GPUOriginal);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
4ee5fe5105a9c63befdb407e5c43364838d5da13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define BLOCKSIZE 16
#define N 1024
__global__ void gpu_mat_vec_multiply(double *device_mat,
double *device_vec,
double *device_res){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
int tindex = tidx + gridDim.x * BLOCKSIZE * tidy;
if(tindex < N){
int m = tindex * N; //each thread handles one matrix row
device_res[tindex] = 0.0;
for(int i = 0;i < N;i ++){
device_res[tindex] += device_mat[m + i] * device_vec[i];
}
}
__syncthreads();
}
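//NOTE: each thread computes one full row of the matrix-vector product (row index = tindex), reading N matrix
//entries and N vector entries; the trailing __syncthreads() is not needed for correctness (threads share no data)
//but is kept as in the original.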
int main(){
double *host_mat, *host_vec, *host_res;
double *device_mat, *device_vec, *device_res;
host_mat = new double[N * N];
host_vec = new double[N];
host_res = new double[N];
for(int i = 0;i < N;i ++){
host_vec[i] = double(rand()%100);
for(int j = 0;j < N;j ++){
host_mat[i * N + j] = double(rand()%40);
}
}
hipMalloc(&device_mat, (N*N)*sizeof(double));
hipMalloc(&device_vec, N*sizeof(double));
hipMalloc(&device_res, N*sizeof(double));
hipMemcpy(device_mat, host_mat, (N*N)*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_vec, host_vec, N*sizeof(double), hipMemcpyHostToDevice);
int max = BLOCKSIZE * BLOCKSIZE;
int BLocksPerGrid = N / max + 1;
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
if(N % max == 0) BLocksPerGrid --;
dim3 dimGrid(1, BLocksPerGrid);
hipLaunchKernelGGL(( gpu_mat_vec_multiply), dim3(dimGrid), dim3(dimBlock), 0, 0, device_mat, device_vec, device_res);
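// NOTE: this test harness only launches the kernel; a hypothetical retrieval of the result (not part of the
// original) would look like:
// hipDeviceSynchronize();
// hipMemcpy(host_res, device_res, N*sizeof(double), hipMemcpyDeviceToHost);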
} | 4ee5fe5105a9c63befdb407e5c43364838d5da13.cu | #include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define BLOCKSIZE 16
#define N 1024
__global__ void gpu_mat_vec_multiply(double *device_mat,
double *device_vec,
double *device_res){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
int tindex = tidx + gridDim.x * BLOCKSIZE * tidy;
if(tindex < N){
int m = tindex * N; //each thread handles one matrix row
device_res[tindex] = 0.0;
for(int i = 0;i < N;i ++){
device_res[tindex] += device_mat[m + i] * device_vec[i];
}
}
__syncthreads();
}
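//NOTE: each thread computes one full row of the matrix-vector product (row index = tindex), reading N matrix
//entries and N vector entries; the trailing __syncthreads() is not needed for correctness (threads share no data)
//but is kept as in the original.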
int main(){
double *host_mat, *host_vec, *host_res;
double *device_mat, *device_vec, *device_res;
host_mat = new double[N * N];
host_vec = new double[N];
host_res = new double[N];
for(int i = 0;i < N;i ++){
host_vec[i] = double(rand()%100);
for(int j = 0;j < N;j ++){
host_mat[i * N + j] = double(rand()%40);
}
}
cudaMalloc(&device_mat, (N*N)*sizeof(double));
cudaMalloc(&device_vec, N*sizeof(double));
cudaMalloc(&device_res, N*sizeof(double));
cudaMemcpy(device_mat, host_mat, (N*N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_vec, host_vec, N*sizeof(double), cudaMemcpyHostToDevice);
int max = BLOCKSIZE * BLOCKSIZE;
int BLocksPerGrid = N / max + 1;
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
if(N % max == 0) BLocksPerGrid --;
dim3 dimGrid(1, BLocksPerGrid);
gpu_mat_vec_multiply<<<dimGrid, dimBlock>>>(device_mat, device_vec, device_res);
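// NOTE: this test harness only launches the kernel; a hypothetical retrieval of the result (not part of the
// original) would look like:
// cudaDeviceSynchronize();
// cudaMemcpy(host_res, device_res, N*sizeof(double), cudaMemcpyDeviceToHost);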
} |
0016b087e94f319036f65d175bf296b10809f639.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
uchar4 *d_inputImageRGBA__;
uchar4 *d_outputImageRGBA__;
float *h_filter__;
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols, const float* const filter, const int filterWidth) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= numCols || row >= numRows )
return;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[row * numCols + col] = result;
}
__global__ void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x;
int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y;
if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows )
return;
int thread_1D_pos = absolute_image_position_y * numCols + absolute_image_position_x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
__global__ void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) {
//allocate memory for the three different channels
//original
hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage);
hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage);
hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage);
//Allocate memory for the filter on the GPU
hipMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth);
hipMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice);
}
void cleanup() {
hipFree(d_red);
hipFree(d_green);
hipFree(d_blue);
hipFree(d_filter);
}
void setFilter(float **h_filter, int *filterWidth, int blurKernelWidth, float blurKernelSigma) {
//Normally blurKernelWidth = 9 and blurKernelSigma = 2.0
*h_filter = new float[blurKernelWidth * blurKernelWidth];
*filterWidth = blurKernelWidth;
float filterSum = 0.f; //for normalization
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * blurKernelSigma * blurKernelSigma));
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r)
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c)
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
}
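//NOTE: e.g. for blurKernelWidth = 9 (blur_ops below passes sigma = width/4 = 2.25) this builds a 9x9 Gaussian
//with entries expf(-(r*r + c*c)/(2*sigma*sigma)) and then rescales them to sum to 1, so the blur preserves the
//overall image brightness.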
uchar4* blur_ops(uchar4* d_inputImageRGBA, size_t numRows, size_t numCols, int blurKernelWidth) {
float blurKernelSigma = blurKernelWidth/4.0f;
//Set filter array
float* h_filter;
int filterWidth;
setFilter(&h_filter, &filterWidth, blurKernelWidth, blurKernelSigma);
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(16,16,1);
//Calculate Grid Size
int a=numCols/blockSize.x, b=numRows/blockSize.y;
const dim3 gridSize(a+1,b+1,1);
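//NOTE: a+1 and b+1 round the grid up; when numCols or numRows is an exact multiple of 16 this adds one idle
//block column/row, which is harmless because every kernel below bounds-checks its pixel coordinates.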
const size_t numPixels = numRows * numCols;
uchar4 *d_outputImageRGBA;
hipMalloc((void **)&d_outputImageRGBA, sizeof(uchar4) * numPixels);
hipMemset(d_outputImageRGBA, 0, numPixels * sizeof(uchar4)); //make sure no memory is left laying around
d_inputImageRGBA__ = d_inputImageRGBA;
d_outputImageRGBA__ = d_outputImageRGBA;
//blurred
unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;
hipMalloc(&d_redBlurred, sizeof(unsigned char) * numPixels);
hipMalloc(&d_greenBlurred, sizeof(unsigned char) * numPixels);
hipMalloc(&d_blueBlurred, sizeof(unsigned char) * numPixels);
hipMemset(d_redBlurred, 0, sizeof(unsigned char) * numPixels);
hipMemset(d_greenBlurred, 0, sizeof(unsigned char) * numPixels);
hipMemset(d_blueBlurred, 0, sizeof(unsigned char) * numPixels);
allocateMemoryAndCopyToGPU(numRows, numCols, h_filter, filterWidth);
//Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red,d_green, d_blue);
hipDeviceSynchronize();
//Call blur kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize();
//Now we recombine the results.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize();
//cleanup memory
cleanup();
hipFree(d_redBlurred);
hipFree(d_greenBlurred);
hipFree(d_blueBlurred);
hipDeviceSynchronize();
//Initialize memory on host for output uchar4*
uchar4* h_out;
h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
//copy output from device to host
hipMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//cleanup memory on device
hipFree(d_inputImageRGBA__);
hipFree(d_outputImageRGBA__);
delete[] h_filter; //free the filter allocated in setFilter (h_filter__ is never assigned in this function)
//return h_out
return h_out;
} | 0016b087e94f319036f65d175bf296b10809f639.cu | #include <cuda_runtime.h>
#include <stdio.h>
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
uchar4 *d_inputImageRGBA__;
uchar4 *d_outputImageRGBA__;
float *h_filter__;
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols, const float* const filter, const int filterWidth) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= numCols || row >= numRows )
return;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[row * numCols + col] = result;
}
__global__ void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x;
int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y;
if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows )
return;
int thread_1D_pos = absolute_image_position_y * numCols + absolute_image_position_x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
__global__ void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) {
//allocate memory for the three different channels
//original
cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage);
cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage);
cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage);
//Allocate memory for the filter on the GPU
cudaMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth);
cudaMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice);
}
void cleanup() {
cudaFree(d_red);
cudaFree(d_green);
cudaFree(d_blue);
cudaFree(d_filter);
}
void setFilter(float **h_filter, int *filterWidth, int blurKernelWidth, float blurKernelSigma) {
//Normally blurKernelWidth = 9 and blurKernelSigma = 2.0
*h_filter = new float[blurKernelWidth * blurKernelWidth];
*filterWidth = blurKernelWidth;
float filterSum = 0.f; //for normalization
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * blurKernelSigma * blurKernelSigma));
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r)
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c)
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
}
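//NOTE: e.g. for blurKernelWidth = 9 (blur_ops below passes sigma = width/4 = 2.25) this builds a 9x9 Gaussian
//with entries expf(-(r*r + c*c)/(2*sigma*sigma)) and then rescales them to sum to 1, so the blur preserves the
//overall image brightness.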
uchar4* blur_ops(uchar4* d_inputImageRGBA, size_t numRows, size_t numCols, int blurKernelWidth) {
float blurKernelSigma = blurKernelWidth/4.0f;
//Set filter array
float* h_filter;
int filterWidth;
setFilter(&h_filter, &filterWidth, blurKernelWidth, blurKernelSigma);
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(16,16,1);
//Calculate Grid Size
int a=numCols/blockSize.x, b=numRows/blockSize.y;
const dim3 gridSize(a+1,b+1,1);
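//NOTE: a+1 and b+1 round the grid up; when numCols or numRows is an exact multiple of 16 this adds one idle
//block column/row, which is harmless because every kernel below bounds-checks its pixel coordinates.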
const size_t numPixels = numRows * numCols;
uchar4 *d_outputImageRGBA;
cudaMalloc((void **)&d_outputImageRGBA, sizeof(uchar4) * numPixels);
cudaMemset(d_outputImageRGBA, 0, numPixels * sizeof(uchar4)); //make sure no memory is left laying around
d_inputImageRGBA__ = d_inputImageRGBA;
d_outputImageRGBA__ = d_outputImageRGBA;
//blurred
unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;
cudaMalloc(&d_redBlurred, sizeof(unsigned char) * numPixels);
cudaMalloc(&d_greenBlurred, sizeof(unsigned char) * numPixels);
cudaMalloc(&d_blueBlurred, sizeof(unsigned char) * numPixels);
cudaMemset(d_redBlurred, 0, sizeof(unsigned char) * numPixels);
cudaMemset(d_greenBlurred, 0, sizeof(unsigned char) * numPixels);
cudaMemset(d_blueBlurred, 0, sizeof(unsigned char) * numPixels);
allocateMemoryAndCopyToGPU(numRows, numCols, h_filter, filterWidth);
//Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red,d_green, d_blue);
cudaDeviceSynchronize();
//Call blur kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize();
//Now we recombine the results.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize();
//cleanup memory
cleanup();
cudaFree(d_redBlurred);
cudaFree(d_greenBlurred);
cudaFree(d_blueBlurred);
cudaDeviceSynchronize();
//Initialize memory on host for output uchar4*
uchar4* h_out;
h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
//copy output from device to host
cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//cleanup memory on device
cudaFree(d_inputImageRGBA__);
cudaFree(d_outputImageRGBA__);
delete[] h_filter; //free the filter allocated in setFilter (h_filter__ is never assigned in this function)
//return h_out
return h_out;
} |
a012600fbc720e9c6e746bc9715e41ba3d68f97d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "visualize.h"
#include <utils.h>
__global__ void _colorLabels(int *labels,
float *image,
int label_count,
int pitch,
int width,
int height) {
int blk_x_idx = blockIdx.x * blockDim.x;
int blk_y_idx = blockIdx.y * blockDim.y;
int col = blk_x_idx + threadIdx.x;
int row = blk_y_idx + threadIdx.y;
if (col >= width || row >= height) //guard the partial blocks created by the CEIL grid sizing
    return;
int pitch_ele = pitch / sizeof(float);
int idx = row * pitch_ele + col;
int offset = width * height;
int label = labels[row * width + col];
unsigned int tmp = label * 0x01234567;
image[idx] = (tmp & 0xFF0000) >> 4;
image[idx + offset] = (tmp & 0xFF00) >> 2;
image[idx + offset * 2] = tmp & 0xFF;
}
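//NOTE: multiplying by the constant 0x01234567 is a cheap hash that spreads consecutive label ids across the
//32-bit range; the three masked fields are then written into the R, G and B planes of the planar float image
//(plane stride = width*height) so each label gets a distinct pseudo-colour.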
namespace CuMeanShift {
template <int blk_w, int blk_h>
void CudaColorLabels<blk_w, blk_h>::colorLabels(int *labels,
float *image,
int label_count,
int pitch,
int width,
int height) {
dim3 block_1(blk_w, blk_h);
dim3 grid_1(CEIL(width, blk_w), CEIL(height, blk_h));
hipLaunchKernelGGL(( _colorLabels), dim3(grid_1), dim3(block_1), 0, 0, labels, image, label_count, pitch, width, height);
hipDeviceSynchronize();
}
}
template class CuMeanShift::CudaColorLabels<16, 16>;
template class CuMeanShift::CudaColorLabels<32, 32>;
| a012600fbc720e9c6e746bc9715e41ba3d68f97d.cu | #include "visualize.h"
#include <utils.h>
__global__ void _colorLabels(int *labels,
float *image,
int label_count,
int pitch,
int width,
int height) {
int blk_x_idx = blockIdx.x * blockDim.x;
int blk_y_idx = blockIdx.y * blockDim.y;
int col = blk_x_idx + threadIdx.x;
int row = blk_y_idx + threadIdx.y;
if (col >= width || row >= height) //guard the partial blocks created by the CEIL grid sizing
    return;
int pitch_ele = pitch / sizeof(float);
int idx = row * pitch_ele + col;
int offset = width * height;
int label = labels[row * width + col];
unsigned int tmp = label * 0x01234567;
image[idx] = (tmp & 0xFF0000) >> 4;
image[idx + offset] = (tmp & 0xFF00) >> 2;
image[idx + offset * 2] = tmp & 0xFF;
}
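//NOTE: multiplying by the constant 0x01234567 is a cheap hash that spreads consecutive label ids across the
//32-bit range; the three masked fields are then written into the R, G and B planes of the planar float image
//(plane stride = width*height) so each label gets a distinct pseudo-colour.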
namespace CuMeanShift {
template <int blk_w, int blk_h>
void CudaColorLabels<blk_w, blk_h>::colorLabels(int *labels,
float *image,
int label_count,
int pitch,
int width,
int height) {
dim3 block_1(blk_w, blk_h);
dim3 grid_1(CEIL(width, blk_w), CEIL(height, blk_h));
_colorLabels<<<grid_1, block_1>>>(labels, image, label_count, pitch, width, height);
cudaDeviceSynchronize();
}
}
template class CuMeanShift::CudaColorLabels<16, 16>;
template class CuMeanShift::CudaColorLabels<32, 32>;
|
1f71b91bc2e801a244e8a27317b10227d7c95c8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Tue Aug 13 16:45:19 2013
@author Stan Tomov
*/
#include "common_magma.h"
#define PRECISION_s
#include "commonblas.h"
__global__ void stranspose_32( float *B, int ldb, const float *A, int lda )
{
__shared__ float a[32][SSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[16*ldb] = a[inx][iny+16];
B[24*ldb] = a[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
__syncthreads();
A += SSIZE_1SHARED;
B += __mul24( 16, ldb);
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source matrix
// This version works when m and n are divisible by 32.
//
extern "C" void
magmablas_stranspose(float *odata, magma_int_t ldo,
const float *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
//assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" );
dim3 threads( SSIZE_1SHARED, 8, 1 );
dim3 grid( m/32, n/32, 1 );
hipLaunchKernelGGL(( stranspose_32), dim3(grid), dim3(threads), 0, magma_stream , odata, ldo, idata, ldi );
}
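// Illustrative call (a sketch, assuming column-major storage with m and n multiples of 32 as required above):
// transposing an m x n device matrix dA (lda >= m) into dB (ldb >= n):
// magmablas_stranspose( dB, n, dA, m, m, n );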
| 1f71b91bc2e801a244e8a27317b10227d7c95c8d.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Tue Aug 13 16:45:19 2013
@author Stan Tomov
*/
#include "common_magma.h"
#define PRECISION_s
#include "commonblas.h"
__global__ void stranspose_32( float *B, int ldb, const float *A, int lda )
{
__shared__ float a[32][SSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[16*ldb] = a[inx][iny+16];
B[24*ldb] = a[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
__syncthreads();
A += SSIZE_1SHARED;
B += __mul24( 16, ldb);
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source matrix
// This version works when m and n are divisible by 32.
//
extern "C" void
magmablas_stranspose(float *odata, magma_int_t ldo,
const float *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
//assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" );
dim3 threads( SSIZE_1SHARED, 8, 1 );
dim3 grid( m/32, n/32, 1 );
stranspose_32<<< grid, threads, 0, magma_stream >>>( odata, ldo, idata, ldi );
}
|
7f42bda0f021de99a7fc2c8a781511fa955d0f00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d2r-32x32-3-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 25
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 2 - 2);
const AN5D_TYPE __c3Pad = (2);
#define __c3 c3
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __halo3 = 2;
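// The kernel0_K variants declared in the generated *_kernel.hu header fuse
// K = __side0Len time steps per launch. Each thread block produces a
// __side1Len x __side2Len x __side3Len output tile and reads an overlapped
// input region enlarged by __halo * K cells per side (the __side*LenOl sizes),
// so the halo consumed at every fused step stays inside the block; this is the
// usual AN5D-style overlapped temporal tiling.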
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 20;
const AN5D_TYPE __side3Len = 20;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
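// Reference host path: a radius-2, 13-point star stencil; each time step reads
// plane t%2 and writes plane (t+1)%2 of the ping-pong buffer A.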
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k]
+ 0.0620f * A[t%2][i-1][j][k] + 0.0621f * A[t%2][i+1][j][k]
+ 0.0622f * A[t%2][i][j-1][k] + 0.0623f * A[t%2][i][j+1][k]
+ 0.0624f * A[t%2][i][j][k-1] + 0.06245f * A[t%2][i][j][k+1]
+ 0.06255f * A[t%2][i-2][j][k] + 0.0626f * A[t%2][i+2][j][k]
+ 0.0627f * A[t%2][i][j-2][k] + 0.0628f * A[t%2][i][j+2][k]
+ 0.0629f * A[t%2][i][j][k-2] + 0.0630f * A[t%2][i][j][k+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 7f42bda0f021de99a7fc2c8a781511fa955d0f00.cu | #include <assert.h>
#include <stdio.h>
#include "star3d2r-32x32-3-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 25
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 2 - 2);
const AN5D_TYPE __c3Pad = (2);
#define __c3 c3
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __halo3 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 20;
const AN5D_TYPE __side3Len = 20;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k]
+ 0.0620f * A[t%2][i-1][j][k] + 0.0621f * A[t%2][i+1][j][k]
+ 0.0622f * A[t%2][i][j-1][k] + 0.0623f * A[t%2][i][j+1][k]
+ 0.0624f * A[t%2][i][j][k-1] + 0.06245f * A[t%2][i][j][k+1]
+ 0.06255f * A[t%2][i-2][j][k] + 0.0626f * A[t%2][i+2][j][k]
+ 0.0627f * A[t%2][i][j-2][k] + 0.0628f * A[t%2][i][j+2][k]
+ 0.0629f * A[t%2][i][j][k-2] + 0.0630f * A[t%2][i][j][k+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
9d21fcb95dc2708354ff1d81192e8fb37eaff1a1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <hip/hip_runtime.h>
#include "fbgemm_gpu/fbgemm_cuda_utils.cuh"
#include "fbgemm_gpu/layout_transform_ops.cuh"
#include "fbgemm_gpu/permute_pooled_embedding_ops_split.h"
#include "fbgemm_gpu/sparse_ops_utils.h"
using Tensor = at::Tensor;
namespace fbgemm_gpu {
Tensor permute_pooled_embs_split_gpu(
const Tensor& pooled_embs, // [B_local][Sum_T_global(D)]
const Tensor& offset_dim_list,
const Tensor& permute_list,
const Tensor& inv_offset_dim_list,
const Tensor& inv_permute_list) {
// inv_permute_list is not being used so it's not checked here.
TENSOR_ON_CUDA_GPU(pooled_embs);
TENSOR_ON_CUDA_GPU(offset_dim_list);
TENSOR_ON_CUDA_GPU(permute_list);
TENSOR_ON_CUDA_GPU(inv_offset_dim_list);
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(pooled_embs.get_device());
// We couldn't pass the "pooled_embs.is_contiguous()" check in the backward
// pass after D22767058. TODO: optimize and make sure pooled_embs is
// contiguous.
auto pooled_embs_contiguous = pooled_embs.contiguous();
const int64_t B = pooled_embs_contiguous.size(0);
const int64_t T = permute_list.numel();
const int64_t dim_sum = pooled_embs_contiguous.size(1);
// inv_permute_list is not being used so it's not checked here.
TENSORS_ON_SAME_DEVICE(pooled_embs_contiguous, offset_dim_list);
TENSORS_ON_SAME_DEVICE(pooled_embs_contiguous, permute_list);
TENSORS_ON_SAME_DEVICE(pooled_embs_contiguous, inv_offset_dim_list);
TORCH_CHECK(offset_dim_list.numel() == permute_list.numel() + 1);
TORCH_CHECK(offset_dim_list.numel() == inv_offset_dim_list.numel());
Tensor permuted_pooled_embs = at::empty_like(pooled_embs_contiguous);
// This kernel is moving D elements per warp.
// We are launching ( div_round_up(T, warp_per_block), B ) blocks.
// The grid z dimension is also used by B in case it's greater than 65535.
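// Worked example, assuming the usual fbgemm_gpu values kMaxThreads = 1024 and
// kWarpSize = 32 (so warp_per_block = 32): for T = 100 pooled tables and
// B = 100000 rows the launch becomes
// blocks = ( div_round_up(100, 32), min(100000, 32768), div_round_up(100000, 32768) )
// = ( 4, 32768, 4 ) with threads = 1024.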
const int32_t warp_per_block =
fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize;
const int32_t max_grid_dim_y =
32768; // The CUDA maximum is 65535, not a power of 2.
const dim3 threads(fbgemm_gpu::kMaxThreads);
const dim3 blocks(
fbgemm_gpu::div_round_up(T, warp_per_block),
::min(static_cast<int32_t>(B), max_grid_dim_y),
(B + max_grid_dim_y - 1) / max_grid_dim_y);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
pooled_embs_contiguous.type(), "permute_pooled_embeddings", [&] {
hipLaunchKernelGGL(( permute_pooled_embs_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
pooled_embs_contiguous.data_ptr<scalar_t>(),
offset_dim_list.data_ptr<int64_t>(),
permute_list.data_ptr<int64_t>(),
inv_offset_dim_list.data_ptr<int64_t>(),
permuted_pooled_embs.data_ptr<scalar_t>(),
B,
T,
dim_sum);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return permuted_pooled_embs;
}
} // namespace fbgemm_gpu
| 9d21fcb95dc2708354ff1d81192e8fb37eaff1a1.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAGuard.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "fbgemm_gpu/fbgemm_cuda_utils.cuh"
#include "fbgemm_gpu/layout_transform_ops.cuh"
#include "fbgemm_gpu/permute_pooled_embedding_ops_split.h"
#include "fbgemm_gpu/sparse_ops_utils.h"
using Tensor = at::Tensor;
namespace fbgemm_gpu {
Tensor permute_pooled_embs_split_gpu(
const Tensor& pooled_embs, // [B_local][Sum_T_global(D)]
const Tensor& offset_dim_list,
const Tensor& permute_list,
const Tensor& inv_offset_dim_list,
const Tensor& inv_permute_list) {
// inv_permute_list is not being used so it's not checked here.
TENSOR_ON_CUDA_GPU(pooled_embs);
TENSOR_ON_CUDA_GPU(offset_dim_list);
TENSOR_ON_CUDA_GPU(permute_list);
TENSOR_ON_CUDA_GPU(inv_offset_dim_list);
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(pooled_embs.get_device());
// We couldn't pass the "pooled_embs.is_contiguous()" check in the backward
// pass after D22767058. TODO: optimize and make sure pooled_embs is
// contiguous.
auto pooled_embs_contiguous = pooled_embs.contiguous();
const int64_t B = pooled_embs_contiguous.size(0);
const int64_t T = permute_list.numel();
const int64_t dim_sum = pooled_embs_contiguous.size(1);
// inv_permute_list is not being used so it's not checked here.
TENSORS_ON_SAME_DEVICE(pooled_embs_contiguous, offset_dim_list);
TENSORS_ON_SAME_DEVICE(pooled_embs_contiguous, permute_list);
TENSORS_ON_SAME_DEVICE(pooled_embs_contiguous, inv_offset_dim_list);
TORCH_CHECK(offset_dim_list.numel() == permute_list.numel() + 1);
TORCH_CHECK(offset_dim_list.numel() == inv_offset_dim_list.numel());
Tensor permuted_pooled_embs = at::empty_like(pooled_embs_contiguous);
// This kernel is moving D elements per warp.
// We are launching ( div_round_up(T, warp_per_block), B ) blocks.
// The grid z dimension is also used by B in case it's greater than 65535.
const int32_t warp_per_block =
fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize;
const int32_t max_grid_dim_y =
32768; // The CUDA maximum is 65535, not a power of 2.
const dim3 threads(fbgemm_gpu::kMaxThreads);
const dim3 blocks(
fbgemm_gpu::div_round_up(T, warp_per_block),
std::min(static_cast<int32_t>(B), max_grid_dim_y),
(B + max_grid_dim_y - 1) / max_grid_dim_y);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
pooled_embs_contiguous.type(), "permute_pooled_embeddings", [&] {
permute_pooled_embs_kernel<scalar_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
pooled_embs_contiguous.data_ptr<scalar_t>(),
offset_dim_list.data_ptr<int64_t>(),
permute_list.data_ptr<int64_t>(),
inv_offset_dim_list.data_ptr<int64_t>(),
permuted_pooled_embs.data_ptr<scalar_t>(),
B,
T,
dim_sum);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return permuted_pooled_embs;
}
} // namespace fbgemm_gpu
|
86e29f3630fc7b07037190c3fda0e3103ce6ae45.hip | // !!! This is a file automatically generated by hipify!!!
#include <kernels/gpu/fused_batch_norm.h>
#include <core/tensor_builder.h>
#include <global/operator_factory.h>
#include <global/fp16_operator_factory.h>
#include <backend/name.h>
#include <utils/assert.h>
#include <core/device.h>
#include <vector>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <runtime/runtime.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
#include "kernels/gpu/cudax_fp16_math.h"
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace gpu {
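// The two kernels below compute y = scale[c] * (x - mean[c]) / sqrt(var[c] + epsilon) + bias[c]
// in two stages: inner_vec_kernel first rewrites the per-channel variance as
// 1 / sqrt(var + epsilon), then gpu_fused_batch_norm_compute_kernel applies the
// affine normalization, recovering the channel index c of a flattened element
// as (index % (step * slice)) / step.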
template<typename T>
static __global__ void gpu_fused_batch_norm_compute_kernel(const T* data,T* out, int size, int step, int slice,
const T* mean, const T* variance, const T* scale, const T* bias ) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
int dim = index % ( step * slice ) / (step);
out[index] = (data[index] - mean[dim]) * variance[dim] * scale[dim] + bias[dim];
}
}
template<typename T>
static __global__ void inner_vec_kernel(const int N, float epsilon, const T* input, T* output) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < N) {
output[index] = T(1) / T(sqrt(input[index] + T(epsilon)));
}
}
#ifdef TS_USE_CUDA_FP16
template<>
__global__ void inner_vec_kernel<half>(const int N, float epsilon, const half* input, half* output) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
half one = half(1.f);
half half_epsilon = half(epsilon);
for (; index < N; index += blockDim.x * gridDim.x) {
output[index] = one / half(sqrt(input[index] + half_epsilon));
}
}
#endif
template<typename T>
static void gpu_fused_batch_norm_compute_run(const Tensor &x,
const Tensor &mean, const Tensor &variance,
const Tensor &scale, const Tensor &bias,
int dim, float epsilon, Tensor &out) {
const Shape &shape = x.sizes();
//int predims = 1;
int backdims = 1;
//for (int i = 0; i < dim; i++) {
// predims *= shape[i];
//}
for (int i = dim + 1; i < shape.size(); i++) {
backdims *= shape[i];
}
const T *psrc = x.data<T>();
const T *pmean = mean.data<T>();
const T *pvariance = variance.data<T>();
const T *pscale = scale.data<T>();
const T *pbias = bias.data<T>();
T *pdst = out.data<T>();
Shape vec_shape = variance.sizes();
Tensor vec_tensor(RuntimeContext::FlowMemory(), variance.dtype(), vec_shape, variance.device());
T* vec_data = vec_tensor.data<T>();
int vec_len = vec_tensor.count();
RUN_KERNEL(inner_vec_kernel<T>, CUDA_BLOCK(vec_len, CUDA_THREAD_NUM), CUDA_THREAD_NUM,
vec_len, epsilon, pvariance, vec_data);
RUN_KERNEL(gpu_fused_batch_norm_compute_kernel<T>,
CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
psrc, pdst, out.count(), backdims, shape[dim], pmean, vec_data, pscale, pbias);
}
void FusedBatchNorm::batch_norm(const Tensor &x, const Tensor &mean, const Tensor &variance,
const Tensor &scale, const Tensor &bias,
int dim, float epsilon, Tensor &out) {
// Notice: the memory device of all tensors is CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_fused_batch_norm_compute_run<TYPE>(x, mean, variance, scale, bias, dim, epsilon, out); break; }
//DECLARE_COMPUTE_RUN(INT8, int8_t);
//DECLARE_COMPUTE_RUN(UINT8, uint8_t);
//DECLARE_COMPUTE_RUN(INT16, int16_t);
//DECLARE_COMPUTE_RUN(UINT16, uint16_t);
//DECLARE_COMPUTE_RUN(INT32, int32_t);
//DECLARE_COMPUTE_RUN(UINT32, uint32_t);
//DECLARE_COMPUTE_RUN(INT64, int64_t);
//DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(FusedBatchNorm, GPU, name::layer::fused_batch_norm())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(FusedBatchNorm, GPU, name::layer::fused_batch_norm())
#endif
| 86e29f3630fc7b07037190c3fda0e3103ce6ae45.cu | #include <kernels/gpu/fused_batch_norm.h>
#include <core/tensor_builder.h>
#include <global/operator_factory.h>
#include <global/fp16_operator_factory.h>
#include <backend/name.h>
#include <utils/assert.h>
#include <core/device.h>
#include <vector>
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <runtime/runtime.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
#include "kernels/gpu/cudax_fp16_math.h"
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace gpu {
template<typename T>
static __global__ void gpu_fused_batch_norm_compute_kernel(const T* data,T* out, int size, int step, int slice,
const T* mean, const T* variance, const T* scale, const T* bias ) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
int dim = index % ( step * slice ) / (step);
out[index] = (data[index] - mean[dim]) * variance[dim] * scale[dim] + bias[dim];
}
}
template<typename T>
static __global__ void inner_vec_kernel(const int N, float epsilon, const T* input, T* output) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < N) {
output[index] = T(1) / T(sqrt(input[index] + T(epsilon)));
}
}
#ifdef TS_USE_CUDA_FP16
template<>
__global__ void inner_vec_kernel<half>(const int N, float epsilon, const half* input, half* output) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
half one = half(1.f);
half half_epsilon = half(epsilon);
for (; index < N; index += blockDim.x * gridDim.x) {
output[index] = one / half(sqrt(input[index] + half_epsilon));
}
}
#endif
template<typename T>
static void gpu_fused_batch_norm_compute_run(const Tensor &x,
const Tensor &mean, const Tensor &variance,
const Tensor &scale, const Tensor &bias,
int dim, float epsilon, Tensor &out) {
const Shape &shape = x.sizes();
//int predims = 1;
int backdims = 1;
//for (int i = 0; i < dim; i++) {
// predims *= shape[i];
//}
for (int i = dim + 1; i < shape.size(); i++) {
backdims *= shape[i];
}
const T *psrc = x.data<T>();
const T *pmean = mean.data<T>();
const T *pvariance = variance.data<T>();
const T *pscale = scale.data<T>();
const T *pbias = bias.data<T>();
T *pdst = out.data<T>();
Shape vec_shape = variance.sizes();
Tensor vec_tensor(RuntimeContext::FlowMemory(), variance.dtype(), vec_shape, variance.device());
T* vec_data = vec_tensor.data<T>();
int vec_len = vec_tensor.count();
RUN_KERNEL(inner_vec_kernel<T>, CUDA_BLOCK(vec_len, CUDA_THREAD_NUM), CUDA_THREAD_NUM,
vec_len, epsilon, pvariance, vec_data);
RUN_KERNEL(gpu_fused_batch_norm_compute_kernel<T>,
CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
psrc, pdst, out.count(), backdims, shape[dim], pmean, vec_data, pscale, pbias);
}
void FusedBatchNorm::batch_norm(const Tensor &x, const Tensor &mean, const Tensor &variance,
const Tensor &scale, const Tensor &bias,
int dim, float epsilon, Tensor &out) {
// Notice: the memory device of all tensors is CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_fused_batch_norm_compute_run<TYPE>(x, mean, variance, scale, bias, dim, epsilon, out); break; }
//DECLARE_COMPUTE_RUN(INT8, int8_t);
//DECLARE_COMPUTE_RUN(UINT8, uint8_t);
//DECLARE_COMPUTE_RUN(INT16, int16_t);
//DECLARE_COMPUTE_RUN(UINT16, uint16_t);
//DECLARE_COMPUTE_RUN(INT32, int32_t);
//DECLARE_COMPUTE_RUN(UINT32, uint32_t);
//DECLARE_COMPUTE_RUN(INT64, int64_t);
//DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(FusedBatchNorm, GPU, name::layer::fused_batch_norm())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(FusedBatchNorm, GPU, name::layer::fused_batch_norm())
#endif
|
76d353ef17c2b1b39d899a704494313012848199.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_dropout_layer.hpp"
namespace caffe {
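// cuDNN dropout is applied only in the TRAIN phase; in TEST both passes reduce
// to a plain copy. The reserve_space_ buffer carries the dropout mask produced
// by cudnnDropoutForward into cudnnDropoutBackward, so the backward pass masks
// and scales the gradients consistently with the forward pass.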
template <typename Ftype, typename Btype>
void CuDNNDropoutLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
if (this->phase_ == TRAIN) {
CUDNN_CHECK(cudnnDropoutForward(Caffe::cudnn_handle(0),
dropout_desc_,
this->bottom_desc_, bottom_data,
this->top_desc_, top_data,
reserve_space_.data(), reserve_space_size_));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
} else {
caffe_copy<Ftype>(bottom[0]->count(), bottom_data, top_data);
}
}
template <typename Ftype, typename Btype>
void CuDNNDropoutLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
if (propagate_down[0]) {
if (this->phase_ == TRAIN) {
CUDNN_CHECK(cudnnDropoutBackward(Caffe::cudnn_handle(0),
dropout_desc_,
this->top_desc_, top_diff,
this->bottom_desc_, bottom_diff,
reserve_space_.data(), reserve_space_size_));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNDropoutLayer);
} // namespace caffe
#endif
| 76d353ef17c2b1b39d899a704494313012848199.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_dropout_layer.hpp"
namespace caffe {
template <typename Ftype, typename Btype>
void CuDNNDropoutLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
if (this->phase_ == TRAIN) {
CUDNN_CHECK(cudnnDropoutForward(Caffe::cudnn_handle(0),
dropout_desc_,
this->bottom_desc_, bottom_data,
this->top_desc_, top_data,
reserve_space_.data(), reserve_space_size_));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
} else {
caffe_copy<Ftype>(bottom[0]->count(), bottom_data, top_data);
}
}
template <typename Ftype, typename Btype>
void CuDNNDropoutLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
if (propagate_down[0]) {
if (this->phase_ == TRAIN) {
CUDNN_CHECK(cudnnDropoutBackward(Caffe::cudnn_handle(0),
dropout_desc_,
this->top_desc_, top_diff,
this->bottom_desc_, bottom_diff,
reserve_space_.data(), reserve_space_size_));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNDropoutLayer);
} // namespace caffe
#endif
|
08cda799271f304eb56038c8ac34c6136475a617.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <chrono>
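// sumOne counts the 1-entries of m with a grid-stride loop: each thread keeps a
// private tally, folds it into its block's partialSum slot with atomicAdd, and
// thread 0 of every block then adds that slot into the global counter sum[0].
// Both partialSum and sum are assumed to start at zero.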
__global__
void sumOne(int n,int *m,int *partialSum,int *sum){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int threadSum =0;
for (int i = index; i < n; i += stride){
if(m[i]==1){
threadSum++;
}
}
// Block Sum
atomicAdd(&partialSum[blockIdx.x],threadSum);
__syncthreads();
if(threadIdx.x==0){
// Global Sum;
atomicAdd(&sum[0],partialSum[blockIdx.x]);
}
}
int main(int argc,char **argv){
//Read input matrix
std::ifstream infile;
infile.open(argv[1]);
if (!infile.is_open()){
std::cerr << "Couldn't read " << argv[1] << std::endl;
return 0;
}
int w,h;
infile >> w >> h;
int N = w*h;
int *m;
//Unified memory allocation
hipMallocManaged(&m, N*sizeof(int));
for(int i=0;i<N;i++){
infile >> m[i];
}
infile.close();
auto start = std::chrono::system_clock::now();
//Block,Grid parameters
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
int *partialSum;
int *sum;
hipMallocManaged(&partialSum,numBlocks*sizeof(int));
hipMallocManaged(&sum, sizeof(int));
//prefetch input matrix
int device = -1;
hipGetDevice(&device);
hipMemPrefetchAsync(m, N*sizeof(int), device, NULL);
hipMemPrefetchAsync(partialSum, numBlocks*sizeof(int), device, NULL);
hipMemPrefetchAsync(sum, sizeof(int), device, NULL);
//Sum ones
hipLaunchKernelGGL(( sumOne), dim3(numBlocks), dim3(blockSize), 0, 0, N, m, partialSum,sum);
hipDeviceSynchronize();
std::cout << sum[0] << std::endl;
hipFree(m);
hipFree(partialSum);
hipFree(sum);
return 0;
} | 08cda799271f304eb56038c8ac34c6136475a617.cu | #include <iostream>
#include <fstream>
#include <chrono>
__global__
void sumOne(int n,int *m,int *partialSum,int *sum){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int threadSum =0;
for (int i = index; i < n; i += stride){
if(m[i]==1){
threadSum++;
}
}
// Block Sum
atomicAdd(&partialSum[blockIdx.x],threadSum);
__syncthreads();
if(threadIdx.x==0){
// Global Sum;
atomicAdd(&sum[0],partialSum[blockIdx.x]);
}
}
int main(int argc,char **argv){
//Read input matrix
std::ifstream infile;
infile.open(argv[1]);
if (!infile.is_open()){
std::cerr << "Couldn't read " << argv[1] << std::endl;
return 0;
}
int w,h;
infile >> w >> h;
int N = w*h;
int *m;
//Unified memory allocation
cudaMallocManaged(&m, N*sizeof(int));
for(int i=0;i<N;i++){
infile >> m[i];
}
infile.close();
auto start = std::chrono::system_clock::now();
//Block,Grid parameters
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
int *partialSum;
int *sum;
cudaMallocManaged(&partialSum,numBlocks*sizeof(int));
cudaMallocManaged(&sum, sizeof(int));
//prefetch input matrix
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(m, N*sizeof(int), device, NULL);
cudaMemPrefetchAsync(partialSum, numBlocks*sizeof(int), device, NULL);
cudaMemPrefetchAsync(sum, sizeof(int), device, NULL);
//Sum ones
sumOne<<<numBlocks, blockSize>>>(N, m, partialSum,sum);
cudaDeviceSynchronize();
std::cout << sum[0] << std::endl;
cudaFree(m);
cudaFree(partialSum);
cudaFree(sum);
return 0;
} |
fdab538e97fe9c1908b5ad26cfbc21fbb3468281.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thread>
#include <iomanip>
#include <cstdlib>
#include <hip/hip_runtime.h>
// to remove intellisense highlighting
#include <device_launch_parameters.h>
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>
#include "BruteFactor.h"
using namespace std;
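// brute() runs BruteFactor's plain (non-CUDA) factorization via getPrimes(),
// brute2() the CUDA-accelerated path via getPrimesCuda(); main() launches them
// on std::thread workers, with only the CUDA thread currently enabled (the CPU
// thread and its display are commented out).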
void brute(const char* a, BruteFactor** LN){
*LN = new BruteFactor(a);
(*LN)->getPrimes(); //Brute force
}
void brute2(const char* a, BruteFactor** LN){
*LN = new BruteFactor(a);
(*LN)->getPrimesCuda(); //Brute force
}
int main(){
//Change this to your project path
//string filename, a_path = "C:\\Users\\Rene\\ReneA-GDrive\\Project\\DPS915\\A3_2\\A3_2\\";
string filename, a_path = "D:\\ReneA-GDrive\\Project\\DPS915\\A3_2\\A3_2\\";
do{
cout << "Enter File Name : ";
cin >> filename;
if (filename.compare("exit") != 0){
//use for small numbers < 20 digits
BruteFactor *noCuda, *hasCuda;
string location = (a_path + filename);
thread /*t1(brute, location.c_str(), &noCuda),*/ t2(brute2, location.c_str(), &hasCuda);
//t1.join();
t2.join();
//noCuda->display();
hasCuda->display();
}
} while (filename.compare("exit"));
return 0;
}
| fdab538e97fe9c1908b5ad26cfbc21fbb3468281.cu | #include <iostream>
#include <thread>
#include <iomanip>
#include <cstdlib>
#include <cuda_runtime.h>
// to remove intellisense highlighting
#include <device_launch_parameters.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
#include "BruteFactor.h"
using namespace std;
void brute(const char* a, BruteFactor** LN){
*LN = new BruteFactor(a);
(*LN)->getPrimes(); //Brute force
}
void brute2(const char* a, BruteFactor** LN){
*LN = new BruteFactor(a);
(*LN)->getPrimesCuda(); //Brute force
}
int main(){
//Change this to your project path
//string filename, a_path = "C:\\Users\\Rene\\ReneA-GDrive\\Project\\DPS915\\A3_2\\A3_2\\";
string filename, a_path = "D:\\ReneA-GDrive\\Project\\DPS915\\A3_2\\A3_2\\";
do{
cout << "Enter File Name : ";
cin >> filename;
if (filename.compare("exit") != 0){
//use for small numbers < 20 digits
BruteFactor *noCuda, *hasCuda;
string location = (a_path + filename);
thread /*t1(brute, location.c_str(), &noCuda),*/ t2(brute2, location.c_str(), &hasCuda);
//t1.join();
t2.join();
//noCuda->display();
hasCuda->display();
}
} while (filename.compare("exit"));
return 0;
}
|
db81bf6862323e654e16835b0ddc97cf8726e0a0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
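// Tolerance-based comparison functors for float_type columns: values within
// EPSILON of each other are treated as equal, and the ordering predicates
// require a gap larger than EPSILON before reporting strictly less or greater.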
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1970 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
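/*
Worked example: in a zone 5 hours behind UTC, get_utc_offset() sees
Jan 2, 1970 00:00 UTC as Jan 1 19:00 local and returns 19 - 24 = -5.
For a struct tm holding 12:00 meant as UTC, mktime() reads it as 12:00
local = 17:00 UTC, and adding -5 * 3600 recovers the epoch value of
12:00 UTC.
*/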
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
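// Reads only the rows listed in prm_vh from a compressed column segment on disk
// (file <load_file_name>.<colname>.<segNum>) straight into dest's host column,
// seeking and reading 4KB blocks instead of decompressing the whole segment.
// 'bits' is the stored element width (8/16/32/64); returns the segment's base
// value (lower_val) recorded by the compressor.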
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
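// Same block-wise SSD read as above, but the selected raw values are written into a
// plain host vector (dest) rather than into a CudaSet column. Returns lower_val.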
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
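// Loads an entire compressed segment of a column from disk into host memory.
// In interactive mode the raw file contents are cached in pinned host buffers
// (buffers/buffer_names/buffer_sizes), evicting the oldest buffers when the cache
// would exceed system memory; otherwise the segment is read directly into the
// host column vector. Char columns read their ".idx" dictionary-index file.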
std::clock_t tot_disk;
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
hipHostFree(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %u bytes from %s, read only %u", cnt+52, f1.c_str(), (unsigned int)rr);
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %u bytes from %s, read only %u", cnt+52, f1.c_str(), (unsigned int)rr);
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
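// Copies one segment of a column to the GPU at the given element offset.
// Uncompressed sets are copied straight from the host vectors; compressed sets are
// read via readSegmentsFromFile() and decompressed on the GPU with pfor_decompress().
// Decimal float columns are stored as scaled 64-bit integers and converted with
// long_to_float() unless phase_copy is set. alloced_switch redirects the output into
// the scratch buffer alloced_tmp instead of d_columns_*.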
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
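// Marks group boundaries over the already-sorted group-by columns: a row starts a new
// group if any key column differs from the previous row. The boundary flags are OR-ed
// across all keys, then the starting row indices are gathered into grp and their number
// into grp_count. cpy_bits holds the packed width (8/16/32 bits) of copied key columns.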
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
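// Builds a row permutation for the sort keys queued in sf: the permutation starts as the
// identity sequence and update_permutation() is applied for each key in turn (ascending).
// Char columns are permuted on the host via update_permutation_char_host().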
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
hipFree(temp);
}
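// Writes mCount records (starting at 'offset' in the host columns) to disk as one or more
// binary segments per column: integers and decimal floats go through pfor_compress(),
// plain floats are appended raw, and char columns are dictionary-encoded by compress_char().
// If op_sort is set the data is first sorted on the GPU and optionally split into
// partition_count partitions; per-column ".header" files are refreshed via writeHeader()
// when requested (check_type == 1).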
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
string s = file_name + ".interval";
ifstream f(s.c_str());
if (f.good()) {
f.seekg (0, f.end);
int length = f.tellg();
f.seekg (0, f.beg);
char* buff = new char[length];
f.read(buff, length);
f.close();
char* p = strtok(buff, "|");
string s1(p);
p = strtok(NULL, "|");
string s2(p);
delete [] buff;
s = file_name + ".key";
ifstream f1(s.c_str());
if (f1.good()) {
f1.seekg (0, f1.end);
length = f1.tellg();
f1.seekg (0, f1.beg);
buff = new char[length+1];
buff[length] = 0;
f1.read(buff, length);
f1.close();
string s3(buff);
delete [] buff;
load_file_name = file_name;
calc_intervals(s1, s2, s3, total_segments, append);
int_check = 1;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && total_segments == 1) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
char* strings = new char[oldCount*char_size[colname]];
binary_file.read(strings, oldCount*char_size[colname]);
binary_file.close();
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < oldCount; z++) {
char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
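// calc_intervals: sorts the set by (dt1, index), lets the gpu_interval functor derive each
// row's dt2 from its neighbours, and, when appending, revisits earlier segments whose dt2
// column still contains zeros, matches them to the new rows (lower_bound on the index column,
// hashed for char indexes) with gpu_interval_set, and re-compresses those dt2 segments.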
void CudaSet::calc_intervals(string dt1, string dt2, string index, unsigned int total_segs, bool append) {
alloced_switch = 1;
not_compressed = 1;
thrust::device_vector<unsigned int> permutation;
thrust::device_vector<int_type> stencil(maxRecs);
thrust::device_vector<int_type> d_dt2(maxRecs);
thrust::device_vector<int_type> d_index(maxRecs);
phase_copy = 0;
queue<string> sf;
sf.push(dt1);
sf.push(index);
gpu_perm(sf, permutation);
for(unsigned int i = 0; i < columnNames.size(); i++) {
if(type[columnNames[i]] == 0)
apply_permutation(d_columns_int[columnNames[i]], thrust::raw_pointer_cast(permutation.data()), mRecCount, (int_type*)thrust::raw_pointer_cast(stencil.data()), 0);
else {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[columnNames[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[columnNames[i]], h_permutation, mRecCount, t, char_size[columnNames[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[columnNames[i]]*mRecCount, h_columns_char[columnNames[i]]);
delete [] t;
};
};
if(type[index] == 2) {
d_columns_int[index] = thrust::device_vector<int_type>(mRecCount);
h_columns_int[index] = thrust::host_vector<int_type>(mRecCount);
for(int i = 0; i < mRecCount; i++)
h_columns_int[index][i] = MurmurHash64A(&h_columns_char[index][i*char_size[index]], char_size[index], hash_seed)/2;
d_columns_int[index] = h_columns_int[index];
};
thrust::counting_iterator<unsigned int> begin(0);
gpu_interval ff(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_columns_int[dt2].data()), thrust::raw_pointer_cast(d_columns_int[index].data()));
thrust::for_each(begin, begin + mRecCount - 1, ff);
auto stack_count = mRecCount;
if(append) {
not_compressed = 0;
size_t mysz = 8;
if(char_size[index] > int_size)
mysz = char_size[index];
if(mysz*maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, mysz*maxRecs);
alloced_sz = mysz*maxRecs;
}
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
d_columns_int[dt2].resize(0);
thrust::device_vector<unsigned int> output(stack_count);
for(int i = 0; i < total_segments; i++) {
CopyColumnToGpu(dt2, i, 0);
if(thrust::count(d_col, d_col+mRecCount,0)) {
thrust::copy(d_col, d_col+mRecCount, d_dt2.begin());
if(type[index] == 2) {
string f1 = load_file_name + "." + index + "." + to_string(i) + ".hash";
FILE* f = fopen(f1.c_str(), "rb" );
unsigned int cnt;
fread(&cnt, 4, 1, f);
unsigned long long int* buff = new unsigned long long int[cnt];
fread(buff, cnt*8, 1, f);
fclose(f);
thrust::copy(buff, buff + cnt, d_index.begin());
delete [] buff;
}
else {
CopyColumnToGpu(index, i, 0);
thrust::copy(d_col, d_col+mRecCount, d_index.begin());
};
thrust::lower_bound(d_columns_int[index].begin(), d_columns_int[index].begin()+stack_count, d_index.begin(), d_index.begin() + mRecCount, output.begin());
gpu_interval_set f(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_dt2.data()),
thrust::raw_pointer_cast(d_index.data()), thrust::raw_pointer_cast(d_columns_int[index].data()),
thrust::raw_pointer_cast(output.data()));
thrust::for_each(begin, begin + mRecCount, f);
string str = load_file_name + "." + dt2 + "." + to_string(i);
pfor_compress( thrust::raw_pointer_cast(d_dt2.data()), mRecCount*int_size, str, h_columns_int[dt2], 0);
};
};
}
};
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << os.front() << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
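// Display: streams up to 'limit' rows (all rows when limit == 0) through the row_cb callback.
// String columns are materialized by seeking into their dictionary files (string_map/data_dict);
// compressed or filtered sets are decompressed segment by segment via copyColumns()/CopyToHost().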
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%lld", buffer, rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
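// Store: writes the set either as delimited text (binary == 0; to a file or, with term, to stdout)
// or as compressed binary segments (binary != 0) by delegating to compress(); in the binary path
// the data dictionary entry for each column is refreshed first. An empty set with binary == 1
// only rewrites the per-column headers ("tails").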
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.resize(max_len); // resize (not reserve) so fread into &bf[0] below stays within bounds
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%lld", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%lld", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
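// compress_char: dictionary-encodes one segment of a char column. Strings not seen before are
// appended to the shared dictionary file, the 64-bit Murmur hashes of all rows are written to
// the segment's ".hash" file, and the per-row dictionary indices are PFOR-compressed into ".idx".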
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
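// State shared across LoadBigFile() calls: first_time triggers the one-off buffer/column sizing,
// rec_sz remembers the record count of the first chunk, process_piece is the read chunk size.
// LoadBigFile streams a delimited text file in chunks sized to fit free GPU memory, finds line
// breaks on the GPU, parses fields with parse_functor, and converts them with gpu_atoll/gpu_atold/
// gpu_atof and the date functors; it returns true once the whole file has been consumed.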
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(readbuff[rb-1] != '\n') {
rb++;
readbuff[rb-1] = '\n';
};
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
//cout << "curr_cnt " << curr_cnt << " Memory: " << getFreeMem() << endl;
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
hipMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
hipMemset(dest[i],0,max_len*rec_sz);
}
else {
hipMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
hipMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, hipMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
hipFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
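// The compare() overloads below evaluate "column/constant <op> constant" predicates and return a
// freshly allocated device array of bools (one per record). op_type codes: 1 '<', 2 '>', 4 '=',
// 5 '<=', 6 '>=', anything else '!='. The int_type overload scales operands by powers of ten
// (p1/p2) so fixed-point decimals with different scales compare correctly.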
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
res = ((d-s) > EPSILON);
else
if (op_type == 1) // <
res = ((s-d) > EPSILON);
else
if (op_type == 6) // >=
res = ((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON));
else
if (op_type == 5) // <=
res = ((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON));
else
if (op_type == 4) // =
res = (((d-s) < EPSILON) && ((d-s) > -EPSILON));
else // !=
res = !(((d-s) < EPSILON) && ((d-s) > -EPSILON));
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
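// Column-vs-constant integer/decimal comparison. p1 and p2 carry decimal scale information:
// the constant is multiplied by 10^p2 and, when p1 is set, the column values are scaled
// through power_functor so both operands are compared at the same fixed-point precision.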
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
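// Column-vs-column integer comparison; p1/p2 optionally rescale the respective operands by
// powers of 10 (via power_functor) to align decimal precision before the relational functor runs.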
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
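// Mixed int/float arithmetic: the integer column is promoted to float in a buffer taken from
// the allocation pool and then combined with column2. 'reverse' swaps the operand order,
// which matters for MINUS and DIV.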
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // promote the integer column to float into the scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
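// Integer column op constant. p1/p2 describe decimal scaling: the constant and/or the column
// are rescaled by powers of 10 so fixed-point operands line up; the result goes into a pooled buffer.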
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
int_type d1 = d; // keep the unscaled constant for the MUL case
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>()); // d was already scaled by 10^p2 above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
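// Integer column op integer column, with optional power-of-10 rescaling of either operand
// (p1/p2) to align decimal precision; 'reverse' swaps the operand order.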
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
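// Loads one segment of an index file named <index_name>.<segment>. The layout read below is:
// dictionary size, dictionary entries (8 bytes each), fit_count, bits_encoded, vals_count,
// real_count, the packed values, and a trailing flag byte. In interactive mode the raw file
// is cached in pinned host memory and reused on subsequent calls.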
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
hipHostMalloc(&buff, fileSize, hipHostMallocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, hipMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
// assumed layout: the flag byte follows the packed values block, mirroring the fread(&res, 1, 1, f) in the non-interactive branch
res = index_buffers[f1][4 + 8*sz + 16 + vals_count*8];
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
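// Sets up column metadata for a compressed on-disk dimension table: reads optional .sort/.presort
// ordering hints, records the compression type of int/decimal columns from their segment headers,
// and creates the host/device vectors appropriate to each column type.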
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
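// For filtered sets, (re)allocates the shared scratch buffer sized for the source table's largest
// segment; otherwise each requested column is allocated on the device up front.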
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t = varNames[a->source_name]; // a is filtered, so size the scratch buffer from its source set
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
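// Final decode step after segment copies: columns that arrived packed as 8/16/32-bit payloads
// (recorded in cpy_bits) are widened back to 64-bit in a scratch buffer and the per-segment base
// value from cpy_init_val is added back; float columns are then converted from their scaled
// integer representation.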
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
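// Copies the requested columns of one segment to the GPU. For filtered sets the segment is run
// through filter_op first, device columns are optionally resized, and the surviving rows are
// gathered from the source table; cpy_bits/cpy_init_val are cleared so copyFinalize knows how
// each column was packed.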
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
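// Gathers the rows selected by prm_d from the scratch copy of a segment into the destination
// column. If the segment was copied in packed form (cpy_bits set) the gather runs at the packed
// width (8/16/32/64 bits) and copyFinalize widens the values later.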
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
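// Copies a whole segment (no row selection) from the scratch buffer into the destination column,
// honoring the packed width recorded in cpy_bits.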
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
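// Loads the requested columns of the right-hand table for a join, segment by segment, and returns
// the total row count. The join key f2 is pushed last so it is always loaded; rsz/flt control
// whether device columns are resized and whether the per-segment filter is applied.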
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
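// Splits a table into segments when its uncompressed size would not fit comfortably in free GPU
// memory (roughly a third of it), adjusting segCount and maxRecs accordingly.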
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
hipMemcpy( (void*)tmp, (void*) key, RecCount*len, hipMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
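// Applies the filter expression stored in result set s to source table f for one segment.
// The zone map is consulted first; only when a segment cannot be resolved wholesale ('R') are the
// filter columns copied to the GPU, the boolean predicate evaluated, and the surviving row indices
// written to b->prm_d.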
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
hipFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
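// Appends the records of set s to set f. Disk-to-disk inserts copy the source segment files under
// the destination's naming (re-encoding string dictionaries so codes stay consistent);
// memory-to-memory inserts append host columns; memory-to-disk compresses the in-memory segments
// onto the destination's files.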
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
hipMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, hipMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
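// delete_records: removes from a disk-based table the rows that satisfy the
// filter currently held in the global op_* queues. Each segment is checked
// against its zone map first; segments that cannot contain matches are kept
// (renamed if earlier segments were dropped), the rest are filtered on the GPU,
// re-compressed and written back under a new segment number.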
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
hipFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) { // refresh every column header with the new segment count and totals
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
hipFree(d);
};
};
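// save_col_data: serializes the data dictionary (table name -> column name ->
// {type, length}) to a binary file: an 8-byte table count, then a length-prefixed
// name for every table and column, with two 4-byte fields (col_type, col_length)
// per column.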
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
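// load_col_data: reads the file written by save_col_data back into the
// in-memory dictionary; a missing file is reported but not treated as fatal.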
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
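// check_bitmaps_exist: like check_bitmap_file_exist, but when every required
// join bitmap index is present it also moves the right table's filter (values,
// types, precisions) onto the left table so the filter can be evaluated through
// the bitmaps, appending an AND when the left table already has a filter.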
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
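// update_char_permutation: materializes a string column (stored on disk as a
// dictionary file addressed by the integer codes in d_columns_int) into a
// contiguous char buffer in permutation order, then refines the permutation by
// sorting those strings on the device or on the host, as requested.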
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
hipMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, hipMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
hipMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
hipMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, hipMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
hipFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
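// compress_int: dictionary-encodes a host vector of integers and writes it in a
// bit-packed form. Judging by the writes below, the layout is: [4] dictionary
// size, the sorted dictionary values, [4] fit_count (codes per 64-bit word),
// [4] bits_encoded, [4] packed word count, [4] real value count, then the packed
// 64-bit words. Illustrative example (not taken from the code): 5 distinct
// values give bits_encoded = ceil(log2(6)) = 3 and fit_count = 64/3 = 21.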
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
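// Helpers for the expression evaluator: fetch a column either directly from the
// CudaSet (when the name refers to a real column) or from the stack of
// intermediate result vectors left by previous operations.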
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->get_int_by_name(s1_val);
else {
t = exe_vectors.top();
exe_vectors.pop();
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
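// getTotalSystemMemory: total physical RAM of the host, one implementation per
// platform (Win64, macOS, POSIX). Used when caching segment buffers to decide
// whether older buffers must be evicted.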
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#elif __APPLE__
size_t getTotalSystemMemory()
{
int mib [] = { CTL_HW, HW_MEMSIZE };
size_t value = 0;
size_t length = sizeof(value);
if(-1 == sysctl(mib, 2, &value, &length, NULL, 0))
return 0;
return value;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
| db81bf6862323e654e16835b0ddc97cf8726e0a0.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1900 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
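// CudaSet constructors: the first two build a set backed by an external source
// (a text file or binary segments on disk); the remaining ones build empty
// in-memory sets used for intermediate results.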
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
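// readSsdSegmentsFromFile: random-access read of selected rows (prm_vh) straight
// from a segment file on disk, without decompressing the whole segment on the
// GPU. The reads below assume a fixed-width layout: a 4-byte count and an 8-byte
// base value in the header, then values packed at 8/16/32/64 bits each, buffered
// 4KB at a time. The base value is returned to the caller.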
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
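// readSegmentsFromFile: reads one compressed segment of a column from disk into
// the matching host vector. In interactive mode the raw file is cached in pinned
// host memory ("buffers"), and older buffers are evicted once the cache would
// exceed the amount of physical RAM.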
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
cudaFreeHost(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1); // register the buffer once in the eviction queue
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
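// CopyColumnToGpu (single segment): uncompressed data is copied directly from
// the host vector; compressed data is taken from the buffer cache (or the host
// vector) and decompressed on the device with pfor_decompress. Decimal columns
// are additionally converted from scaled integers to floats unless we are in
// the copy phase.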
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
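// GroupBy: builds the grouping vector for an already sorted set. For every
// grouping column a boolean stencil marks positions where adjacent values
// differ, the stencils are OR-ed together, and 'grp' ends up holding the first
// row of every group (grp_count groups). Bit-packed 8/16/32-bit columns are
// compared at their packed element width.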
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) { // one iteration per column; the queue shrinks until it is empty
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
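// gpu_perm: computes a permutation that sorts the set by the columns in 'sf'
// (ascending). Keys from the queue are applied one at a time through
// update_permutation; char columns are permuted and compared on the host.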
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
cudaFree(temp);
}
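// compress: writes mCount rows starting at 'offset' to disk as compressed
// segments. If op_sort is set the data is permuted first and optionally split
// into partition_count partitions; integer and decimal columns go through
// pfor_compress, plain floats are written uncompressed, and char columns are
// dictionary-compressed via compress_char. Headers are refreshed at the end.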
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
string s = file_name + ".interval";
ifstream f(s.c_str());
if (f.good()) {
f.seekg (0, f.end);
int length = f.tellg();
f.seekg (0, f.beg);
char* buff = new char[length];
f.read(buff, length);
f.close();
char* p = strtok(buff, "|");
string s1(p);
p = strtok(NULL, "|");
string s2(p);
delete [] buff;
s = file_name + ".key";
ifstream f1(s.c_str());
if (f1.good()) {
f1.seekg (0, f1.end);
length = f1.tellg();
f1.seekg (0, f1.beg);
buff = new char[length+1];
buff[length] = 0;
f1.read(buff, length);
f1.close();
string s3(buff);
delete [] buff;
load_file_name = file_name;
calc_intervals(s1, s2, s3, total_segments, append);
int_check = 1;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && total_segments == 1) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
char* strings = new char[oldCount*char_size[colname]];
binary_file.read(strings, oldCount*char_size[colname]);
binary_file.close();
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < oldCount; z++) {
char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
cudaFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
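// calc_intervals: sorts the set on the dt1 and index columns and, via the
// gpu_interval functor, appears to close each row's interval by writing the
// next row's dt1 into dt2 for rows sharing the same index key. In append mode
// the previously stored dt2 segments that still contain open intervals (zeros)
// are matched against the new data with lower_bound, updated, and rewritten.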
void CudaSet::calc_intervals(string dt1, string dt2, string index, unsigned int total_segs, bool append) {
alloced_switch = 1;
not_compressed = 1;
thrust::device_vector<unsigned int> permutation;
thrust::device_vector<int_type> stencil(maxRecs);
thrust::device_vector<int_type> d_dt2(maxRecs);
thrust::device_vector<int_type> d_index(maxRecs);
phase_copy = 0;
queue<string> sf;
sf.push(dt1);
sf.push(index);
gpu_perm(sf, permutation);
for(unsigned int i = 0; i < columnNames.size(); i++) {
if(type[columnNames[i]] == 0)
apply_permutation(d_columns_int[columnNames[i]], thrust::raw_pointer_cast(permutation.data()), mRecCount, (int_type*)thrust::raw_pointer_cast(stencil.data()), 0);
else {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[columnNames[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[columnNames[i]], h_permutation, mRecCount, t, char_size[columnNames[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[columnNames[i]]*mRecCount, h_columns_char[columnNames[i]]);
delete [] t;
};
};
if(type[index] == 2) {
d_columns_int[index] = thrust::device_vector<int_type>(mRecCount);
h_columns_int[index] = thrust::host_vector<int_type>(mRecCount);
for(int i = 0; i < mRecCount; i++)
h_columns_int[index][i] = MurmurHash64A(&h_columns_char[index][i*char_size[index]], char_size[index], hash_seed)/2;
d_columns_int[index] = h_columns_int[index];
};
thrust::counting_iterator<unsigned int> begin(0);
gpu_interval ff(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_columns_int[dt2].data()), thrust::raw_pointer_cast(d_columns_int[index].data()));
thrust::for_each(begin, begin + mRecCount - 1, ff);
auto stack_count = mRecCount;
if(append) {
not_compressed = 0;
size_t mysz = 8;
if(char_size[index] > int_size)
mysz = char_size[index];
if(mysz*maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, mysz*maxRecs);
alloced_sz = mysz*maxRecs;
}
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
d_columns_int[dt2].resize(0);
thrust::device_vector<unsigned int> output(stack_count);
for(int i = 0; i < total_segments; i++) {
CopyColumnToGpu(dt2, i, 0);
if(thrust::count(d_col, d_col+mRecCount,0)) {
thrust::copy(d_col, d_col+mRecCount, d_dt2.begin());
if(type[index] == 2) {
string f1 = load_file_name + "." + index + "." + to_string(i) + ".hash";
FILE* f = fopen(f1.c_str(), "rb" );
unsigned int cnt;
fread(&cnt, 4, 1, f);
unsigned long long int* buff = new unsigned long long int[cnt];
fread(buff, cnt*8, 1, f);
fclose(f);
thrust::copy(buff, buff + cnt, d_index.begin());
delete [] buff;
}
else {
CopyColumnToGpu(index, i, 0);
thrust::copy(d_col, d_col+mRecCount, d_index.begin());
};
thrust::lower_bound(d_columns_int[index].begin(), d_columns_int[index].begin()+stack_count, d_index.begin(), d_index.begin() + mRecCount, output.begin());
gpu_interval_set f(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_dt2.data()),
thrust::raw_pointer_cast(d_index.data()), thrust::raw_pointer_cast(d_columns_int[index].data()),
thrust::raw_pointer_cast(output.data()));
thrust::for_each(begin, begin + mRecCount, f);
string str = load_file_name + "." + dt2 + "." + to_string(i);;
pfor_compress( thrust::raw_pointer_cast(d_dt2.data()), mRecCount*int_size, str, h_columns_int[dt2], 0);
};
};
}
};
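// writeHeader: (re)creates "<file>.<column>.header" in the layout expected by the
// loader: total_count (8 bytes) followed by tot_segs, total_max and
// cnt_counts[<file>.<column>] (4 bytes each).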
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
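// reWriteHeader: rewrites the same header with an updated record count and per-segment
// maximum, used after segments have been dropped or merged.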
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
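// writeSortHeader: persists the op_sort / op_presort column lists to "<file>.sort" and
// "<file>.presort" (a count followed by length-prefixed names). When a list is empty the
// corresponding file is removed so stale ordering information is not picked up later.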
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << os.front() << endl;
idx = (unsigned int)os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
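// Display: streams up to 'limit' records through the row_cb callback. String columns are
// fetched by seeking into their dictionary files (string_map), decimal columns get their
// dot re-inserted according to decimal_zeroes, and timestamp columns are formatted from
// milliseconds with gmtime/strftime. Compressed or filtered sets are processed segment by
// segment through copyColumns/CopyToHost.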
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- TODO: convert this to an array of pointers to individually malloc'ed, properly sized structures --
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
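// Store: writes the set either as a delimited text file (binary == 0, optionally to stdout
// when term is set) or in the engine's compressed binary column format. The binary path
// also refreshes the data dictionary entry for every column before compressing the data
// segment by segment, either from a text source or from already loaded binary segments.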
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.resize(max_len); // must be resize, not reserve: fread below writes into &bf[0]
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
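// compress_char: dictionary-encodes one segment of a char column. Distinct values (keyed
// by their 64-bit MurmurHash) are appended to the column's dictionary file, the per-row
// hashes go to "<base>.<segment>.hash" (prefixed with the row count), and the per-row
// dictionary indices are pfor-compressed into "<base>.<segment>.idx".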
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
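// LoadBigFile: streams a delimited text file through the GPU parser in pieces of at most
// process_piece bytes. Each piece is copied to the device, newline offsets are gathered
// with copy_if, parse_functor splits the records into per-column buffers, and the typed
// gpu_* functors (gpu_date/gpu_tdate/gpu_atold/gpu_atoll/gpu_atof) convert them into the
// int/float/char columns. Returns true once the end of the file has been reached.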
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(readbuff[rb-1] != '\n') {
rb++;
readbuff[rb-1] = '\n';
};
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
//cout << "curr_cnt " << curr_cnt << " Memory: " << getFreeMem() << endl;
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
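// alloc_pool: grows the scratch-buffer pool (alloced_mem) used by the comparison and
// arithmetic operators below; each entry is a single 8*maxRecs-byte device allocation.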
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
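// logical_and / logical_or: combine two device bool masks element-wise into column1 and
// free column2, which is expected to have been allocated with thrust::device_malloc.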
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
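// compare(scalar, scalar): evaluates the comparison on the host and materializes the
// constant outcome as a device bool column of mRecCount entries.
// Hypothetical usage (t is a populated CudaSet*; op_type 2 means '>'):
//   bool* mask = t->compare((int_type)5, (int_type)7, 2);   // 7 > 5, so an all-true mask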
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
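// compare(column, constant): p1 and p2 are the powers of ten applied to the column and to
// the constant respectively, so that fixed-point operands are compared on a common scale;
// the scaled side is wrapped in a power_functor transform iterator.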
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
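// compare(column, column): same dispatch as above, but both inputs are device columns;
// whichever side has a non-zero decimal scale is routed through a power_functor transform
// iterator before the element-wise comparison into a freshly allocated bool mask.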
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
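// op(int column, float column): the integer column is first converted to float into a
// pooled scratch buffer, then the requested MUL/ADD/MINUS/DIV is applied element-wise;
// 'reverse' swaps the operand order for the non-commutative operations.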
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float into the pooled temp buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
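// op(int column, int constant): fixed-point arithmetic against a constant. p1 is the power
// of ten applied to the column and p2 the power applied to the constant; the result lands
// in a buffer taken from alloced_mem.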
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
int_type d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
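// op(int column, int column): element-wise integer arithmetic with optional decimal-scale
// alignment of either operand through power_functor.
// Hypothetical usage (t is a CudaSet* with both columns already on the device):
//   int_type* sum = t->op(col_a, col_b, "ADD", 0, 0, 0);   // sum[i] = col_a[i] + col_b[i]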
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
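// loadIndex: reads "<index>.<segment>", rebuilds idx_dictionary_int (value -> dictionary
// slot) and uploads the packed join values into idx_vals on the device. In interactive
// mode the whole file is cached in pinned host memory (index_buffers) so repeated queries
// skip the disk read. Returns the flag byte stored at the end of the file
// (non-interactive path only).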
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res = 0; // the interactive branch below does not read the trailing flag byte from the file
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
if(idx_vals.count(index_name))
cudaFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
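// initialize (compressed DIM tables): records per-column metadata (type, decimal scale,
// char length), loads the ".sort"/".presort" descriptors written by writeSortHeader into
// sorted_fields/presorted_fields, and reads the compression code of each int/decimal
// column's first segment into compTypes.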
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
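// In-memory variant of the initializer above: same per-type column setup, but no file
// metadata is read; record and segment counts start at zero and hostRecCount keeps the
// supplied record count.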
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
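// Builds an uncompressed, single-segment result set for a list of selected columns; each
// column's type, precision and size are copied from whichever loaded CudaSet already owns it.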
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
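// Builds the result set of a two-table join: every selected column inherits its type,
// precision and string dictionary from table a when present there, otherwise from table b.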
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
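// Mirrors a comparison operator code (> <-> <, >= <-> <=); all other codes are returned unchanged.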
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
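// For filtered sets, grows the shared staging buffer (alloced_tmp) so it can hold one column
// of the source table; for unfiltered sets, allocates each requested column on the device.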
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t;
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
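// Moves one column of the current segment into set a: rows are gathered through the
// permutation in a->prm_d when the segment was filtered row-by-row ('R'), otherwise the
// whole segment is copied and the record count is taken from the source.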
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
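// Expands columns that were copied in packed 8/16/32-bit form back to 64-bit values and adds
// the per-segment base value saved in cpy_init_val during the copy; float columns are rebuilt
// from the decoded integers.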
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
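// Copies the requested columns of one segment to the GPU. For filtered sets the segment is
// first run through filter_op (optionally resizing the device columns), then the surviving
// rows are gathered from the source table via the staging buffer.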
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
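// Gathers the rows selected by a->prm_d from the staging buffer into the destination column,
// keeping the packed 8/16/32-bit element width recorded in cpy_bits when the segment is packed.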
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
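// Copies a whole segment from the staging buffer into the destination column, again honouring
// the packed element width recorded in cpy_bits.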
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()+offset));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()+offset));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
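// Loads the requested columns of the right-hand table (plus the join column f2, queued last)
// for the given segment range; returns the row count brought to the device and stores it in
// right->mRecCount.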
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
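// Splits a set into segments when the requested columns would occupy more than about a third
// of free GPU memory; each segment is then sized to roughly a fifth of the free memory.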
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
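// Applies the filter expression attached to set s against one segment of its source set f.
// A zone-map check decides whether the segment matches completely ('A'), needs row-by-row
// evaluation ('R'), or can be skipped; only the 'R' case evaluates the predicate on the GPU
// and builds a row permutation in prm_d.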
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
cudaFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
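// Appends the records of set s to set f. Disk-to-disk inserts copy segment files directly and
// merge string dictionaries, memory-to-memory inserts extend the host columns, and
// memory-to-disk inserts compress each segment and append it to the destination files.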
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read at most sizeof(buf) bytes per chunk
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read at most sizeof(buf) bytes per chunk
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], i, 0); // bring segment i of this column to the GPU
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
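// Deletes the rows of a disk-based set that match the current filter: each candidate segment is
// loaded, the surviving rows are re-compressed in place, fully deleted segments are removed,
// and the remaining segment files are renumbered and their headers rewritten.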
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) { // rewrite every column header with the new segment count and totals
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
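// Writes the data dictionary (table name -> column name -> {type, length}) to a binary file;
// load_col_data below reads the same layout back in.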
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
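// Checks for join bitmap indexes (<left>.<right>.<column>.0 files) on the right table's filter
// columns; when they all exist, the right table's filter predicates are moved onto the left
// table and ANDed with any filter it already has.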
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
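// Materializes the string values of a dictionary-encoded column in the current permutation
// order (reading them from the dictionary file) and then sorts the permutation by those
// strings, either on the GPU or on the host.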
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
cudaMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
cudaFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
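// Dictionary-encodes a host vector of integers and bit-packs the codes: the sorted distinct
// values are written first, then the packing parameters (fit_count, bits_encoded, vals_count,
// real_count), followed by the codes packed fit_count per 64-bit word.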
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
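// Returns the device array for s1_val if it is a column of set a, otherwise pops the most
// recent intermediate result from the expression stack (get_host_vec below is the host-side
// counterpart).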
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->get_int_by_name(s1_val);
else {
t = exe_vectors.top();
exe_vectors.pop();
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#elif __APPLE__
size_t getTotalSystemMemory()
{
int mib [] = { CTL_HW, HW_MEMSIZE };
size_t value = 0;
size_t length = sizeof(value);
if(-1 == sysctl(mib, 2, &value, &length, NULL, 0))
return 0;
return value;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
|
ce078a0da84fad8db602569a86edba73ef3970da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This program is written by qp09.
* usually just for fun.
* Sat March 12 2016
*/
#include "../neuron/lif/GLIF.h"
#include "../neuron/lifb/GLIFB.h"
#include "../neuron/tj/GTJ.h"
#include "../neuron/max/GMax.h"
#include "../neuron/poisson/GPoisson.h"
#include "../neuron/array/GArray.h"
#include "../neuron/constant/GConstant.h"
#include "../synapse/static/GStatic.h"
//#include "../../include/GNeuron.h"
//#include "../../include/GSynapse.h"
#include "../utils/utils.h"
#include "runtime.h"
#include "gpu_func.h"
int cudaUpdateConstant(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( update_constant_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GConstantNeurons*)data, num, start_id);
return 0;
}
int cudaUpdatePoisson(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( update_poisson_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GPoissonNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateArray(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( update_array_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GArrayNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateLIFE(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( find_life_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GLIFENeurons*)data, num, start_id);
hipLaunchKernelGGL(( update_life_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GLIFENeurons*)data, num, start_id);
//update_dense_life_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFENeurons*)data, num, start_id);
return 0;
}
int cudaUpdateTJ(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( update_tj_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GTJNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateMax(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( update_max_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GMaxNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateStatic(void *data, int num, int start_id, BlockSize *pSize)
{
//update_static_hit<<<pSize->gridSize, pSize->blockSize>>>((GStaticSynapses*)data, num, start_id);
//reset_active_synapse<<<1, 1>>>();
hipLaunchKernelGGL(( update_dense_static_hit), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GStaticSynapses*)data, num, start_id);
return 0;
}
int cudaUpdateLIFEB(void *data, int num, int start_id, BlockSize *pSize)
{
hipLaunchKernelGGL(( find_lifeb_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GLIFEBNeurons*)data, num, start_id);
hipLaunchKernelGGL(( update_lifeb_neuron), dim3(pSize->gridSize), dim3(pSize->blockSize), 0, 0, (GLIFEBNeurons*)data, num, start_id);
//update_dense_life_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFENeurons*)data, num, start_id);
return 0;
}
int addCrossNeurons(int *ids, int num)
{
hipLaunchKernelGGL(( add_cross_neuron), dim3((num+MAXBLOCKSIZE-1)/MAXBLOCKSIZE), dim3(MAXBLOCKSIZE), 0, 0, ids, num);
hipLaunchKernelGGL(( add_cross_neuron), dim3(1), dim3(1), 0, 0, nullptr, num);
return 0;
}
int cudaDeliverNeurons(int *idx2index, int *crossnode_index2idx, int *global_cross_data, int *fired_n_num, int node_num, int neuron_num)
{
hipLaunchKernelGGL(( deliver_neurons), dim3((neuron_num + MAXBLOCKSIZE-1)/MAXBLOCKSIZE), dim3(MAXBLOCKSIZE), 0, 0, idx2index, crossnode_index2idx, global_cross_data, fired_n_num, node_num);
return 0;
}
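// Builds a launch configuration (grid/block/minGrid size) for every model type using the
// occupancy API; several models reuse other kernels for the occupancy query, the Static
// synapse kernel gets a fixed 128-thread block, and LIFEB copies the LIFE configuration.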
BlockSize * getBlockSize(int nSize, int sSize)
{
BlockSize *ret = (BlockSize*)malloc(sizeof(BlockSize)*TYPESIZE);
memset(ret, 0, sizeof(BlockSize)*TYPESIZE);
hipOccupancyMaxPotentialBlockSize(&(ret[Constant].minGridSize), &(ret[Constant].blockSize), update_constant_neuron, 0, nSize);
ret[Constant].gridSize = (upzero_else_set_one(nSize) + (ret[Constant].blockSize) - 1) / (ret[Constant].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[Poisson].minGridSize), &(ret[Poisson].blockSize), update_poisson_neuron, 0, nSize);
ret[Poisson].gridSize = (upzero_else_set_one(nSize) + (ret[Poisson].blockSize) - 1) / (ret[Poisson].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[Array].minGridSize), &(ret[Array].blockSize), update_poisson_neuron, 0, nSize);
ret[Array].gridSize = (upzero_else_set_one(nSize) + (ret[Array].blockSize) - 1) / (ret[Array].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[Decide].minGridSize), &(ret[Decide].blockSize), update_poisson_neuron, 0, nSize);
ret[Decide].gridSize = (upzero_else_set_one(nSize) + (ret[Decide].blockSize) - 1) / (ret[Decide].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[FFT].minGridSize), &(ret[FFT].blockSize), update_poisson_neuron, 0, nSize);
ret[FFT].gridSize = (upzero_else_set_one(nSize) + (ret[FFT].blockSize) - 1) / (ret[FFT].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[Mem].minGridSize), &(ret[Mem].blockSize), update_poisson_neuron, 0, nSize);
ret[Mem].gridSize = (upzero_else_set_one(nSize) + (ret[Mem].blockSize) - 1) / (ret[Mem].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[Max].minGridSize), &(ret[Max].blockSize), update_tj_neuron, 0, nSize);
ret[Max].gridSize = (upzero_else_set_one(nSize) + (ret[Max].blockSize) - 1) / (ret[Max].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[LIFE].minGridSize), &(ret[LIFE].blockSize), update_life_neuron, 0, nSize);
ret[LIFE].gridSize = (upzero_else_set_one(nSize) + (ret[LIFE].blockSize) - 1) / (ret[LIFE].blockSize);
hipOccupancyMaxPotentialBlockSize(&(ret[TJ].minGridSize), &(ret[TJ].blockSize), update_life_neuron, 0, nSize);
ret[TJ].gridSize = (upzero_else_set_one(nSize) + (ret[TJ].blockSize) - 1) / (ret[TJ].blockSize);
//hipOccupancyMaxPotentialBlockSize(&(ret[Static].minGridSize), &(ret[Static].blockSize), update_static_hit, 0, sSize);
ret[Static].blockSize = 128;
ret[Static].gridSize = (upzero_else_set_one(nSize) + (ret[Static].blockSize) - 1) / (ret[Static].blockSize);
ret[LIFEB].blockSize = ret[LIFE].blockSize;
ret[LIFEB].gridSize = ret[LIFE].gridSize;
ret[LIFEB].minGridSize = ret[LIFE].minGridSize;
return ret;
}
| ce078a0da84fad8db602569a86edba73ef3970da.cu | /* This program is written by qp09.
* usually just for fun.
* Sat March 12 2016
*/
#include "../neuron/lif/GLIF.h"
#include "../neuron/lifb/GLIFB.h"
#include "../neuron/tj/GTJ.h"
#include "../neuron/max/GMax.h"
#include "../neuron/poisson/GPoisson.h"
#include "../neuron/array/GArray.h"
#include "../neuron/constant/GConstant.h"
#include "../synapse/static/GStatic.h"
//#include "../../include/GNeuron.h"
//#include "../../include/GSynapse.h"
#include "../utils/utils.h"
#include "runtime.h"
#include "gpu_func.h"
int cudaUpdateConstant(void *data, int num, int start_id, BlockSize *pSize)
{
update_constant_neuron<<<pSize->gridSize, pSize->blockSize>>>((GConstantNeurons*)data, num, start_id);
return 0;
}
int cudaUpdatePoisson(void *data, int num, int start_id, BlockSize *pSize)
{
update_poisson_neuron<<<pSize->gridSize, pSize->blockSize>>>((GPoissonNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateArray(void *data, int num, int start_id, BlockSize *pSize)
{
update_array_neuron<<<pSize->gridSize, pSize->blockSize>>>((GArrayNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateLIFE(void *data, int num, int start_id, BlockSize *pSize)
{
find_life_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFENeurons*)data, num, start_id);
update_life_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFENeurons*)data, num, start_id);
//update_dense_life_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFENeurons*)data, num, start_id);
return 0;
}
int cudaUpdateTJ(void *data, int num, int start_id, BlockSize *pSize)
{
update_tj_neuron<<<pSize->gridSize, pSize->blockSize>>>((GTJNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateMax(void *data, int num, int start_id, BlockSize *pSize)
{
update_max_neuron<<<pSize->gridSize, pSize->blockSize>>>((GMaxNeurons*)data, num, start_id);
return 0;
}
int cudaUpdateStatic(void *data, int num, int start_id, BlockSize *pSize)
{
//update_static_hit<<<pSize->gridSize, pSize->blockSize>>>((GStaticSynapses*)data, num, start_id);
//reset_active_synapse<<<1, 1>>>();
update_dense_static_hit<<<pSize->gridSize, pSize->blockSize>>>((GStaticSynapses*)data, num, start_id);
return 0;
}
int cudaUpdateLIFEB(void *data, int num, int start_id, BlockSize *pSize)
{
find_lifeb_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFEBNeurons*)data, num, start_id);
update_lifeb_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFEBNeurons*)data, num, start_id);
//update_dense_life_neuron<<<pSize->gridSize, pSize->blockSize>>>((GLIFENeurons*)data, num, start_id);
return 0;
}
int addCrossNeurons(int *ids, int num)
{
add_cross_neuron<<<(num+MAXBLOCKSIZE-1)/MAXBLOCKSIZE, MAXBLOCKSIZE>>>(ids, num);
add_cross_neuron<<<1, 1>>>(nullptr, num);
return 0;
}
int cudaDeliverNeurons(int *idx2index, int *crossnode_index2idx, int *global_cross_data, int *fired_n_num, int node_num, int neuron_num)
{
deliver_neurons<<<(neuron_num + MAXBLOCKSIZE-1)/MAXBLOCKSIZE, MAXBLOCKSIZE>>>(idx2index, crossnode_index2idx, global_cross_data, fired_n_num, node_num);
return 0;
}
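// Builds a launch configuration (grid/block/minGrid size) for every model type using the
// occupancy API; several models reuse other kernels for the occupancy query, the Static
// synapse kernel gets a fixed 128-thread block, and LIFEB copies the LIFE configuration.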
BlockSize * getBlockSize(int nSize, int sSize)
{
BlockSize *ret = (BlockSize*)malloc(sizeof(BlockSize)*TYPESIZE);
memset(ret, 0, sizeof(BlockSize)*TYPESIZE);
cudaOccupancyMaxPotentialBlockSize(&(ret[Constant].minGridSize), &(ret[Constant].blockSize), update_constant_neuron, 0, nSize);
ret[Constant].gridSize = (upzero_else_set_one(nSize) + (ret[Constant].blockSize) - 1) / (ret[Constant].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[Poisson].minGridSize), &(ret[Poisson].blockSize), update_poisson_neuron, 0, nSize);
ret[Poisson].gridSize = (upzero_else_set_one(nSize) + (ret[Poisson].blockSize) - 1) / (ret[Poisson].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[Array].minGridSize), &(ret[Array].blockSize), update_poisson_neuron, 0, nSize);
ret[Array].gridSize = (upzero_else_set_one(nSize) + (ret[Array].blockSize) - 1) / (ret[Array].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[Decide].minGridSize), &(ret[Decide].blockSize), update_poisson_neuron, 0, nSize);
ret[Decide].gridSize = (upzero_else_set_one(nSize) + (ret[Decide].blockSize) - 1) / (ret[Decide].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[FFT].minGridSize), &(ret[FFT].blockSize), update_poisson_neuron, 0, nSize);
ret[FFT].gridSize = (upzero_else_set_one(nSize) + (ret[FFT].blockSize) - 1) / (ret[FFT].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[Mem].minGridSize), &(ret[Mem].blockSize), update_poisson_neuron, 0, nSize);
ret[Mem].gridSize = (upzero_else_set_one(nSize) + (ret[Mem].blockSize) - 1) / (ret[Mem].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[Max].minGridSize), &(ret[Max].blockSize), update_tj_neuron, 0, nSize);
ret[Max].gridSize = (upzero_else_set_one(nSize) + (ret[Max].blockSize) - 1) / (ret[Max].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[LIFE].minGridSize), &(ret[LIFE].blockSize), update_life_neuron, 0, nSize);
ret[LIFE].gridSize = (upzero_else_set_one(nSize) + (ret[LIFE].blockSize) - 1) / (ret[LIFE].blockSize);
cudaOccupancyMaxPotentialBlockSize(&(ret[TJ].minGridSize), &(ret[TJ].blockSize), update_life_neuron, 0, nSize);
ret[TJ].gridSize = (upzero_else_set_one(nSize) + (ret[TJ].blockSize) - 1) / (ret[TJ].blockSize);
//cudaOccupancyMaxPotentialBlockSize(&(ret[Static].minGridSize), &(ret[Static].blockSize), update_static_hit, 0, sSize);
ret[Static].blockSize = 128;
ret[Static].gridSize = (upzero_else_set_one(nSize) + (ret[Static].blockSize) - 1) / (ret[Static].blockSize);
ret[LIFEB].blockSize = ret[LIFE].blockSize;
ret[LIFEB].gridSize = ret[LIFE].gridSize;
ret[LIFEB].minGridSize = ret[LIFE].minGridSize;
return ret;
}
|
d4fa8c65dee419d64beaa2f07bbe7bc688fd6fa7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <limits>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
using std::cout;
using std::endl;
typedef unsigned long long Count;
typedef std::numeric_limits<double> DblLim;
const Count WARP_SIZE = 32; // Warp size
const Count NBLOCKS = 512; // Number of total cuda cores on my GPU
const Count ITERATIONS = 100000000; // Number of points to generate (each thread)
// This kernel estimates pi: each thread samples random points in the unit square and counts hits inside the quarter circle
__global__ void picount(Count *totals) {
// Define some shared memory: all threads in this block
__shared__ Count counter[WARP_SIZE];
// Unique ID of the thread
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize RNG
hiprandState_t rng;
hiprand_init(clock64(), tid, 0, &rng);
// Initialize the counter
counter[threadIdx.x] = 0;
// Computation loop
for (int i = 0; i < ITERATIONS; i++) {
float x = hiprand_uniform(&rng); // Random x position in [0,1]
float y = hiprand_uniform(&rng); // Random y position in [0,1]
counter[threadIdx.x] += 1 - int(x * x + y * y); // Hit test
}
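	// Hedged note, not in the original source: thread 0 reads every counter[] entry below without an
	// explicit barrier. With a single 32-thread block this leans on implicit warp-synchronous execution;
	// on Volta and newer GPUs an explicit __syncthreads() (or __syncwarp()) here would make it robust.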
// The first thread in *every block* should sum the results
if (threadIdx.x == 0) {
// Reset count for this block
totals[blockIdx.x] = 0;
// Accumulate results
for (int i = 0; i < WARP_SIZE; i++) {
totals[blockIdx.x] += counter[i];
}
}
}
int main(int argc, char **argv) {
int numDev;
hipGetDeviceCount(&numDev);
if (numDev < 1) {
cout << "CUDA device missing! Do you need to use optirun?\n";
return 1;
}
cout << "Starting simulation with " << NBLOCKS << " blocks, " << WARP_SIZE << " threads, and " << ITERATIONS << " iterations\n";
float elapsed1000 = 0;
hipEvent_t start1000, stop1000;
hipEventCreate(&start1000);
hipEventCreate(&stop1000);
// Allocate host and device memory to store the counters
Count *hOut, *dOut;
hOut = new Count[NBLOCKS]; // Host memory
hipMalloc(&dOut, sizeof(Count) * NBLOCKS); // Device memory
	// Launch kernel (record the start event first so the elapsed time measured below is meaningful)
	hipEventRecord(start1000, 0);
	hipLaunchKernelGGL(( picount), dim3(NBLOCKS), dim3(WARP_SIZE), 0, 0, dOut);
// Copy back memory used on device and free
hipMemcpy(hOut, dOut, sizeof(Count) * NBLOCKS, hipMemcpyDeviceToHost);
hipFree(dOut);
// Compute total hits
Count total = 0;
for (int i = 0; i < NBLOCKS; i++) {
total += hOut[i];
}
Count tests = NBLOCKS * ITERATIONS * WARP_SIZE;
cout << "Approximated PI using " << tests << " random tests\n";
hipEventRecord(stop1000, 0);
hipEventSynchronize(stop1000);
//Calculating the total time of execution
hipEventElapsedTime(&elapsed1000, start1000, stop1000);
// Freeing the events created before
hipEventDestroy(start1000);
hipEventDestroy(stop1000);
// Set maximum precision for decimal printing
cout.precision(DblLim::max_digits10);
cout << "PI ~= " << 4.0 * (double)total/(double)tests << endl;
//Showing the time of execution
printf("\n\t||| The elapsed time in gpu was %.2f ms |||", elapsed1000);
printf("\n");
return 0;
}
| d4fa8c65dee419d64beaa2f07bbe7bc688fd6fa7.cu | #include <iostream>
#include <limits>
#include <cuda.h>
#include <curand_kernel.h>
using std::cout;
using std::endl;
typedef unsigned long long Count;
typedef std::numeric_limits<double> DblLim;
const Count WARP_SIZE = 32; // Warp size
const Count NBLOCKS = 512; // Number of total cuda cores on my GPU
const Count ITERATIONS = 100000000; // Number of points to generate (each thread)
// This kernel estimates pi: each thread samples random points in the unit square and counts hits inside the quarter circle
__global__ void picount(Count *totals) {
// Define some shared memory: all threads in this block
__shared__ Count counter[WARP_SIZE];
// Unique ID of the thread
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize RNG
curandState_t rng;
curand_init(clock64(), tid, 0, &rng);
// Initialize the counter
counter[threadIdx.x] = 0;
// Computation loop
for (int i = 0; i < ITERATIONS; i++) {
float x = curand_uniform(&rng); // Random x position in [0,1]
float y = curand_uniform(&rng); // Random y position in [0,1]
counter[threadIdx.x] += 1 - int(x * x + y * y); // Hit test
}
// The first thread in *every block* should sum the results
if (threadIdx.x == 0) {
// Reset count for this block
totals[blockIdx.x] = 0;
// Accumulate results
for (int i = 0; i < WARP_SIZE; i++) {
totals[blockIdx.x] += counter[i];
}
}
}
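// Hedged aside, not part of the original file: since each block here is exactly one warp, the
// shared-memory counters above could also be combined with warp shuffles, as in this sketch.
__device__ Count warpReduceSum(Count val) {
	// Fold the upper half of the 32-lane warp into the lower half, halving the span each step.
	for (int offset = 16; offset > 0; offset >>= 1)
		val += __shfl_down_sync(0xffffffffu, val, offset);
	return val; // lane 0 ends up holding the warp-wide sum
}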
int main(int argc, char **argv) {
int numDev;
cudaGetDeviceCount(&numDev);
if (numDev < 1) {
cout << "CUDA device missing! Do you need to use optirun?\n";
return 1;
}
cout << "Starting simulation with " << NBLOCKS << " blocks, " << WARP_SIZE << " threads, and " << ITERATIONS << " iterations\n";
float elapsed1000 = 0;
cudaEvent_t start1000, stop1000;
cudaEventCreate(&start1000);
cudaEventCreate(&stop1000);
// Allocate host and device memory to store the counters
Count *hOut, *dOut;
hOut = new Count[NBLOCKS]; // Host memory
cudaMalloc(&dOut, sizeof(Count) * NBLOCKS); // Device memory
	// Launch kernel (record the start event first so the elapsed time measured below is meaningful)
	cudaEventRecord(start1000, 0);
	picount<<<NBLOCKS, WARP_SIZE>>>(dOut);
// Copy back memory used on device and free
cudaMemcpy(hOut, dOut, sizeof(Count) * NBLOCKS, cudaMemcpyDeviceToHost);
cudaFree(dOut);
// Compute total hits
Count total = 0;
for (int i = 0; i < NBLOCKS; i++) {
total += hOut[i];
}
Count tests = NBLOCKS * ITERATIONS * WARP_SIZE;
cout << "Approximated PI using " << tests << " random tests\n";
cudaEventRecord(stop1000, 0);
cudaEventSynchronize(stop1000);
//Calculating the total time of execution
cudaEventElapsedTime(&elapsed1000, start1000, stop1000);
// Freeing the events created before
cudaEventDestroy(start1000);
cudaEventDestroy(stop1000);
// Set maximum precision for decimal printing
cout.precision(DblLim::max_digits10);
cout << "PI ~= " << 4.0 * (double)total/(double)tests << endl;
//Showing the time of execution
printf("\n\t||| The elapsed time in gpu was %.2f ms |||", elapsed1000);
printf("\n");
return 0;
}
|
70b71d4907d2ca4e535effc81ba026b4408ca997.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "custom_kernels.cuh"
#include "Pancake.h"
#include "hash_array.h"
#include "cuda_helper.h"
#include <cooperative_groups/memcpy_async.h>
#include <cooperative_groups/reduce.h>
#include <cuda/pipeline>
#include <hip/hip_cooperative_groups.h>
#include <cuda/barrier>
namespace cg = cooperative_groups;
constexpr uint32_t npow2(uint32_t v)
{
//return v == 1 ? 1 : 1 << (64 - __lzcnt(v - 1));
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
static_assert(npow2(3) == 4);
//https://www.apriorit.com/dev-blog/614-cpp-cuda-accelerate-algorithm-cpu-gpu
template <typename T>
__device__ void atomicMinFloat(T* const address, const T value)
{
if(*address <= value)
{
return;
}
int* const addressAsI = (int*)address;
int old = *addressAsI, assumed;
do
{
assumed = old;
if(__int_as_float(assumed) <= value)
{
break;
}
old = atomicCAS(addressAsI, assumed, __float_as_int(value));
} while(assumed != old);
}
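// Descriptive note added for clarity (not from the original source): atomicMinChar below implements an
// atomic min on a single byte by operating on the aligned 32-bit word that contains it. __byte_perm
// extracts the addressed byte, min() compares it with the candidate value, the selector table merges the
// winning byte back into its lane, and atomicCAS retries until the word is updated without interference.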
__device__ char atomicMinChar(char* address, char val)
{
unsigned int* base_address = (unsigned int*)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, min_, new_;
old = *base_address;
do {
assumed = old;
min_ = min(val, (char)__byte_perm(old, 0, ((size_t)address & 3)));
new_ = __byte_perm(old, min_, sel);
old = atomicCAS(base_address, assumed, new_);
} while(assumed != old);
return old;
}
template <typename T>
__global__ void myReduceMinAtomic(const int num_batch, const int num_frontier, const T* __restrict__ mult_results, T* __restrict__ batch_answers)
{
__shared__ char sharedMin;
for(int batch_idx = blockIdx.x; batch_idx < num_batch; batch_idx += gridDim.x) {
const T* start_results = mult_results + batch_idx * num_frontier;
__syncthreads();
if(0 == threadIdx.x)
{
sharedMin = INT8_MAX;
}
__syncthreads();
T localMin = INT8_MAX;
for(int i = threadIdx.x; i < num_frontier; i += blockDim.x)
{
localMin = MIN(localMin, start_results[i]);
}
atomicMinChar(&sharedMin, localMin);
__syncthreads();
if(0 == threadIdx.x)
{
batch_answers[batch_idx] = sharedMin;
}
}
}
#define REDUCE_THREADS 64
template <typename T>
__global__ void myReduceMinSharedMem(const uint32_t xDim, const uint32_t xStride, const T* __restrict__ mult_results, T* __restrict__ batch_answers)
{
cg::thread_block block = cg::this_thread_block();
__shared__ T sharedMin[REDUCE_THREADS];
constexpr size_t max_val = std::numeric_limits<T>::max();
//for(int batch_idx = blockIdx.x; batch_idx < num_batch; batch_idx += gridDim.x) {
const T* start_results = mult_results + blockIdx.x * xStride;
T localMin = max_val;
for(unsigned int i = threadIdx.x; i < xDim; i += blockDim.x)
{
const uint8_t tmpMinVal = start_results[i];
localMin = MIN(localMin, tmpMinVal);
}
sharedMin[threadIdx.x] = localMin;
block.sync();
for(unsigned int s = REDUCE_THREADS / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
const uint8_t tmpMinVal = sharedMin[threadIdx.x + s];
if(localMin > tmpMinVal) {
localMin = tmpMinVal;
sharedMin[threadIdx.x] = localMin;
}
}
block.sync();
}
if(0 == threadIdx.x)
{
batch_answers[blockIdx.x] = localMin;
}
//}
}
template<typename T>
__global__
void cuda_min_kernel(int num_batch, int num_frontier, const T* __restrict__ mult_results, T* __restrict__ batch_answers)
{
for(int batch_idx = blockIdx.x * blockDim.x + threadIdx.x, stride = blockDim.x * gridDim.x; batch_idx < num_batch; batch_idx += stride)
{
const T* __restrict__ start_results = mult_results + batch_idx * num_frontier;
T min = start_results[0];
for(int frontier_idx = 1; frontier_idx < num_frontier; ++frontier_idx)
{
if(start_results[frontier_idx] < min) min = start_results[frontier_idx];
}
batch_answers[batch_idx] = min;
}
}
__global__ void transpose_device(const int rowDim, const int colDim, const uint32_t* __restrict__ input, uint32_t* output)
{
for(int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < rowDim * colDim; tid += blockDim.x * gridDim.x) {
const int orow = tid % colDim;
const int ocol = tid / colDim;
output[orow * rowDim + ocol] = input[tid];
}
}
#define TILE 16
__global__
void tiled_cuda_bitwise_set_intersection(const uint32_t cols_a,//x-axis
const uint32_t rows_b,//y-axis
const unsigned max_a,
const uint32_t* __restrict__ hash_a,
const uint8_t* __restrict__ g_vals,
const uint32_t* __restrict__ hash_b,
uint8_t* __restrict__ results)
{
assert(blockIdx.x * blockDim.x < cols_a);
assert(blockIdx.y * blockDim.y < rows_b);
__shared__ uint8_t sMin[TILE][TILE];
uint8_t localMin = UINT8_MAX;
__shared__ uint32_t sA[NUM_INTS_PER_PANCAKE][TILE];
__shared__ uint32_t sB[TILE][NUM_INTS_PER_PANCAKE];
uint32_t localB[NUM_INTS_PER_PANCAKE];
uint32_t localA[NUM_INTS_PER_PANCAKE];
cg::thread_block block = cg::this_thread_block();
//for(uint32_t by = blockIdx.y; by < int_div_ceil(rows_b, NUM_INTS_PER_PANCAKE); by += gridDim.y) {
const uint32_t output_row = blockIdx.y * blockDim.y + threadIdx.y;
if(output_row < rows_b) {
for(uint32_t tidx = threadIdx.x; tidx < NUM_INTS_PER_PANCAKE; tidx += blockDim.x) {
sB[threadIdx.y][tidx] = hash_b[output_row * NUM_INTS_PER_PANCAKE + tidx];
}
}
block.sync();
if(output_row < rows_b) {
#pragma unroll
for(uint32_t i = 0; i < NUM_INTS_PER_PANCAKE; ++i) {
localB[i] = sB[threadIdx.y][i];
}
}
//x goes 0 to rows_a
for(uint32_t bx = blockIdx.x; bx < max_a; bx += gridDim.x) {
uint32_t output_col = bx * blockDim.x + threadIdx.x;
if(output_col < cols_a) {
for(int tidy = threadIdx.y; tidy < NUM_INTS_PER_PANCAKE; tidy += blockDim.y) {
sA[tidy][threadIdx.x] = hash_a[tidy * cols_a + output_col];
//sA[tidy][threadIdx.x] = hash_a[tidy + output_col * NUM_INTS_PER_PANCAKE];
}
}
block.sync();
if(output_row < rows_b && output_col < cols_a) {
#pragma unroll
for(uint32_t i = 0; i < NUM_INTS_PER_PANCAKE; ++i) {
localA[i] = sA[i][threadIdx.x];
}
constexpr Mask gap_mask;
uint32_t tmpF = 0;
uint32_t tmpB = 0;
uint32_t tmpMin;
#pragma unroll
for(uint32_t i = 0; i < NUM_GAP_INTS; ++i) {
uint32_t A = localA[i];
uint32_t B = localB[i];
tmpF += __popc(B & (A | gap_mask[i]));
tmpB += __popc(A & (B | gap_mask[i]));
}
tmpMin = MIN(tmpF, tmpB);
assert(localMin > GAPX);
#pragma unroll
for(uint32_t i = NUM_GAP_INTS; i < NUM_INTS_PER_PANCAKE; ++i) {
uint32_t A = localA[i];
uint32_t B = localB[i];
tmpMin += __popc(A & B);
}
const uint8_t h_val = static_cast<uint8_t>(NUM_PANCAKES + g_vals[output_col] - tmpMin);
localMin = MIN(localMin, h_val);
}
block.sync();
}
sMin[threadIdx.y][threadIdx.x] = localMin;
block.sync();
for(unsigned int s = TILE / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
const uint8_t tmpMinVal = sMin[threadIdx.y][threadIdx.x + s];
if(tmpMinVal < localMin) {
localMin = tmpMinVal;
sMin[threadIdx.y][threadIdx.x] = tmpMinVal;
}
}
block.sync();
}
if(0 == threadIdx.x && output_row < rows_b)
{
results[output_row * gridDim.x + blockIdx.x] = localMin;
}
//}
}
__global__
void naive_cuda_bitwise_set_intersection(int rows_a, int rows_b, const uint32_t* __restrict__ hash_a, const uint32_t* __restrict__ g_vals,
const uint32_t* __restrict__ hash_b, uint32_t* __restrict__ results)
{
constexpr Mask gap_mask;
for(int batch_idx = blockIdx.x * blockDim.x + threadIdx.x, max = rows_a * rows_b, stride = blockDim.x * gridDim.x; batch_idx < max; batch_idx += stride)
{
int col = batch_idx / rows_a;
int row = batch_idx % rows_a;
int tmpF = 0;
int tmpB = 0;
for(int i = 0; i < NUM_INTS_PER_PANCAKE; ++i) {
uint32_t A = hash_a[row * NUM_INTS_PER_PANCAKE + i];
uint32_t B = hash_b[col * NUM_INTS_PER_PANCAKE + i];
tmpF += __popc(B & (A | gap_mask[i]));
tmpB += __popc(A & (B | gap_mask[i]));
}
results[batch_idx] = NUM_PANCAKES + g_vals[row] - MIN(tmpF, tmpB);
}
}
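// Hedged summary added for clarity (not from the original source): in the two-stage pipeline below,
// tiled_cuda_bitwise_set_intersection writes one partial minimum per (row of B, x-block) into
// mult_results, laid out as rows_b x gridDim.x, and myReduceMinSharedMem is then launched with one
// block per row of B to reduce that row of partial minima down to the final answer in d_answers.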
void bitwise_set_intersection(hipStream_t stream,
int rows_a,
int rows_b,
const uint32_t* __restrict__ hash_a,
const uint8_t* __restrict__ g_vals,
const uint32_t* __restrict__ hash_b,
uint8_t* __restrict__ mult_results,
uint8_t* __restrict__ d_answers)
{
//constexpr int threadsPerBlock = 256;
//int blocksPerGrid = (rows_a * rows_b + threadsPerBlock - 1) / threadsPerBlock;
//naive_cuda_bitwise_set_intersection << <blocksPerGrid, threadsPerBlock, 0, stream >> > (rows_a, rows_b, hash_a, g_vals, hash_b, mult_results);
constexpr uint32_t MAX_BLOCKS_X = 1024;
dim3 threadsPerBlock(TILE, TILE, 1);
int max_a = int_div_ceil(rows_a, threadsPerBlock.x);
uint32_t gridDimX = MIN(MAX_BLOCKS_X, max_a);
uint32_t gridDimY = int_div_ceil(rows_b, threadsPerBlock.y);
assert(gridDimY <= 65535);
dim3 blocksPerGrid(gridDimX, gridDimY, 1);
tiled_cuda_bitwise_set_intersection << <blocksPerGrid, threadsPerBlock, 0, stream >> > (rows_a, rows_b, max_a, hash_a, g_vals, hash_b, mult_results);
CUDA_CHECK_RESULT(hipGetLastError());
threadsPerBlock = dim3(TILE, TILE, 1);
//reduceMin2 << <blocksPerGrid, threadsPerBlock, TILE* gridDimX * sizeof(uint8_t), stream >> > (rows_a, int_div_ceil(rows_a, gridDimX) rows_b, mult_results, d_answers);
//myReduceMinAtomic << <1024, 96, 0, stream >> > (rows_b, rows_a, mult_results, d_answers);
myReduceMinSharedMem << <rows_b, REDUCE_THREADS, 0, stream >> > (gridDimX, gridDimX, mult_results, d_answers);
CUDA_CHECK_RESULT(hipGetLastError());
}
void reduce_min(hipStream_t stream, int num_batch, int num_frontier, const uint8_t* __restrict__ mult_results, uint8_t* __restrict__ d_batch_answers)
{
constexpr int threadsPerBlock = 96;
int blocksPerGrid = MIN(int_div_ceil(num_batch * num_frontier, threadsPerBlock), 16384);
myReduceMinAtomic << <blocksPerGrid, threadsPerBlock, 0, stream >> > (num_batch, num_frontier, mult_results, d_batch_answers);
}
void transpose_cuda(hipStream_t stream,
const int rows,
const int cols,
const uint32_t* __restrict__ input,
uint32_t* __restrict output)
{
constexpr int threadsPerBlock = 96;
int blocksPerGrid = MIN(int_div_ceil(rows * cols, threadsPerBlock), 16384);
transpose_device << <blocksPerGrid, threadsPerBlock, 0, stream >> > (rows, cols, input, output);
CUDA_CHECK_RESULT(hipGetLastError());
}
| 70b71d4907d2ca4e535effc81ba026b4408ca997.cu | #include <stdio.h>
#include "custom_kernels.cuh"
#include "Pancake.h"
#include "hash_array.h"
#include "cuda_helper.h"
#include <cooperative_groups/memcpy_async.h>
#include <cooperative_groups/reduce.h>
#include <cuda/pipeline>
#include <cooperative_groups.h>
#include <cuda/barrier>
namespace cg = cooperative_groups;
constexpr uint32_t npow2(uint32_t v)
{
//return v == 1 ? 1 : 1 << (64 - __lzcnt(v - 1));
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
static_assert(npow2(3) == 4);
//https://www.apriorit.com/dev-blog/614-cpp-cuda-accelerate-algorithm-cpu-gpu
template <typename T>
__device__ void atomicMinFloat(T* const address, const T value)
{
if(*address <= value)
{
return;
}
int* const addressAsI = (int*)address;
int old = *addressAsI, assumed;
do
{
assumed = old;
if(__int_as_float(assumed) <= value)
{
break;
}
old = atomicCAS(addressAsI, assumed, __float_as_int(value));
} while(assumed != old);
}
__device__ char atomicMinChar(char* address, char val)
{
unsigned int* base_address = (unsigned int*)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, min_, new_;
old = *base_address;
do {
assumed = old;
min_ = min(val, (char)__byte_perm(old, 0, ((size_t)address & 3)));
new_ = __byte_perm(old, min_, sel);
old = atomicCAS(base_address, assumed, new_);
} while(assumed != old);
return old;
}
template <typename T>
__global__ void myReduceMinAtomic(const int num_batch, const int num_frontier, const T* __restrict__ mult_results, T* __restrict__ batch_answers)
{
__shared__ char sharedMin;
for(int batch_idx = blockIdx.x; batch_idx < num_batch; batch_idx += gridDim.x) {
const T* start_results = mult_results + batch_idx * num_frontier;
__syncthreads();
if(0 == threadIdx.x)
{
sharedMin = INT8_MAX;
}
__syncthreads();
T localMin = INT8_MAX;
for(int i = threadIdx.x; i < num_frontier; i += blockDim.x)
{
localMin = MIN(localMin, start_results[i]);
}
atomicMinChar(&sharedMin, localMin);
__syncthreads();
if(0 == threadIdx.x)
{
batch_answers[batch_idx] = sharedMin;
}
}
}
#define REDUCE_THREADS 64
template <typename T>
__global__ void myReduceMinSharedMem(const uint32_t xDim, const uint32_t xStride, const T* __restrict__ mult_results, T* __restrict__ batch_answers)
{
cg::thread_block block = cg::this_thread_block();
__shared__ T sharedMin[REDUCE_THREADS];
constexpr size_t max_val = std::numeric_limits<T>::max();
//for(int batch_idx = blockIdx.x; batch_idx < num_batch; batch_idx += gridDim.x) {
const T* start_results = mult_results + blockIdx.x * xStride;
T localMin = max_val;
for(unsigned int i = threadIdx.x; i < xDim; i += blockDim.x)
{
const uint8_t tmpMinVal = start_results[i];
localMin = MIN(localMin, tmpMinVal);
}
sharedMin[threadIdx.x] = localMin;
block.sync();
for(unsigned int s = REDUCE_THREADS / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
const uint8_t tmpMinVal = sharedMin[threadIdx.x + s];
if(localMin > tmpMinVal) {
localMin = tmpMinVal;
sharedMin[threadIdx.x] = localMin;
}
}
block.sync();
}
if(0 == threadIdx.x)
{
batch_answers[blockIdx.x] = localMin;
}
//}
}
template<typename T>
__global__
void cuda_min_kernel(int num_batch, int num_frontier, const T* __restrict__ mult_results, T* __restrict__ batch_answers)
{
for(int batch_idx = blockIdx.x * blockDim.x + threadIdx.x, stride = blockDim.x * gridDim.x; batch_idx < num_batch; batch_idx += stride)
{
const T* __restrict__ start_results = mult_results + batch_idx * num_frontier;
T min = start_results[0];
for(int frontier_idx = 1; frontier_idx < num_frontier; ++frontier_idx)
{
if(start_results[frontier_idx] < min) min = start_results[frontier_idx];
}
batch_answers[batch_idx] = min;
}
}
__global__ void transpose_device(const int rowDim, const int colDim, const uint32_t* __restrict__ input, uint32_t* output)
{
for(int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < rowDim * colDim; tid += blockDim.x * gridDim.x) {
const int orow = tid % colDim;
const int ocol = tid / colDim;
output[orow * rowDim + ocol] = input[tid];
}
}
#define TILE 16
__global__
void tiled_cuda_bitwise_set_intersection(const uint32_t cols_a,//x-axis
const uint32_t rows_b,//y-axis
const unsigned max_a,
const uint32_t* __restrict__ hash_a,
const uint8_t* __restrict__ g_vals,
const uint32_t* __restrict__ hash_b,
uint8_t* __restrict__ results)
{
assert(blockIdx.x * blockDim.x < cols_a);
assert(blockIdx.y * blockDim.y < rows_b);
__shared__ uint8_t sMin[TILE][TILE];
uint8_t localMin = UINT8_MAX;
__shared__ uint32_t sA[NUM_INTS_PER_PANCAKE][TILE];
__shared__ uint32_t sB[TILE][NUM_INTS_PER_PANCAKE];
uint32_t localB[NUM_INTS_PER_PANCAKE];
uint32_t localA[NUM_INTS_PER_PANCAKE];
cg::thread_block block = cg::this_thread_block();
//for(uint32_t by = blockIdx.y; by < int_div_ceil(rows_b, NUM_INTS_PER_PANCAKE); by += gridDim.y) {
const uint32_t output_row = blockIdx.y * blockDim.y + threadIdx.y;
if(output_row < rows_b) {
for(uint32_t tidx = threadIdx.x; tidx < NUM_INTS_PER_PANCAKE; tidx += blockDim.x) {
sB[threadIdx.y][tidx] = hash_b[output_row * NUM_INTS_PER_PANCAKE + tidx];
}
}
block.sync();
if(output_row < rows_b) {
#pragma unroll
for(uint32_t i = 0; i < NUM_INTS_PER_PANCAKE; ++i) {
localB[i] = sB[threadIdx.y][i];
}
}
//x goes 0 to rows_a
for(uint32_t bx = blockIdx.x; bx < max_a; bx += gridDim.x) {
uint32_t output_col = bx * blockDim.x + threadIdx.x;
if(output_col < cols_a) {
for(int tidy = threadIdx.y; tidy < NUM_INTS_PER_PANCAKE; tidy += blockDim.y) {
sA[tidy][threadIdx.x] = hash_a[tidy * cols_a + output_col];
//sA[tidy][threadIdx.x] = hash_a[tidy + output_col * NUM_INTS_PER_PANCAKE];
}
}
block.sync();
if(output_row < rows_b && output_col < cols_a) {
#pragma unroll
for(uint32_t i = 0; i < NUM_INTS_PER_PANCAKE; ++i) {
localA[i] = sA[i][threadIdx.x];
}
constexpr Mask gap_mask;
uint32_t tmpF = 0;
uint32_t tmpB = 0;
uint32_t tmpMin;
#pragma unroll
for(uint32_t i = 0; i < NUM_GAP_INTS; ++i) {
uint32_t A = localA[i];
uint32_t B = localB[i];
tmpF += __popc(B & (A | gap_mask[i]));
tmpB += __popc(A & (B | gap_mask[i]));
}
tmpMin = MIN(tmpF, tmpB);
assert(localMin > GAPX);
#pragma unroll
for(uint32_t i = NUM_GAP_INTS; i < NUM_INTS_PER_PANCAKE; ++i) {
uint32_t A = localA[i];
uint32_t B = localB[i];
tmpMin += __popc(A & B);
}
const uint8_t h_val = static_cast<uint8_t>(NUM_PANCAKES + g_vals[output_col] - tmpMin);
localMin = MIN(localMin, h_val);
}
block.sync();
}
sMin[threadIdx.y][threadIdx.x] = localMin;
block.sync();
for(unsigned int s = TILE / 2; s > 0; s >>= 1) {
if(threadIdx.x < s) {
const uint8_t tmpMinVal = sMin[threadIdx.y][threadIdx.x + s];
if(tmpMinVal < localMin) {
localMin = tmpMinVal;
sMin[threadIdx.y][threadIdx.x] = tmpMinVal;
}
}
block.sync();
}
if(0 == threadIdx.x && output_row < rows_b)
{
results[output_row * gridDim.x + blockIdx.x] = localMin;
}
//}
}
__global__
void naive_cuda_bitwise_set_intersection(int rows_a, int rows_b, const uint32_t* __restrict__ hash_a, const uint32_t* __restrict__ g_vals,
const uint32_t* __restrict__ hash_b, uint32_t* __restrict__ results)
{
constexpr Mask gap_mask;
for(int batch_idx = blockIdx.x * blockDim.x + threadIdx.x, max = rows_a * rows_b, stride = blockDim.x * gridDim.x; batch_idx < max; batch_idx += stride)
{
int col = batch_idx / rows_a;
int row = batch_idx % rows_a;
int tmpF = 0;
int tmpB = 0;
for(int i = 0; i < NUM_INTS_PER_PANCAKE; ++i) {
uint32_t A = hash_a[row * NUM_INTS_PER_PANCAKE + i];
uint32_t B = hash_b[col * NUM_INTS_PER_PANCAKE + i];
tmpF += __popc(B & (A | gap_mask[i]));
tmpB += __popc(A & (B | gap_mask[i]));
}
results[batch_idx] = NUM_PANCAKES + g_vals[row] - MIN(tmpF, tmpB);
}
}
void bitwise_set_intersection(cudaStream_t stream,
int rows_a,
int rows_b,
const uint32_t* __restrict__ hash_a,
const uint8_t* __restrict__ g_vals,
const uint32_t* __restrict__ hash_b,
uint8_t* __restrict__ mult_results,
uint8_t* __restrict__ d_answers)
{
//constexpr int threadsPerBlock = 256;
//int blocksPerGrid = (rows_a * rows_b + threadsPerBlock - 1) / threadsPerBlock;
//naive_cuda_bitwise_set_intersection << <blocksPerGrid, threadsPerBlock, 0, stream >> > (rows_a, rows_b, hash_a, g_vals, hash_b, mult_results);
constexpr uint32_t MAX_BLOCKS_X = 1024;
dim3 threadsPerBlock(TILE, TILE, 1);
int max_a = int_div_ceil(rows_a, threadsPerBlock.x);
uint32_t gridDimX = MIN(MAX_BLOCKS_X, max_a);
uint32_t gridDimY = int_div_ceil(rows_b, threadsPerBlock.y);
assert(gridDimY <= 65535);
dim3 blocksPerGrid(gridDimX, gridDimY, 1);
tiled_cuda_bitwise_set_intersection << <blocksPerGrid, threadsPerBlock, 0, stream >> > (rows_a, rows_b, max_a, hash_a, g_vals, hash_b, mult_results);
CUDA_CHECK_RESULT(cudaGetLastError());
threadsPerBlock = dim3(TILE, TILE, 1);
//reduceMin2 << <blocksPerGrid, threadsPerBlock, TILE* gridDimX * sizeof(uint8_t), stream >> > (rows_a, int_div_ceil(rows_a, gridDimX) rows_b, mult_results, d_answers);
//myReduceMinAtomic << <1024, 96, 0, stream >> > (rows_b, rows_a, mult_results, d_answers);
myReduceMinSharedMem << <rows_b, REDUCE_THREADS, 0, stream >> > (gridDimX, gridDimX, mult_results, d_answers);
CUDA_CHECK_RESULT(cudaGetLastError());
}
void reduce_min(cudaStream_t stream, int num_batch, int num_frontier, const uint8_t* __restrict__ mult_results, uint8_t* __restrict__ d_batch_answers)
{
constexpr int threadsPerBlock = 96;
int blocksPerGrid = MIN(int_div_ceil(num_batch * num_frontier, threadsPerBlock), 16384);
myReduceMinAtomic << <blocksPerGrid, threadsPerBlock, 0, stream >> > (num_batch, num_frontier, mult_results, d_batch_answers);
}
void transpose_cuda(cudaStream_t stream,
const int rows,
const int cols,
const uint32_t* __restrict__ input,
uint32_t* __restrict output)
{
constexpr int threadsPerBlock = 96;
int blocksPerGrid = MIN(int_div_ceil(rows * cols, threadsPerBlock), 16384);
transpose_device << <blocksPerGrid, threadsPerBlock, 0, stream >> > (rows, cols, input, output);
CUDA_CHECK_RESULT(cudaGetLastError());
}
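// Hedged usage sketch, not part of the original file: a minimal host-side driver for transpose_cuda
// above. The matrix contents and dimensions are arbitrary test values chosen for illustration only.
void transpose_demo()
{
	const int rows = 4, cols = 8;
	uint32_t h_in[rows * cols];
	for (int i = 0; i < rows * cols; ++i) h_in[i] = static_cast<uint32_t>(i);
	uint32_t *d_in = nullptr, *d_out = nullptr;
	cudaMalloc(&d_in, sizeof(h_in));
	cudaMalloc(&d_out, sizeof(h_in));
	cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
	transpose_cuda(0, rows, cols, d_in, d_out); // 0 = default stream
	cudaStreamSynchronize(0);                   // wait for the transpose to finish
	cudaFree(d_in);
	cudaFree(d_out);
}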
|
9c15f9b89717c32d642813d3ead03b23e78807f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <SDL/SDL.h>
#include <unistd.h>
typedef struct
{
double min_re, min_im, max_re, max_im;
int nb_pts_re, nb_pts_im;
} cplx_plan_struct;
enum order_enum {UP, DOWN, RIGHT, LEFT, PLUS, MINUS};
__global__
void z_funct(Uint32 *d_img, cplx_plan_struct * d_cplx_plan, double c_re, double c_im, int nb_ite)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int x = i % d_cplx_plan->nb_pts_re;
int y = i / d_cplx_plan->nb_pts_re;
double z_re = d_cplx_plan->min_re +
(d_cplx_plan->max_re-d_cplx_plan->min_re)
/d_cplx_plan->nb_pts_re*x;
double z_im = d_cplx_plan->max_im -
(d_cplx_plan->max_im-d_cplx_plan->min_im)
/d_cplx_plan->nb_pts_im*y;
double z_re_tmp = 0;
int t=0;
while ((z_re*z_re+z_im*z_im <= 4) && (t<nb_ite))
{
z_re_tmp = z_re*z_re - z_im*z_im + c_re;
z_im = 2*z_im*z_re + c_im;
z_re = z_re_tmp;
t++;
}
int r,g,b;
if (t == nb_ite)
{
r=0; g=0; b=0;
}
else
{
if ( t < nb_ite/2) r = 0;
else r = 2*floor(256./nb_ite*t-128);
g = floor(256./nb_ite*(nb_ite-2*abs(nb_ite/2.-t)));
// if ( t < nb_ite/2) b = 256-2*floor(256./nb_ite*t);
if ( t < nb_ite/2) b = 256-2*floor(128-256./nb_ite*t);
else b = 0;
}
d_img[i] = r*65536+g*256+b;
}
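// Hedged aside, not part of the original file: a host-side reference of the same escape-time loop,
// convenient for spot-checking a few pixels of the kernel output. It mirrors z_funct for one point.
static int julia_escape_time_host(double z_re, double z_im, double c_re, double c_im, int nb_ite)
{
	int t = 0;
	while ((z_re * z_re + z_im * z_im <= 4) && (t < nb_ite))
	{
		double z_re_tmp = z_re * z_re - z_im * z_im + c_re;
		z_im = 2 * z_im * z_re + c_im;
		z_re = z_re_tmp;
		t++;
	}
	return t; // number of iterations before escaping (nb_ite if the point never escaped)
}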
void mv_cplx_plan(cplx_plan_struct * cplx_plan, enum order_enum order)
{
double size_re, size_im;
switch (order)
{
case UP :
size_re = (cplx_plan->max_im - cplx_plan->min_im)/3;
cplx_plan->min_im += size_re;
cplx_plan->max_im += size_re;
break;
case DOWN :
size_re = (cplx_plan->max_im - cplx_plan->min_im)/3;
cplx_plan->min_im -= size_re;
cplx_plan->max_im -= size_re;
break;
case LEFT :
size_im = (cplx_plan->max_re - cplx_plan->min_re)/3;
cplx_plan->min_re -= size_im;
cplx_plan->max_re -= size_im;
break;
case RIGHT :
size_im = (cplx_plan->max_re - cplx_plan->min_re)/3;
cplx_plan->min_re += size_im;
cplx_plan->max_re += size_im;
break;
case MINUS :
size_re = (cplx_plan->max_re - cplx_plan->min_re)/4;
size_im = (cplx_plan->max_im - cplx_plan->min_im)/4;
cplx_plan->min_re -= size_re;
cplx_plan->max_re += size_re;
cplx_plan->min_im -= size_im;
cplx_plan->max_im += size_im;
break;
case PLUS :
size_re = (cplx_plan->max_re - cplx_plan->min_re)/4;
size_im = (cplx_plan->max_im - cplx_plan->min_im)/4;
cplx_plan->min_re += size_re;
cplx_plan->max_re -= size_re;
cplx_plan->min_im += size_im;
cplx_plan->max_im -= size_im;
break;
}
}
int main(int argc, char * argv[])
{
if (argc == 1)
{
/* printf( "Help \n
8 args : \n \n
arg1 = min real part | arg2 = min imaginary part \n
arg3 = max real part | arg4 = max imaginary part \n
arg5 = number of points on the real axe | arg6 = number of points on the imaginary axe \n
arg7 = nb of iterations | arg8 = limit convergence
\n \n
4 args : \n \n
arg1 = number of points on the real axe | arg2 = number of points on the imaginary axe \n
arg3 = nb of iterations
\n \n") ;*/
return 1;
}
double max_re, max_im, min_re, min_im, c_re, c_im;
int nb_pts_re, nb_pts_im,nb_ite;
if (argc == 10)
{
try
{
min_re = atof(argv[1]);
min_im = atof(argv[2]);
max_re = atof(argv[3]);
max_im = atof(argv[4]);
nb_pts_re = atoi(argv[5]);
nb_pts_im = atoi(argv[6]);
c_re = atof(argv[7]);
c_im = atof(argv[8]);
nb_ite = atoi(argv[9]);
}
catch (...)
{
printf( "Bad Args : see help (type nameofprogram without args)\n\n");
return 1;
}
}
if (argc == 6 )
{
min_re = -2;
min_im = -1;
max_re = 1;
max_im = 1;
try
{
nb_pts_re = atoi(argv[1]);
nb_pts_im = atoi(argv[2]);
c_re = atof(argv[3]);
c_im = atof(argv[4]);
nb_ite = atoi(argv[5]);
}
catch (...)
{
printf( "Bad Args : see help (type nameofprogram without args)\n\n");
return 1;
}
}
int size_i = sizeof(Uint32)*nb_pts_re*nb_pts_im;
Uint32 *d_img;
hipMalloc(&d_img,size_i);
cplx_plan_struct cplx_plan, *d_cplx_plan;
hipMalloc(&d_cplx_plan, sizeof(cplx_plan_struct));
dim3 blockDim = 1024;
dim3 gridDim = (nb_pts_re*nb_pts_im)/1024 + 1;
cplx_plan.min_re = min_re;
cplx_plan.min_im = min_im;
cplx_plan.max_re = max_re;
cplx_plan.max_im = max_im;
cplx_plan.nb_pts_re = nb_pts_re;
cplx_plan.nb_pts_im = nb_pts_im;
SDL_Init(SDL_INIT_VIDEO);
SDL_Surface *SDL_img = SDL_SetVideoMode(nb_pts_im, nb_pts_re, 32, SDL_HWSURFACE | SDL_DOUBLEBUF);
SDL_Event event;
bool quit = false;
bool recalc = true;
bool refresh = true;
int mouse_x, mouse_y;
double x, y;
while ( !quit )
{
while( SDL_PollEvent( &event ) )
{
switch( event.type )
{
case SDL_KEYDOWN:
switch ( event.key.keysym.sym )
{
case SDLK_UP:
mv_cplx_plan(&cplx_plan, UP);
recalc = true;
refresh = true;
break;
case SDLK_DOWN:
mv_cplx_plan(&cplx_plan, DOWN);
recalc = true;
refresh = true;
break;
case SDLK_LEFT:
mv_cplx_plan(&cplx_plan, LEFT);
recalc = true;
refresh = true;
break;
case SDLK_RIGHT:
mv_cplx_plan(&cplx_plan, RIGHT);
recalc = true;
refresh = true;
break;
case SDLK_KP_PLUS:
mv_cplx_plan(&cplx_plan, PLUS);
recalc = true;
refresh = true;
break;
case SDLK_KP_MINUS:
mv_cplx_plan(&cplx_plan, MINUS);
recalc = true;
refresh = true;
break;
case SDLK_KP_MULTIPLY:
nb_ite *= 2;
recalc = true;
refresh = true;
break;
case SDLK_KP_DIVIDE:
nb_ite /= 2;
recalc = true;
refresh = true;
break;
case SDLK_q:
quit = true;
recalc = false;
refresh = true;
break;
case SDLK_i:
cplx_plan.min_re = -2;
cplx_plan.min_im = -1;
cplx_plan.max_re = 1;
cplx_plan.max_im = 1;
recalc = true;
refresh = true;
break;
case SDLK_s :
SDL_SaveBMP(SDL_img, "data_julia.bmp");
break;
default:
break;
}
break;
case SDL_MOUSEMOTION :
SDL_GetMouseState(&mouse_x, &mouse_y);
refresh = true;
break;
default:
break;
}
}
if (refresh)
{
if (recalc)
{
recalc = false;
hipMemcpy(d_cplx_plan, &cplx_plan, sizeof(cplx_plan_struct), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( z_funct), dim3(gridDim),dim3(blockDim), 0, 0, d_img, d_cplx_plan, c_re, c_im, nb_ite);
hipMemcpy(SDL_img->pixels, d_img, size_i, hipMemcpyDeviceToHost);
}
refresh = false;
x = cplx_plan.min_re + mouse_x*1./nb_pts_re*(cplx_plan.max_re-cplx_plan.min_re);
y = cplx_plan.min_im + mouse_y*1./nb_pts_im*(cplx_plan.max_im-cplx_plan.min_im);
printf("\rmouse position (re : im) %.20lf : %.20lf | nb_ite %d ", x, y, nb_ite);
SDL_Flip(SDL_img);
}
}
SDL_Quit();
printf("\n\n");
}
| 9c15f9b89717c32d642813d3ead03b23e78807f8.cu | #include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <SDL/SDL.h>
#include <unistd.h>
typedef struct
{
double min_re, min_im, max_re, max_im;
int nb_pts_re, nb_pts_im;
} cplx_plan_struct;
enum order_enum {UP, DOWN, RIGHT, LEFT, PLUS, MINUS};
__global__
void z_funct(Uint32 *d_img, cplx_plan_struct * d_cplx_plan, double c_re, double c_im, int nb_ite)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int x = i % d_cplx_plan->nb_pts_re;
int y = i / d_cplx_plan->nb_pts_re;
double z_re = d_cplx_plan->min_re +
(d_cplx_plan->max_re-d_cplx_plan->min_re)
/d_cplx_plan->nb_pts_re*x;
double z_im = d_cplx_plan->max_im -
(d_cplx_plan->max_im-d_cplx_plan->min_im)
/d_cplx_plan->nb_pts_im*y;
double z_re_tmp = 0;
int t=0;
while ((z_re*z_re+z_im*z_im <= 4) && (t<nb_ite))
{
z_re_tmp = z_re*z_re - z_im*z_im + c_re;
z_im = 2*z_im*z_re + c_im;
z_re = z_re_tmp;
t++;
}
int r,g,b;
if (t == nb_ite)
{
r=0; g=0; b=0;
}
else
{
if ( t < nb_ite/2) r = 0;
else r = 2*floor(256./nb_ite*t-128);
g = floor(256./nb_ite*(nb_ite-2*abs(nb_ite/2.-t)));
// if ( t < nb_ite/2) b = 256-2*floor(256./nb_ite*t);
if ( t < nb_ite/2) b = 256-2*floor(128-256./nb_ite*t);
else b = 0;
}
d_img[i] = r*65536+g*256+b;
}
void mv_cplx_plan(cplx_plan_struct * cplx_plan, enum order_enum order)
{
double size_re, size_im;
switch (order)
{
case UP :
size_re = (cplx_plan->max_im - cplx_plan->min_im)/3;
cplx_plan->min_im += size_re;
cplx_plan->max_im += size_re;
break;
case DOWN :
size_re = (cplx_plan->max_im - cplx_plan->min_im)/3;
cplx_plan->min_im -= size_re;
cplx_plan->max_im -= size_re;
break;
case LEFT :
size_im = (cplx_plan->max_re - cplx_plan->min_re)/3;
cplx_plan->min_re -= size_im;
cplx_plan->max_re -= size_im;
break;
case RIGHT :
size_im = (cplx_plan->max_re - cplx_plan->min_re)/3;
cplx_plan->min_re += size_im;
cplx_plan->max_re += size_im;
break;
case MINUS :
size_re = (cplx_plan->max_re - cplx_plan->min_re)/4;
size_im = (cplx_plan->max_im - cplx_plan->min_im)/4;
cplx_plan->min_re -= size_re;
cplx_plan->max_re += size_re;
cplx_plan->min_im -= size_im;
cplx_plan->max_im += size_im;
break;
case PLUS :
size_re = (cplx_plan->max_re - cplx_plan->min_re)/4;
size_im = (cplx_plan->max_im - cplx_plan->min_im)/4;
cplx_plan->min_re += size_re;
cplx_plan->max_re -= size_re;
cplx_plan->min_im += size_im;
cplx_plan->max_im -= size_im;
break;
}
}
int main(int argc, char * argv[])
{
if (argc == 1)
{
/* printf( "Help \n
8 args : \n \n
arg1 = min real part | arg2 = min imaginary part \n
arg3 = max real part | arg4 = max imaginary part \n
arg5 = number of points on the real axe | arg6 = number of points on the imaginary axe \n
arg7 = nb of iterations | arg8 = limit convergence
\n \n
4 args : \n \n
arg1 = number of points on the real axe | arg2 = number of points on the imaginary axe \n
arg3 = nb of iterations
\n \n") ;*/
return 1;
}
double max_re, max_im, min_re, min_im, c_re, c_im;
int nb_pts_re, nb_pts_im,nb_ite;
if (argc == 10)
{
try
{
min_re = atof(argv[1]);
min_im = atof(argv[2]);
max_re = atof(argv[3]);
max_im = atof(argv[4]);
nb_pts_re = atoi(argv[5]);
nb_pts_im = atoi(argv[6]);
c_re = atof(argv[7]);
c_im = atof(argv[8]);
nb_ite = atoi(argv[9]);
}
catch (...)
{
printf( "Bad Args : see help (type nameofprogram without args)\n\n");
return 1;
}
}
if (argc == 6 )
{
min_re = -2;
min_im = -1;
max_re = 1;
max_im = 1;
try
{
nb_pts_re = atoi(argv[1]);
nb_pts_im = atoi(argv[2]);
c_re = atof(argv[3]);
c_im = atof(argv[4]);
nb_ite = atoi(argv[5]);
}
catch (...)
{
printf( "Bad Args : see help (type nameofprogram without args)\n\n");
return 1;
}
}
int size_i = sizeof(Uint32)*nb_pts_re*nb_pts_im;
Uint32 *d_img;
cudaMalloc(&d_img,size_i);
cplx_plan_struct cplx_plan, *d_cplx_plan;
cudaMalloc(&d_cplx_plan, sizeof(cplx_plan_struct));
dim3 blockDim = 1024;
dim3 gridDim = (nb_pts_re*nb_pts_im)/1024 + 1;
cplx_plan.min_re = min_re;
cplx_plan.min_im = min_im;
cplx_plan.max_re = max_re;
cplx_plan.max_im = max_im;
cplx_plan.nb_pts_re = nb_pts_re;
cplx_plan.nb_pts_im = nb_pts_im;
SDL_Init(SDL_INIT_VIDEO);
SDL_Surface *SDL_img = SDL_SetVideoMode(nb_pts_im, nb_pts_re, 32, SDL_HWSURFACE | SDL_DOUBLEBUF);
SDL_Event event;
bool quit = false;
bool recalc = true;
bool refresh = true;
int mouse_x, mouse_y;
double x, y;
while ( !quit )
{
while( SDL_PollEvent( &event ) )
{
switch( event.type )
{
case SDL_KEYDOWN:
switch ( event.key.keysym.sym )
{
case SDLK_UP:
mv_cplx_plan(&cplx_plan, UP);
recalc = true;
refresh = true;
break;
case SDLK_DOWN:
mv_cplx_plan(&cplx_plan, DOWN);
recalc = true;
refresh = true;
break;
case SDLK_LEFT:
mv_cplx_plan(&cplx_plan, LEFT);
recalc = true;
refresh = true;
break;
case SDLK_RIGHT:
mv_cplx_plan(&cplx_plan, RIGHT);
recalc = true;
refresh = true;
break;
case SDLK_KP_PLUS:
mv_cplx_plan(&cplx_plan, PLUS);
recalc = true;
refresh = true;
break;
case SDLK_KP_MINUS:
mv_cplx_plan(&cplx_plan, MINUS);
recalc = true;
refresh = true;
break;
case SDLK_KP_MULTIPLY:
nb_ite *= 2;
recalc = true;
refresh = true;
break;
case SDLK_KP_DIVIDE:
nb_ite /= 2;
recalc = true;
refresh = true;
break;
case SDLK_q:
quit = true;
recalc = false;
refresh = true;
break;
case SDLK_i:
cplx_plan.min_re = -2;
cplx_plan.min_im = -1;
cplx_plan.max_re = 1;
cplx_plan.max_im = 1;
recalc = true;
refresh = true;
break;
case SDLK_s :
SDL_SaveBMP(SDL_img, "data_julia.bmp");
break;
default:
break;
}
break;
case SDL_MOUSEMOTION :
SDL_GetMouseState(&mouse_x, &mouse_y);
refresh = true;
break;
default:
break;
}
}
if (refresh)
{
if (recalc)
{
recalc = false;
cudaMemcpy(d_cplx_plan, &cplx_plan, sizeof(cplx_plan_struct), cudaMemcpyHostToDevice);
z_funct<<<gridDim,blockDim>>>(d_img, d_cplx_plan, c_re, c_im, nb_ite);
cudaMemcpy(SDL_img->pixels, d_img, size_i, cudaMemcpyDeviceToHost);
}
refresh = false;
x = cplx_plan.min_re + mouse_x*1./nb_pts_re*(cplx_plan.max_re-cplx_plan.min_re);
y = cplx_plan.min_im + mouse_y*1./nb_pts_im*(cplx_plan.max_im-cplx_plan.min_im);
printf("\rmouse position (re : im) %.20lf : %.20lf | nb_ite %d ", x, y, nb_ite);
SDL_Flip(SDL_img);
}
}
SDL_Quit();
printf("\n\n");
}
|
3afdc251cccde8795e2e34e6a988560709173f28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_gtScalar (int n, double *result, double *x, double y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = (x[id] > y)?1.0:0.0;
}
} | 3afdc251cccde8795e2e34e6a988560709173f28.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_gtScalar (int n, double *result, double *x, double y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = (x[id] > y)?1.0:0.0;
}
} |
cebeafbb404afb597df1bb6b94e1736452a7fef1.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
// #include <iomanip>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <ctime>
#include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* global thread id: get thread id, 1D block and 2D grid, e.g. <<<(32,32),32>>> */
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* thread id within each block */
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
using namespace std;
typedef float FLOAT;
/** reduction using global memory */
__global__ void global_reduce_sum(FLOAT *d_in,FLOAT *d_out,int N)
{
    // d_in and d_out are global (GPU) memory buffers
    /**
     * 1. Global memory is addressed with the global index and is mainly used for communication between
     *    blocks (threads inside one block could also use it, but that is inefficient; intra-block
     *    communication should go through shared memory instead).
     * 2. Shared memory is for communication between the threads of one block; a block holds
     *    blockDim.x*blockDim.y threads (for a 1-D block, blockDim.y = 1), and those threads are
     *    synchronized with __syncthreads().
     * 3. Inside a block, every 32 threads form a warp that executes the same instruction and is
     *    implicitly synchronized, so __syncthreads() can be skipped within a warp to reduce overhead.
     * 4. For aligned, per-element work with no cross-thread data sharing, use global memory with one
     *    thread per element (e.g. vector addition); when data must be combined across threads, use
     *    shared memory with intra-block synchronization and treat the block as the unit (e.g. reduction).
     */
    // per-thread local variables (each thread's local memory)
    int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
    int tid = get_tid_per_block(); // thread index within this block, used for the block's shared-memory accesses
    int bid = get_bid(); // block index, used to store this block's partial result
    /**
     * Example with <<<128,256>>>:
     * 1. First reduce inside every block; afterwards 128 partial results remain.
     * 2. If that intermediate count is still large, repeat step 1.
     * 3. Finally put the 128 values into a single block (256 threads) to compute the final result.
     */
    // reduce within this block
for(unsigned int s=blockDim.x/2;s>0;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
d_in[idx]+=d_in[idx + s];
        __syncthreads(); // synchronize the threads of this block
}
    // thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=d_in[idx];
}
/** reduction using shared memory within each block */
__global__ void share_reduce_sum(FLOAT *d_in,FLOAT *d_out,int N)
{
    // extern __shared__ FLOAT sdatas[]; // declare shared memory
    extern __shared__ volatile FLOAT sdatas[]; // volatile is required because of the unrolled warp tail; without it the result is wrong
    int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
    int tid = get_tid_per_block(); // thread index within this block, used for the block's shared-memory accesses
    int bid = get_bid(); // block index, used to store this block's partial result
    // each block copies its slice of global memory into shared memory
sdatas[tid]=idx<N?d_in[idx] : 0;
__syncthreads();
    /**
     * Example with <<<128,256>>>:
     * 1. First reduce inside every block; afterwards 128 partial results remain.
     * 2. If that intermediate count is still large, repeat step 1.
     * 3. Finally put the 128 values into a single block (256 threads) to compute the final result.
     */
/*
    // reduce within this block
for(unsigned int s=blockDim.x/2;s>0;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
        __syncthreads(); // synchronize the threads of this block
}
*/
    // reduce within this block
for(unsigned int s=blockDim.x/2;s>32;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
__syncthreads(); // block
}
    // 32 threads form one warp, so the remaining steps are unrolled directly; the warp is implicitly synchronized and __syncthreads() is not needed
if (tid<32)
{
sdatas[tid]+=sdatas[tid + 32];
sdatas[tid]+=sdatas[tid + 16];
sdatas[tid]+=sdatas[tid + 8];
sdatas[tid]+=sdatas[tid + 4];
sdatas[tid]+=sdatas[tid + 2];
sdatas[tid]+=sdatas[tid + 1];
}
    // thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=sdatas[0];
}
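// Hedged note, not from the original source: the warp-level tail above relies on volatile loads plus
// implicit warp-synchronous execution. On Volta and newer architectures with independent thread
// scheduling this is no longer guaranteed; a safer formulation inserts __syncwarp() between the steps
// (sdatas[tid] += sdatas[tid + 32]; __syncwarp(); ... down to offset 1) or uses __shfl_down_sync.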
/** shared-memory reduction within each block, loop fully unrolled */
__global__ void share_reduce_sum2(FLOAT *d_in,FLOAT *d_out,int N)
{
    extern __shared__ volatile FLOAT sdatas[]; // volatile is required because of the unrolled warp tail; without it the result is wrong
    int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
    int tid = get_tid_per_block(); // thread index within this block, used for the block's shared-memory accesses
    int bid = get_bid(); // block index, used to store this block's partial result
    /** Notes
     * idx, tid, bid: per-thread local variables (thread-local memory) -- one copy per thread
     * d_in, d_out:  global variables (global memory) -- visible to all threads, including across blocks
     * sdatas:       this block's shared variable (shared memory) -- visible to all threads of the block
     */
    // each block copies its slice of global memory into shared memory
sdatas[tid]=idx<N?d_in[idx] : 0;
__syncthreads();
/*
    // reduce within this block
for(unsigned int s=blockDim.x/2;s>32;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
        __syncthreads(); // synchronize the threads of this block
}
*/
// blockDim.x=1024
if(tid<512)
sdatas[tid]+=sdatas[tid + 512];
__syncthreads();
if(tid<256)
sdatas[tid]+=sdatas[tid + 256];
__syncthreads();
if(tid<128)
sdatas[tid]+=sdatas[tid + 128];
__syncthreads();
if(tid<64)
sdatas[tid]+=sdatas[tid + 64];
__syncthreads();
    // 32 threads form one warp, so the remaining steps are unrolled directly; the warp is implicitly synchronized and __syncthreads() is not needed
if (tid<32)
{
sdatas[tid]+=sdatas[tid + 32];
sdatas[tid]+=sdatas[tid + 16];
sdatas[tid]+=sdatas[tid + 8];
sdatas[tid]+=sdatas[tid + 4];
sdatas[tid]+=sdatas[tid + 2];
sdatas[tid]+=sdatas[tid + 1];
}
    // thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=sdatas[0];
}
/** shared-memory reduction within each block, loop fully unrolled */
__global__ void share_reduce_sum3(FLOAT *d_in,FLOAT *d_out,int N)
{
    extern __shared__ volatile FLOAT sdatas[]; // volatile is required because of the unrolled warp tail; without it the result is wrong
    int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
    int tid = get_tid_per_block(); // thread index within this block, used for the block's shared-memory accesses
    int bid = get_bid(); // block index, used to store this block's partial result
    /** Notes
     * idx, tid, bid: per-thread local variables (thread-local memory) -- one copy per thread
     * d_in, d_out:  global variables (global memory) -- visible to all threads, including across blocks
     * sdatas:       this block's shared variable (shared memory) -- visible to all threads of the block
     */
    // each block copies its slice of global memory into shared memory,
    // doing the first addition during the load (each thread sums two elements)
sdatas[tid]=idx<N?d_in[idx]+d_in[idx + blockDim.x/2] : 0;
__syncthreads();
/*
    // reduce within this block
for(unsigned int s=blockDim.x/2;s>32;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
        __syncthreads(); // synchronize the threads of this block
}
*/
// blockDim.x=1024
/**
if(tid<512)
sdatas[tid]+=sdatas[tid + 512];
__syncthreads();
*/
if(tid<256)
sdatas[tid]+=sdatas[tid + 256];
__syncthreads();
if(tid<128)
sdatas[tid]+=sdatas[tid + 128];
__syncthreads();
if(tid<64)
sdatas[tid]+=sdatas[tid + 64];
__syncthreads();
    // 32 threads form one warp, so the remaining steps are unrolled directly; the warp is implicitly synchronized and __syncthreads() is not needed
if (tid<32)
{
sdatas[tid]+=sdatas[tid + 32];
sdatas[tid]+=sdatas[tid + 16];
sdatas[tid]+=sdatas[tid + 8];
sdatas[tid]+=sdatas[tid + 4];
sdatas[tid]+=sdatas[tid + 2];
sdatas[tid]+=sdatas[tid + 1];
}
    // thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=sdatas[0];
}
int main(int argc, char *argv[])
{
mycout<<" 2"<<endl;
int N=980000; // 1024*1024
int nbytes = N * sizeof(FLOAT);
    // launch configuration
    /* 1D block: threads per block */
    int bs = 1024; // must be a multiple of 32 (32*2^n); 1024 is the hardware maximum
int num_bs=bs;
/* 1D grid */
int grid=ceil(1.0*N / num_bs);
int num_grid=grid;
/* 2D grid*/
// int s=ceil(sqrt(1.0*N / num_bs));
// dim3 grid(s,s);
// int num_grid=s*s;
    // the second reduction stage runs in a single block
    int grid2=1;
    int gbytes = num_grid * sizeof(FLOAT); // bytes for the per-block partial results
    int onebytes = 1 * sizeof(FLOAT); // bytes for the final result
    /**====== 1. allocate host (CPU) memory ==========*/
    FLOAT *dev_x=NULL,*host_x=NULL;
    FLOAT *dev_y=NULL; // per-block partial results on the device
    FLOAT *dev_z=NULL,*host_z=NULL; // final result
    // host buffers can be allocated with plain malloc or with hipHostMalloc (pinned memory)
// host_x=(FLOAT*)malloc(nbytes);
// host_z=(FLOAT*)malloc(onebytes);
HANDLE_ERROR(hipHostMalloc((void **)&host_x, nbytes));
HANDLE_ERROR(hipHostMalloc((void **)&host_z, onebytes));
    /* initialize the input data on the CPU */
for (int i=0; i<N; i++) {
host_x[i] = 1;
}
    /**====== 2. allocate GPU (device) memory ======*/
HANDLE_ERROR(hipMalloc((void **)&dev_x, nbytes));
HANDLE_ERROR(hipMalloc((void **)&dev_y, gbytes));
HANDLE_ERROR(hipMalloc((void **)&dev_z, onebytes));
/**2*/
cout << fixed ;// << setprecision(2);
cout.precision(3);
// printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
// cout<<"allocated "<< (nbytes / (1024.f * 1024.f))<<" MB on GPU"<<endl;
    /**====== 3. copy the data from CPU to GPU ======*/
HANDLE_ERROR(hipMemcpy(dev_x, host_x, nbytes, hipMemcpyHostToDevice));
    /**====== 4. launch the GPU kernels ======*/
    HANDLE_ERROR(hipDeviceSynchronize()); // synchronize the CPU with the GPU
clock_t start = clock();
{
// stage 1
// global_reduce_sum<<<grid,bs>>>(dev_x,dev_y,N);
// share_reduce_sum<<<grid,bs,bs*sizeof(FLOAT)>>>(dev_x,dev_y,N);
// share_reduce_sum2<<<grid,bs,bs*sizeof(FLOAT)>>>(dev_x,dev_y,N);
hipLaunchKernelGGL(( share_reduce_sum3), dim3(grid),dim3(bs),bs*sizeof(FLOAT), 0, dev_x,dev_y,N);
}
{
// stage 2
// global_reduce_sum<<<grid2,bs>>>(dev_y,dev_z,num_grid);
// share_reduce_sum<<<grid2,bs,bs*sizeof(FLOAT)>>>(dev_y,dev_z,num_grid);
// share_reduce_sum2<<<grid2,bs,bs*sizeof(FLOAT)>>>(dev_y,dev_z,num_grid);
hipLaunchKernelGGL(( share_reduce_sum3), dim3(grid2),dim3(bs),bs*sizeof(FLOAT), 0, dev_y,dev_z,num_grid);
}
    // HANDLE_ERROR(hipDeviceSynchronize()); // synchronize the CPU with the GPU
    /**====== 5. copy the result from GPU back to CPU ======*/
HANDLE_ERROR(hipMemcpy(host_z,dev_z, onebytes, hipMemcpyDeviceToHost));
// cout.precision(15);
mycout<<"GPU cost time:"<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s"<<endl;
    // print the result
cout << host_z[0]<<endl;
    /**====== 6. free memory ======*/
HANDLE_ERROR(hipFree(dev_x));
HANDLE_ERROR(hipFree(dev_y));
HANDLE_ERROR(hipFree(dev_z));
    // buffers allocated with hipHostMalloc must be freed with hipHostFree
HANDLE_ERROR(hipHostFree(host_x));
HANDLE_ERROR(hipHostFree(host_z));
return 0;
}
| cebeafbb404afb597df1bb6b94e1736452a7fef1.cu | #include <iostream>
// #include <iomanip>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <ctime>
#include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* global thread id: get thread id, 1D block and 2D grid, e.g. <<<(32,32),32>>> */
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* thread id within each block */
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
using namespace std;
typedef float FLOAT;
/** reduction using global memory */
__global__ void global_reduce_sum(FLOAT *d_in,FLOAT *d_out,int N)
{
    // d_in and d_out are global (GPU) memory buffers
    /**
     * 1. Global memory is addressed with the global index and is mainly used for communication between
     *    blocks (threads inside one block could also use it, but that is inefficient; intra-block
     *    communication should go through shared memory instead).
     * 2. Shared memory is for communication between the threads of one block; a block holds
     *    blockDim.x*blockDim.y threads (for a 1-D block, blockDim.y = 1), and those threads are
     *    synchronized with __syncthreads().
     * 3. Inside a block, every 32 threads form a warp that executes the same instruction and is
     *    implicitly synchronized, so __syncthreads() can be skipped within a warp to reduce overhead.
     * 4. For aligned, per-element work with no cross-thread data sharing, use global memory with one
     *    thread per element (e.g. vector addition); when data must be combined across threads, use
     *    shared memory with intra-block synchronization and treat the block as the unit (e.g. reduction).
     */
    // per-thread local variables (each thread's local memory)
    int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
    int tid = get_tid_per_block(); // thread index within this block, used for the block's shared-memory accesses
    int bid = get_bid(); // block index, used to store this block's partial result
/**
* 假设<<<128,256>>>
* 1、先计算每个block内所有线程的结果,计算完成后会有128个中间结果
* 2、如果1计算的中间结果比较大,可以重复1操作
* 3、在将这128个值放在1个block(256个线程)计算得到最后的结果
*/
// 以每个block为计算单元
for(unsigned int s=blockDim.x/2;s>0;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
d_in[idx]+=d_in[idx + s];
__syncthreads(); // synchronize the threads within the block
}
// thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=d_in[idx];
}
/** Reduction using shared memory within each block */
__global__ void share_reduce_sum(FLOAT *d_in,FLOAT *d_out,int N)
{
// extern __shared__ FLOAT sdatas[]; // declare the shared memory
extern __shared__ volatile FLOAT sdatas[]; // volatile is required because of the unrolled warp loop below, otherwise the result is wrong
int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
int tid = get_tid_per_block(); // thread index within the block, used for this block's shared-memory accesses (the block is the unit of work)
int bid = get_bid(); // block index (where this block's partial result is stored)
// each block copies its slice of global memory into shared memory
sdatas[tid]=idx<N?d_in[idx] : 0;
__syncthreads();
/**
* Suppose the launch configuration is <<<128,256>>>:
* 1. First reduce within every block; afterwards there are 128 partial results.
* 2. If the number of partial results from step 1 is still large, repeat step 1.
* 3. Finally put those 128 values into a single block (256 threads) and reduce them to the final result.
*/
/*
// reduce with each block as the unit of work
for(unsigned int s=blockDim.x/2;s>0;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
__syncthreads(); // synchronize the threads within the block
}
*/
// reduce with each block as the unit of work
for(unsigned int s=blockDim.x/2;s>32;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
__syncthreads(); // synchronize the threads within the block
}
// 32 threads form one warp, so this part can be unrolled; a warp executes in lockstep, no __syncthreads() needed
if (tid<32)
{
sdatas[tid]+=sdatas[tid + 32];
sdatas[tid]+=sdatas[tid + 16];
sdatas[tid]+=sdatas[tid + 8];
sdatas[tid]+=sdatas[tid + 4];
sdatas[tid]+=sdatas[tid + 2];
sdatas[tid]+=sdatas[tid + 1];
}
// thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=sdatas[0];
}
/** Shared-memory reduction with the reduction loop fully unrolled */
__global__ void share_reduce_sum2(FLOAT *d_in,FLOAT *d_out,int N)
{
extern __shared__ volatile FLOAT sdatas[]; // volatile is required because of the unrolled warp loop below, otherwise the result is wrong
int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
int tid = get_tid_per_block(); // thread index within the block, used for this block's shared-memory accesses (the block is the unit of work)
int bid = get_bid(); // block index (where this block's partial result is stored)
/** Notes
* idx, tid, bid: per-thread local variables (thread-local memory) -- one per thread
* d_in, d_out:   global variables (global memory) -- visible to all threads, including across blocks
* sdatas:        the block's shared variable (shared memory) -- visible to all threads inside one block
*/
// each block copies its slice of global memory into shared memory
sdatas[tid]=idx<N?d_in[idx] : 0;
__syncthreads();
/*
// reduce with each block as the unit of work
for(unsigned int s=blockDim.x/2;s>32;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
__syncthreads(); // synchronize the threads within the block
}
*/
// blockDim.x=1024
if(tid<512)
sdatas[tid]+=sdatas[tid + 512];
__syncthreads();
if(tid<256)
sdatas[tid]+=sdatas[tid + 256];
__syncthreads();
if(tid<128)
sdatas[tid]+=sdatas[tid + 128];
__syncthreads();
if(tid<64)
sdatas[tid]+=sdatas[tid + 64];
__syncthreads();
// 32 threads form one warp, so this part can be unrolled; a warp executes in lockstep, no __syncthreads() needed
if (tid<32)
{
sdatas[tid]+=sdatas[tid + 32];
sdatas[tid]+=sdatas[tid + 16];
sdatas[tid]+=sdatas[tid + 8];
sdatas[tid]+=sdatas[tid + 4];
sdatas[tid]+=sdatas[tid + 2];
sdatas[tid]+=sdatas[tid + 1];
}
// thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=sdatas[0];
}
/** Shared-memory reduction with the reduction loop fully unrolled */
__global__ void share_reduce_sum3(FLOAT *d_in,FLOAT *d_out,int N)
{
extern __shared__ volatile FLOAT sdatas[]; // volatile is required because of the unrolled warp loop below, otherwise the result is wrong
int idx = get_tid(); // global index, used for global-memory accesses (one thread per element)
int tid = get_tid_per_block(); // thread index within the block, used for this block's shared-memory accesses (the block is the unit of work)
int bid = get_bid(); // block index (where this block's partial result is stored)
/** Notes
* idx, tid, bid: per-thread local variables (thread-local memory) -- one per thread
* d_in, d_out:   global variables (global memory) -- visible to all threads, including across blocks
* sdatas:        the block's shared variable (shared memory) -- visible to all threads inside one block
*/
// each block copies its slice of global memory into shared memory
// do one addition already while loading into shared memory, which halves the data and speeds things up
sdatas[tid]=idx<N?d_in[idx]+d_in[idx + blockDim.x/2] : 0;
__syncthreads();
/*
// reduce with each block as the unit of work
for(unsigned int s=blockDim.x/2;s>32;s>>=1) // s/=2
{
if(tid<s & idx + s<N)
// if(tid<s)
sdatas[tid]+=sdatas[tid + s];
__syncthreads(); // synchronize the threads within the block
}
*/
// blockDim.x=1024
/** this step has already been done while loading into shared memory
if(tid<512)
sdatas[tid]+=sdatas[tid + 512];
__syncthreads();
*/
if(tid<256)
sdatas[tid]+=sdatas[tid + 256];
__syncthreads();
if(tid<128)
sdatas[tid]+=sdatas[tid + 128];
__syncthreads();
if(tid<64)
sdatas[tid]+=sdatas[tid + 64];
__syncthreads();
// 32 threads form one warp, so this part can be unrolled; a warp executes in lockstep, no __syncthreads() needed
if (tid<32)
{
sdatas[tid]+=sdatas[tid + 32];
sdatas[tid]+=sdatas[tid + 16];
sdatas[tid]+=sdatas[tid + 8];
sdatas[tid]+=sdatas[tid + 4];
sdatas[tid]+=sdatas[tid + 2];
sdatas[tid]+=sdatas[tid + 1];
}
// thread 0 of each block writes out the block's partial result
if(tid==0)
d_out[bid]=sdatas[0];
}
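// --- Illustrative helper added by the editor (not part of the original file). It sketches the
// two-stage launch pattern described in the comments above, here with share_reduce_sum2 so that
// any n works without reading past the end of d_in. Assumptions: bs == 1024 (the unrolled
// kernels hard-code blockDim.x == 1024), n <= bs*bs so stage two fits into one block, and
// d_in, d_partial, d_out are device buffers holding n, ceil(n/bs) and 1 FLOATs respectively.
inline FLOAT reduce_sum_two_stage(FLOAT *d_in, FLOAT *d_partial, FLOAT *d_out, int n, int bs)
{
    int grid = (n + bs - 1) / bs; // one partial sum per block
    // stage 1: every block reduces its slice of d_in into d_partial[blockIdx.x]
    share_reduce_sum2<<<grid, bs, bs * sizeof(FLOAT)>>>(d_in, d_partial, n);
    // stage 2: a single block reduces the 'grid' partial sums into d_out[0]
    share_reduce_sum2<<<1, bs, bs * sizeof(FLOAT)>>>(d_partial, d_out, grid);
    FLOAT result = 0;
    cudaMemcpy(&result, d_out, sizeof(FLOAT), cudaMemcpyDeviceToHost); // implicit synchronization
    return result;
}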
int main(int argc, char *argv[])
{
mycout<<"reduction sum (two-stage)"<<endl;
int N=980000; // with the two-stage approach N can be at most 1024*1024
int nbytes = N * sizeof(FLOAT);
// stage one
/* 1D block: each block is bs x 1 threads */
int bs = 1024; // must be 32*2^n, at most 1024 (check the device limit)
int num_bs=bs;
/* 1D grid */
int grid=ceil(1.0*N / num_bs);
int num_grid=grid;
/* 2D grid*/
// int s=ceil(sqrt(1.0*N / num_bs));
// dim3 grid(s,s);
// int num_grid=s*s;
// stage two: combine the partial results
int grid2=1;
int gbytes = num_grid * sizeof(FLOAT); // partial (per-block) results
int onebytes = 1 * sizeof(FLOAT); // final result
/**====== 1. Create and initialize the host (CPU) variables ==========*/
FLOAT *dev_x=NULL,*host_x=NULL;
FLOAT *dev_y=NULL; // temporary buffer for the stage-one partial results
FLOAT *dev_z=NULL,*host_z=NULL;// final result
// pinned memory from cudaMallocHost transfers to the GPU faster than memory from a plain malloc
// host_x=(FLOAT*)malloc(nbytes);
// host_z=(FLOAT*)malloc(onebytes);
HANDLE_ERROR(cudaMallocHost((void **)&host_x, nbytes));
HANDLE_ERROR(cudaMallocHost((void **)&host_z, onebytes));
/* initialize the host data */
for (int i=0; i<N; i++) {
host_x[i] = 1;
}
/**====== 2. Allocate the device (GPU) buffers ======*/
HANDLE_ERROR(cudaMalloc((void **)&dev_x, nbytes));
HANDLE_ERROR(cudaMalloc((void **)&dev_y, gbytes));
HANDLE_ERROR(cudaMalloc((void **)&dev_z, onebytes));
/** force fixed-point formatting of the output, with a fixed number of decimals */
cout << fixed ;// << setprecision(2);
cout.precision(3);
// printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
// cout<<"allocated "<< (nbytes / (1024.f * 1024.f))<<" MB on GPU"<<endl;
/**====== 3. Copy the host data to the GPU ======*/
HANDLE_ERROR(cudaMemcpy(dev_x, host_x, nbytes, cudaMemcpyHostToDevice));
/**====== 4. Launch the GPU computation ======*/
HANDLE_ERROR(cudaDeviceSynchronize()); // wait until the host data has been fully copied to the GPU
clock_t start = clock();
{
// stage 1
// global_reduce_sum<<<grid,bs>>>(dev_x,dev_y,N);
// share_reduce_sum<<<grid,bs,bs*sizeof(FLOAT)>>>(dev_x,dev_y,N);
// share_reduce_sum2<<<grid,bs,bs*sizeof(FLOAT)>>>(dev_x,dev_y,N);
share_reduce_sum3<<<grid,bs,bs*sizeof(FLOAT)>>>(dev_x,dev_y,N);
}
{
// stage 2
// global_reduce_sum<<<grid2,bs>>>(dev_y,dev_z,num_grid);
// share_reduce_sum<<<grid2,bs,bs*sizeof(FLOAT)>>>(dev_y,dev_z,num_grid);
// share_reduce_sum2<<<grid2,bs,bs*sizeof(FLOAT)>>>(dev_y,dev_z,num_grid);
share_reduce_sum3<<<grid2,bs,bs*sizeof(FLOAT)>>>(dev_y,dev_z,num_grid);
}
// HANDLE_ERROR(cudaDeviceSynchronize()); // CPU waits for the GPU to finish
/**====== 5. Copy the GPU result back to the CPU ======*/
HANDLE_ERROR(cudaMemcpy(host_z,dev_z, onebytes, cudaMemcpyDeviceToHost));
// cout.precision(15);
mycout<<"GPU cost time:"<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s"<<endl;
// print the result
cout << host_z[0]<<endl;
/**====== 6. Free the memory ======*/
HANDLE_ERROR(cudaFree(dev_x));
HANDLE_ERROR(cudaFree(dev_y));
HANDLE_ERROR(cudaFree(dev_z));
// memory from cudaMallocHost must be released with cudaFreeHost
HANDLE_ERROR(cudaFreeHost(host_x));
HANDLE_ERROR(cudaFreeHost(host_z));
return 0;
}
|
ceb36528b861c8e1fbde53992feb32081e353878.hip | // !!! This is a file automatically generated by hipify!!!
#include "../inc/yyfnutil.h"
int main(){
hipblasHandle_t cn;
CublasCreate(&cn);
int m = 1024, n = 256;
float *A = (float*) Malloc(m*n*sizeof(float));
for(int i = 0; i < m; i += 2){
for(int j = 0; j < n; j++){
A[i*n+j] = 1.0f;
}
for(int j = 0; j < n; j++){
A[i*n+n+j] = 0.0f;
}
}
float *V = (float*) Malloc(n*sizeof(float));
for(int i = 0; i < n; i++) V[i] = 1.0f;
float *y = (float*) Malloc(m*sizeof(float));
float *d_A;
CudaMalloc((void**)&d_A, m*n*sizeof(float));
CudaMemcpy(d_A, A, m*n*sizeof(float), hipMemcpyHostToDevice);
float *d_V;
CudaMalloc((void**)&d_V, n*sizeof(float));
hipMemcpy(d_V, V, n*sizeof(float), hipMemcpyHostToDevice);
float *d_y;
CudaMalloc((void**)&d_y, m*sizeof(float));
float alpha = 1.0f, beta = 0.0f;
CublasSgemv(cn, HIPBLAS_OP_N, m, n, &alpha, d_A, n, d_V, 1, &beta, d_y, 1);
CudaDeviceSynchronize();
CudaMemcpy(y, d_y, m*sizeof(float), hipMemcpyDeviceToHost);
bool passed = true;
for(int i = 0; i < m; i += 2){
if(256.0f != y[i]){
passed = false;
printf("%d, %f\n", i, y[i]);
}
if(0.0f != y[i+1]){
passed = false;
printf("%d, %f\n", i, y[i+1]);
}
}
printf("%s %s\n", __FILE__, passed?"Passed":"failed");
free(A);
free(V);
free(y);
CudaFree(d_A);
CudaFree(d_V);
CudaFree(d_y);
CublasDestroy(cn);
return 0;
}
| ceb36528b861c8e1fbde53992feb32081e353878.cu | #include "../inc/yyfnutil.h"
int main(){
cublasHandle_t cn;
CublasCreate(&cn);
int m = 1024, n = 256;
float *A = (float*) Malloc(m*n*sizeof(float));
for(int i = 0; i < m; i += 2){
for(int j = 0; j < n; j++){
A[i*n+j] = 1.0f;
}
for(int j = 0; j < n; j++){
A[i*n+n+j] = 0.0f;
}
}
float *V = (float*) Malloc(n*sizeof(float));
for(int i = 0; i < n; i++) V[i] = 1.0f;
float *y = (float*) Malloc(m*sizeof(float));
float *d_A;
CudaMalloc((void**)&d_A, m*n*sizeof(float));
CudaMemcpy(d_A, A, m*n*sizeof(float), cudaMemcpyHostToDevice);
float *d_V;
CudaMalloc((void**)&d_V, n*sizeof(float));
cudaMemcpy(d_V, V, n*sizeof(float), cudaMemcpyHostToDevice);
float *d_y;
CudaMalloc((void**)&d_y, m*sizeof(float));
float alpha = 1.0f, beta = 0.0f;
CublasSgemv(cn, CUBLAS_OP_N, m, n, &alpha, d_A, n, d_V, 1, &beta, d_y, 1);
CudaDeviceSynchronize();
CudaMemcpy(y, d_y, m*sizeof(float), cudaMemcpyDeviceToHost);
bool passed = true;
for(int i = 0; i < m; i += 2){
if(256.0f != y[i]){
passed = false;
printf("%d, %f\n", i, y[i]);
}
if(0.0f != y[i+1]){
passed = false;
printf("%d, %f\n", i, y[i+1]);
}
}
printf("%s %s\n", __FILE__, passed?"Passed":"failed");
free(A);
free(V);
free(y);
CudaFree(d_A);
CudaFree(d_V);
CudaFree(d_y);
CublasDestroy(cn);
return 0;
}
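// --- Editor's sketch (not part of the original file). CublasCreate/CublasSgemv/CudaMalloc above
// are wrappers from yyfnutil.h and are assumed to handle error checking and the matrix layout.
// With the raw cuBLAS v2 API, which is column-major, the row-major m x n matrix A built in main
// (lda = n) is the transpose of an n x m column-major matrix, so the same y = A * V could be
// computed roughly like this (the helper name is illustrative only):
static void sgemv_row_major(cublasHandle_t handle, int m, int n,
                            const float *d_A /* row-major m x n */, const float *d_x, float *d_y)
{
    const float alpha = 1.0f, beta = 0.0f;
    // treat d_A as an n x m column-major matrix and request its transpose
    cublasSgemv(handle, CUBLAS_OP_T, n, m, &alpha, d_A, n, d_x, 1, &beta, d_y, 1);
}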
|
14387b7af2fed514675b9a2d49e44dfc1a7902fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/device_vector.h>
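// Editor's sketch (an addition, not in the original): the repeated
// hipDeviceSynchronize()/fprintf/exit blocks below could be collapsed into a single helper
// macro along these lines; HIP_SYNC_CHECK is a hypothetical name used only for illustration.
#include <cstdio>
#include <cstdlib>
#define HIP_SYNC_CHECK()                                                        \
  do {                                                                          \
    hipError_t err_ = hipDeviceSynchronize();                                   \
    if (err_ != hipSuccess) {                                                   \
      fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",                \
              __FILE__, __LINE__, hipGetErrorString(err_));                     \
      exit(-1);                                                                 \
    }                                                                           \
  } while (0)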
// TODO: where to use?
template <typename scalar_t>
__global__ void tempKernel(
const scalar_t* __restrict__ g_in,
scalar_t* __restrict__ g_temp,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (g_in[i]-0.0 < 0.0001 && g_in[i]-0.0 > -0.0001){
g_temp[i] = 0;
}
else {
g_temp[i] = 2;
}
}
}
template <typename scalar_t>
__global__ void maskKernel(
const scalar_t* __restrict__ g_in,
int* __restrict__ g_decodeMask,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (g_in[i] == 0){
g_decodeMask[i] = 0;
}
else {
g_decodeMask[i] = 1;
}
}
}
// TODO: where to use?
__global__ void prefixsumKernel(
const int* __restrict__ X,
int* __restrict__ XY,
int* __restrict__ Y,
size_t InputSize) {
auto BLOCK_SIZE = 32*((InputSize+32)/32);
//printf("BLOCK_SIZE=%d\n", BLOCK_SIZE);
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < InputSize) {XY[threadIdx.x] = X[i];}
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
int index = (threadIdx.x+1)*stride*2 - 1;
if(index < 2*BLOCK_SIZE)
XY[index] += XY[index - stride]; // index is always bigger than stride
}
for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2) {
//for (int stride2 = BLOCK_SIZE/2; stride2 > 0; stride2 = stride2/2) {
int index2 = (threadIdx.x+1)*stride*2 - 1;
if(index2 < 2*BLOCK_SIZE)
XY[index2 + stride] += XY[index2];
}
if (i < InputSize) Y[i] = XY[threadIdx.x];
}
__global__ void compactKernel(int* __restrict__ g_scannedBackwardMask,
int* g_compactedBackwardMask,
int* g_totalRuns,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (i == (n - 1)) {
g_compactedBackwardMask[g_scannedBackwardMask[i]] = i + 1;
*g_totalRuns = g_scannedBackwardMask[i];
}
if (i == 0) {
if(g_scannedBackwardMask[0] == 1) {
g_compactedBackwardMask[0] = 0;
}
}
else if (g_scannedBackwardMask[i] != g_scannedBackwardMask[i - 1]) {
g_compactedBackwardMask[g_scannedBackwardMask[i] - 1] = i;
}
g_compactedBackwardMask[g_scannedBackwardMask[n-1]] = n;
g_totalRuns[0] = g_scannedBackwardMask[n-1];
}
}
template <typename scalar_t>
__global__ void scatterKernel(
int* g_compactedBackwardMask,
int* g_totalRuns,
scalar_t* __restrict__ g_countsOut) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
int n = *g_totalRuns;
for (int i = index; i < n; i += stride) {
if (i == 0)
g_countsOut[i] = g_compactedBackwardMask[i];
else
g_countsOut[i] = g_compactedBackwardMask[i] - g_compactedBackwardMask[i-1] - 1;
}
g_countsOut[n] = g_compactedBackwardMask[n];
}
template <typename scalar_t>
__global__ void recordKernel(
int* g_compactedBackwardMask,
int* g_totalRuns,
scalar_t* __restrict__ g_in,
scalar_t* __restrict__ g_symbolsOut) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
int n = *g_totalRuns;
for (int i = index; i < n; i += stride) {
if(g_compactedBackwardMask[i] != -1){
g_symbolsOut[i] = g_in[g_compactedBackwardMask[i]];
}
}
}
// TODO: where to use?
std::vector<at::Tensor> rle_cuda_encode_2(at::Tensor input, at::Tensor input_int) {
const auto n = input.size(1);
const int threads = 512; //256
int blocks = (n + threads ) / threads;
if(blocks > 65535)
blocks = 65535;
int *compactedBackwardMask;
auto g_countsOut = at::ones({1, n}, input_int.type()).to(at::kCUDA);
auto g_symbolsOut = at::ones({1, n}, input.type()).to(at::kCUDA);
if(0 != hipMalloc(&compactedBackwardMask, n*sizeof(int)))
std::cout<<__LINE__<<" hipMalloc error "<<std::endl;
thrust::inclusive_scan(thrust::device, compactedBackwardMask, compactedBackwardMask + n, compactedBackwardMask);
return {g_countsOut, g_symbolsOut};
}
std::vector<at::Tensor> rle_cuda_encode(at::Tensor input, at::Tensor input_int) {
int device;
hipGetDevice(&device);
const auto n = input.size(1);
const int threads = 512; //256
int blocks = (n + threads ) / threads;
if(blocks > 65535)
blocks = 65535;
int *decodeMask, *scannedBackwardMask;
if(0 != hipMalloc(&decodeMask, n*sizeof(int)))
std::cout<<__LINE__<<" hipMalloc error "<<std::endl;
hipDeviceSynchronize();
if(0 != hipMalloc(&scannedBackwardMask, n*sizeof(int)))
std::cout<<__LINE__<<" hipMalloc error "<<std::endl;
auto err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
AT_DISPATCH_INTEGRAL_TYPES(input_int.type(), "rle_encode_cuda", ([&] {
hipLaunchKernelGGL(( maskKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
input_int.data<scalar_t>(),
decodeMask,
n);
}));
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
thrust::inclusive_scan(thrust::device, decodeMask, decodeMask + n, scannedBackwardMask);
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipFree(decodeMask);
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
hipDeviceSynchronize();
int *totalRuns, *compactedBackwardMask;
hipMalloc(&compactedBackwardMask, (n+1)*sizeof(int));
hipDeviceSynchronize();
hipMallocManaged(&totalRuns, sizeof(int));
hipDeviceSynchronize();
hipLaunchKernelGGL(( compactKernel), dim3(blocks), dim3(threads), 0, 0, scannedBackwardMask, compactedBackwardMask, totalRuns, n);
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipFree(scannedBackwardMask);
hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
int k = totalRuns[0]+1;
auto g_countsOut = at::ones({1, k}, input_int.type()).to(at::kCUDA);
hipDeviceSynchronize();
AT_DISPATCH_INTEGRAL_TYPES(input_int.type(), "rle_encode_cuda", ([&] {
hipLaunchKernelGGL(( scatterKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
compactedBackwardMask,
totalRuns,
g_countsOut.data<scalar_t>());
}));
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
auto g_symbolsOut = at::ones({1, *totalRuns}, input.type()).to(at::kCUDA);
hipDeviceSynchronize();
AT_DISPATCH_FLOATING_TYPES(input.type(), "rle_encode_cuda", ([&] {
hipLaunchKernelGGL(( recordKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
compactedBackwardMask,
totalRuns,
input.data<scalar_t>(),
g_symbolsOut.data<scalar_t>());
}));
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipFree(compactedBackwardMask);
hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipFree(totalRuns);
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
return {g_countsOut, g_symbolsOut};
}
template <typename scalar_t>
__global__ void sumzeroKernel(
scalar_t* __restrict__ g_countsOut,
int* result,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
result[i] = g_countsOut[i];
}
}
__global__ void sumindexKernel(
int* result,
size_t n ) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
result[i] += i;
}
}
template <typename scalar_t>
__global__ void decodeKernel(
int* temp,
scalar_t* __restrict__ g_symbolsOut,
scalar_t* __restrict__ g_output,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
g_output[temp[i]] = g_symbolsOut[i];
}
}
at::Tensor rle_cuda_decode(at::Tensor countsOut, at::Tensor symbolsOut, at::Tensor result) {
const auto n = symbolsOut.size(1);
const int threads = 256; //256
int blocks = (n + threads - 1) / threads;
if(blocks > 65535)
blocks = 65535;
int *temp;
hipError_t err;
if(0 != hipMalloc(&temp, n*sizeof(int)))
std::cout<<__LINE__<<" malloc failed"<<std::endl;
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
AT_DISPATCH_INTEGRAL_TYPES(countsOut.type(), "rle_encode_cuda", ([&] {
hipLaunchKernelGGL(( sumzeroKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
countsOut.data<scalar_t>(),
temp,
n);
}));
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
thrust::inclusive_scan(thrust::device, temp, temp + n, temp);
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
hipLaunchKernelGGL(( sumindexKernel), dim3(blocks), dim3(threads), 0, 0, temp, n);
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
auto totalSize = symbolsOut.size(1);
hipDeviceSynchronize();
AT_DISPATCH_FLOATING_TYPES(symbolsOut.type(), "rle_encode_cuda", ([&] {
hipLaunchKernelGGL(( decodeKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
temp,
symbolsOut.data<scalar_t>(),
result.data<scalar_t>(),
totalSize);
}));
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipFree(temp);
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
err = hipDeviceSynchronize();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, hipGetErrorString( err ) );
exit( -1 );
}
return result;
}
| 14387b7af2fed514675b9a2d49e44dfc1a7902fe.cu | #include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/device_vector.h>
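// Editor's sketch (an addition, not in the original): the repeated
// cudaDeviceSynchronize()/fprintf/exit blocks below could be collapsed into a single helper
// macro along these lines; CUDA_SYNC_CHECK is a hypothetical name used only for illustration.
#include <cstdio>
#include <cstdlib>
#define CUDA_SYNC_CHECK()                                                       \
  do {                                                                          \
    cudaError_t err_ = cudaDeviceSynchronize();                                 \
    if (err_ != cudaSuccess) {                                                  \
      fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",                \
              __FILE__, __LINE__, cudaGetErrorString(err_));                    \
      exit(-1);                                                                 \
    }                                                                           \
  } while (0)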
// TODO: where to use?
template <typename scalar_t>
__global__ void tempKernel(
const scalar_t* __restrict__ g_in,
scalar_t* __restrict__ g_temp,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (g_in[i]-0.0 < 0.0001 && g_in[i]-0.0 > -0.0001){
g_temp[i] = 0;
}
else {
g_temp[i] = 2;
}
}
}
template <typename scalar_t>
__global__ void maskKernel(
const scalar_t* __restrict__ g_in,
int* __restrict__ g_decodeMask,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (g_in[i] == 0){
g_decodeMask[i] = 0;
}
else {
g_decodeMask[i] = 1;
}
}
}
// TODO: where to use?
__global__ void prefixsumKernel(
const int* __restrict__ X,
int* __restrict__ XY,
int* __restrict__ Y,
size_t InputSize) {
auto BLOCK_SIZE = 32*((InputSize+32)/32);
//printf("BLOCK_SIZE=%d\n", BLOCK_SIZE);
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < InputSize) {XY[threadIdx.x] = X[i];}
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
int index = (threadIdx.x+1)*stride*2 - 1;
if(index < 2*BLOCK_SIZE)
XY[index] += XY[index - stride]; // index is always bigger than stride
}
for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2) {
//for (int stride2 = BLOCK_SIZE/2; stride2 > 0; stride2 = stride2/2) {
int index2 = (threadIdx.x+1)*stride*2 - 1;
if(index2 < 2*BLOCK_SIZE)
XY[index2 + stride] += XY[index2];
}
if (i < InputSize) Y[i] = XY[threadIdx.x];
}
__global__ void compactKernel(int* __restrict__ g_scannedBackwardMask,
int* g_compactedBackwardMask,
int* g_totalRuns,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
if (i == (n - 1)) {
g_compactedBackwardMask[g_scannedBackwardMask[i]] = i + 1;
*g_totalRuns = g_scannedBackwardMask[i];
}
if (i == 0) {
if(g_scannedBackwardMask[0] == 1) {
g_compactedBackwardMask[0] = 0;
}
}
else if (g_scannedBackwardMask[i] != g_scannedBackwardMask[i - 1]) {
g_compactedBackwardMask[g_scannedBackwardMask[i] - 1] = i;
}
g_compactedBackwardMask[g_scannedBackwardMask[n-1]] = n;
g_totalRuns[0] = g_scannedBackwardMask[n-1];
}
}
template <typename scalar_t>
__global__ void scatterKernel(
int* g_compactedBackwardMask,
int* g_totalRuns,
scalar_t* __restrict__ g_countsOut) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
int n = *g_totalRuns;
for (int i = index; i < n; i += stride) {
if (i == 0)
g_countsOut[i] = g_compactedBackwardMask[i];
else
g_countsOut[i] = g_compactedBackwardMask[i] - g_compactedBackwardMask[i-1] - 1;
}
g_countsOut[n] = g_compactedBackwardMask[n];
}
template <typename scalar_t>
__global__ void recordKernel(
int* g_compactedBackwardMask,
int* g_totalRuns,
scalar_t* __restrict__ g_in,
scalar_t* __restrict__ g_symbolsOut) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
int n = *g_totalRuns;
for (int i = index; i < n; i += stride) {
if(g_compactedBackwardMask[i] != -1){
g_symbolsOut[i] = g_in[g_compactedBackwardMask[i]];
}
}
}
// TODO: where to use?
std::vector<at::Tensor> rle_cuda_encode_2(at::Tensor input, at::Tensor input_int) {
const auto n = input.size(1);
const int threads = 512; //256
int blocks = (n + threads ) / threads;
if(blocks > 65535)
blocks = 65535;
int *compactedBackwardMask;
auto g_countsOut = at::ones({1, n}, input_int.type()).to(at::kCUDA);
auto g_symbolsOut = at::ones({1, n}, input.type()).to(at::kCUDA);
if(0 != cudaMalloc(&compactedBackwardMask, n*sizeof(int)))
std::cout<<__LINE__<<" cudaMalloc error "<<std::endl;
thrust::inclusive_scan(thrust::device, compactedBackwardMask, compactedBackwardMask + n, compactedBackwardMask);
return {g_countsOut, g_symbolsOut};
}
std::vector<at::Tensor> rle_cuda_encode(at::Tensor input, at::Tensor input_int) {
int device;
cudaGetDevice(&device);
const auto n = input.size(1);
const int threads = 512; //256
int blocks = (n + threads ) / threads;
if(blocks > 65535)
blocks = 65535;
int *decodeMask, *scannedBackwardMask;
if(0 != cudaMalloc(&decodeMask, n*sizeof(int)))
std::cout<<__LINE__<<" cudaMalloc error "<<std::endl;
cudaDeviceSynchronize();
if(0 != cudaMalloc(&scannedBackwardMask, n*sizeof(int)))
std::cout<<__LINE__<<" cudaMalloc error "<<std::endl;
auto err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
AT_DISPATCH_INTEGRAL_TYPES(input_int.type(), "rle_encode_cuda", ([&] {
maskKernel<scalar_t><<<blocks, threads>>>(
input_int.data<scalar_t>(),
decodeMask,
n);
}));
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
thrust::inclusive_scan(thrust::device, decodeMask, decodeMask + n, scannedBackwardMask);
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaFree(decodeMask);
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
cudaDeviceSynchronize();
int *totalRuns, *compactedBackwardMask;
cudaMalloc(&compactedBackwardMask, (n+1)*sizeof(int));
cudaDeviceSynchronize();
cudaMallocManaged(&totalRuns, sizeof(int));
cudaDeviceSynchronize();
compactKernel<<<blocks, threads>>>(scannedBackwardMask, compactedBackwardMask, totalRuns, n);
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaFree(scannedBackwardMask);
cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
int k = totalRuns[0]+1;
auto g_countsOut = at::ones({1, k}, input_int.type()).to(at::kCUDA);
cudaDeviceSynchronize();
AT_DISPATCH_INTEGRAL_TYPES(input_int.type(), "rle_encode_cuda", ([&] {
scatterKernel<scalar_t><<<blocks, threads>>>(
compactedBackwardMask,
totalRuns,
g_countsOut.data<scalar_t>());
}));
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
auto g_symbolsOut = at::ones({1, *totalRuns}, input.type()).to(at::kCUDA);
cudaDeviceSynchronize();
AT_DISPATCH_FLOATING_TYPES(input.type(), "rle_encode_cuda", ([&] {
recordKernel<scalar_t><<<blocks, threads>>>(
compactedBackwardMask,
totalRuns,
input.data<scalar_t>(),
g_symbolsOut.data<scalar_t>());
}));
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaFree(compactedBackwardMask);
cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaFree(totalRuns);
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
return {g_countsOut, g_symbolsOut};
}
template <typename scalar_t>
__global__ void sumzeroKernel(
scalar_t* __restrict__ g_countsOut,
int* result,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
result[i] = g_countsOut[i];
}
}
__global__ void sumindexKernel(
int* result,
size_t n ) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
result[i] += i;
}
}
template <typename scalar_t>
__global__ void decodeKernel(
int* temp,
scalar_t* __restrict__ g_symbolsOut,
scalar_t* __restrict__ g_output,
size_t n) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
g_output[temp[i]] = g_symbolsOut[i];
}
}
at::Tensor rle_cuda_decode(at::Tensor countsOut, at::Tensor symbolsOut, at::Tensor result) {
const auto n = symbolsOut.size(1);
const int threads = 256; //256
int blocks = (n + threads - 1) / threads;
if(blocks > 65535)
blocks = 65535;
int *temp;
cudaError err;
if(0 != cudaMalloc(&temp, n*sizeof(int)))
std::cout<<__LINE__<<" malloc failed"<<std::endl;
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
AT_DISPATCH_INTEGRAL_TYPES(countsOut.type(), "rle_encode_cuda", ([&] {
sumzeroKernel<scalar_t><<<blocks, threads>>>(
countsOut.data<scalar_t>(),
temp,
n);
}));
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
thrust::inclusive_scan(thrust::device, temp, temp + n, temp);
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
sumindexKernel<<<blocks, threads>>>(temp, n);
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
auto totalSize = symbolsOut.size(1);
cudaDeviceSynchronize();
AT_DISPATCH_FLOATING_TYPES(symbolsOut.type(), "rle_encode_cuda", ([&] {
decodeKernel<scalar_t><<<blocks, threads>>>(
temp,
symbolsOut.data<scalar_t>(),
result.data<scalar_t>(),
totalSize);
}));
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaFree(temp);
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
err = cudaDeviceSynchronize();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
__FILE__, __LINE__, cudaGetErrorString( err ) );
exit( -1 );
}
return result;
}
|
556f7a6f599d2bc343e7b2aaacb8df8da77628cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cell_grid.cuh>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_mtgp32_host.h>
////////////////////////////// Fitness texture ////////////////////////////////
struct TextureInfo
{
byte *device_data;
size_t pitch;
hipChannelFormatDesc textureCFD;
};
// texture<unsigned char, 2, hipReadModeElementType> fitnessTexRef;
texture<uint16_t, 2, hipReadModeElementType> fitnessTexRef;
TextureInfo fitnessTex = {};
///////////////////////////////////////////////////////////////////////////////
struct RandomGeneratorInfo
{
hiprandStateMtgp32_t *state;
int xMin;
int yMin;
int xMax;
int yMax;
};
//////////////////////////////////////////////////////// KERNELS /////////////////////////////////////////////////////////////////////
__global__ void generate_random_population(CellGridInfo gridInfo, RandomGeneratorInfo rng)
{
//NOTE: This version, with 200 threads in 1D block inside 1D grid is about 9x faster than
// version with 2D blocks.
size_t n = gridInfo.width * gridInfo.height;
uint rngStateOffset = threadIdx.x;
uint tIdX = (blockIdx.x * blockDim.x) + threadIdx.x;
uint strideX = blockDim.x * gridDim.x;
while (tIdX < n)
{
float f1 = hiprand_uniform(&rng.state[rngStateOffset]);
float f2 = hiprand_uniform(&rng.state[rngStateOffset]);
int x = rng.xMin + ((int)(f1 * (rng.xMax - rng.xMin) + 0.999999));
int y = rng.yMin + ((int)(f2 * (rng.yMax - rng.yMin) + 0.999999));
Cell rnd(x, y);
rnd.fitness = tex2D<uint16_t>(fitnessTexRef, x, y);
gridInfo.data[tIdX] = rnd;
tIdX += strideX;
}
}
// This kernel will evole current population into new one.
template <NeighborhoodType neigh>
__global__ void evolve_kernel(const CellGridInfo currPop, CellGridInfo nextPop)
{
uint tIdX = (blockIdx.x * blockDim.x) + threadIdx.x;
uint tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
uint strideX = blockDim.x * gridDim.x;
uint strideY = blockDim.y * gridDim.y;
while (tIdX < currPop.width)
{
tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
while (tIdY < currPop.height)
{
//Cell *cell = ((Cell *)((char *)currPop.data + tIdY * currPop.pitch) + tIdX);
Cell *cell = &currPop.data[(tIdY * currPop.width) + tIdX];
// We can't find the partner in cell code, because we don't know the fitness value.
// We would have to do 2 iterations of these loops: one beforehand just to set up the fitness value,
// then synchronize all threads and find the mating partner.
Cell *partner = nullptr;
Cell *neighArr;
int neighSize;
switch (neigh)
{
case NeighborhoodType_L5:
{
neighSize = 4;
Cell neighborhood[4];
cell->get_neighborhood<neigh>(tIdX, tIdY, currPop, neighborhood);
neighArr = neighborhood;
}
break;
case NeighborhoodType_L9:
case NeighborhoodType_C9:
{
neighSize = 8;
Cell neighborhood[8];
cell->get_neighborhood<neigh>(tIdX, tIdY, currPop, neighborhood);
neighArr = neighborhood;
}
break;
case NeighborhoodType_C13:
{
neighSize = 12;
Cell neighborhood[12];
cell->get_neighborhood<neigh>(tIdX, tIdY, currPop, neighborhood);
neighArr = neighborhood;
}
break;
}
float bestFitness = -1;
{
for (size_t i = 0; i < neighSize; i++)
{
neighArr[i].fitness = (float)tex2D<uint16_t>(fitnessTexRef, neighArr[i].x, neighArr[i].y);
if (neighArr[i].fitness > bestFitness)
{
bestFitness = neighArr[i].fitness;
partner = &neighArr[i];
}
}
}
Cell offspring = Cell(cell, partner);
offspring.fitness = (float)tex2D<uint16_t>(fitnessTexRef, offspring.x, offspring.y);
//offspring.fitness = 1.0f;
//*((Cell *)((char *)nextPop.data + tIdY * nextPop.pitch) + tIdX) = offspring;
nextPop.data[(tIdY * nextPop.width) + tIdX] = offspring;
tIdY += strideY;
}
tIdX += strideX;
}
}
template <unsigned int blockSize>
__device__ void warp_reduce(volatile float *sData, unsigned int tId)
{
if (blockSize >= 64)
sData[tId] += sData[tId + 32];
if (blockSize >= 32)
sData[tId] += sData[tId + 16];
if (blockSize >= 16)
sData[tId] += sData[tId + 8];
if (blockSize >= 8)
sData[tId] += sData[tId + 4];
if (blockSize >= 4)
sData[tId] += sData[tId + 2];
if (blockSize >= 2)
sData[tId] += sData[tId + 1];
}
template <unsigned int blockSize>
__global__ void smart_reduce(CellGridInfo grid, unsigned int n, float *finalSum)
{
extern __shared__ float sData[];
unsigned int tId = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * 2) + tId;
unsigned int gridSize = blockSize * 2 * gridDim.x;
sData[tId] = 0.0f;
while (i < n)
{
sData[tId] += grid.data[i].fitness + grid.data[i + blockSize].fitness;
i += gridSize;
}
__syncthreads();
if (blockSize >= 512)
{
if (tId < 256)
{
sData[tId] += sData[tId + 256];
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tId < 128)
{
sData[tId] += sData[tId + 128];
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tId < 64)
{
sData[tId] += sData[tId + 64];
}
__syncthreads();
}
if (tId < 32)
warp_reduce<blockSize>(sData, tId);
if (tId == 0)
{
atomicAdd(finalSum, sData[0]);
}
}
////////////////////////////////////////////////// END OF KERNELS /////////////////////////////////////////////////////////////////////
CellGrid::CellGrid() {}
CellGrid::CellGrid(const size_t width, const size_t height, KernelSettings kernelSettings)
{
this->width = width;
this->height = height;
this->kernelSettings = kernelSettings;
}
CellGrid::~CellGrid()
{
// Unbind texture and release its memory.
CUDA_CALL(hipUnbindTexture(fitnessTexRef));
CUDA_CALL(hipFree(fitnessTex.device_data));
// Release populations memory.
if (device_currPopMemory != nullptr)
hipFree(device_currPopMemory);
if (device_nextPopMemory != nullptr)
hipFree(device_nextPopMemory);
}
void CellGrid::create_fitness_texture(const Image &fitnessImage)
{
uint channelCount = fitnessImage.channel_count();
assert((channelCount == 1) ||
(channelCount == 2) ||
(channelCount == 4) &&
"Cuda texture only support 1,2 or 4 sized vectors.");
textureWidth = fitnessImage.width();
textureHeight = fitnessImage.height();
size_t memoryWidth = textureWidth * pixel_byte_size(fitnessImage.image_type());
size_t memoryRowCount = textureHeight;
CUDA_CALL(hipMallocPitch((void **)&fitnessTex.device_data, &fitnessTex.pitch, memoryWidth, memoryRowCount));
CUDA_CALL(hipMemcpy2D(fitnessTex.device_data, fitnessTex.pitch, fitnessImage.data(), fitnessImage.pitch(), memoryWidth, memoryRowCount, hipMemcpyHostToDevice));
fitnessTex.textureCFD = hipCreateChannelDesc(16, 0, 0, 0, hipChannelFormatKindUnsigned);
fitnessTexRef.normalized = false;
fitnessTexRef.filterMode = hipFilterModePoint;
fitnessTexRef.addressMode[0] = hipAddressModeClamp;
fitnessTexRef.addressMode[1] = hipAddressModeClamp;
CUDA_CALL(hipBindTexture2D(0, &fitnessTexRef, fitnessTex.device_data, &fitnessTex.textureCFD, textureWidth, textureHeight, fitnessTex.pitch));
}
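// Editor's sketch (an addition, not part of the original class): the texture-reference API used
// above (texture<>, hipBindTexture2D) is deprecated; the same pitched 2D fitness data could be
// exposed through a texture object instead. The helper name and parameters are illustrative only;
// kernels would then take the object as an argument and call tex2D<uint16_t>(texObj, x, y).
static hipTextureObject_t make_fitness_texture_object(void *devPtr, size_t pitch,
                                                      size_t width, size_t height)
{
    hipResourceDesc resDesc = {};
    resDesc.resType = hipResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = devPtr;
    resDesc.res.pitch2D.desc = hipCreateChannelDesc(16, 0, 0, 0, hipChannelFormatKindUnsigned);
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.pitchInBytes = pitch;

    hipTextureDesc texDesc = {};
    texDesc.normalizedCoords = 0;
    texDesc.filterMode = hipFilterModePoint;
    texDesc.addressMode[0] = hipAddressModeClamp;
    texDesc.addressMode[1] = hipAddressModeClamp;
    texDesc.readMode = hipReadModeElementType;

    hipTextureObject_t texObj = 0;
    CUDA_CALL(hipCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr));
    return texObj;
}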
void CellGrid::initialize_grid(const Image &fitnessImage)
{
create_fitness_texture(fitnessImage);
// Allocate pitched memory for populations of cells.
// CUDA_CALL(hipMallocPitch((void **)&device_currPopMemory, &currPopPitch, width * sizeof(Cell), height));
// CUDA_CALL(hipMallocPitch((void **)&device_nextPopMemory, &nextPopPitch, width * sizeof(Cell), height));
//NOTE: For now we are using normal un-pitched memory.
currPopPitch = nextPopPitch = width * sizeof(Cell);
CUDA_CALL(hipMalloc((void **)&device_currPopMemory, width * height * sizeof(Cell)));
CUDA_CALL(hipMalloc((void **)&device_nextPopMemory, width * height * sizeof(Cell)));
assert(currPopPitch == nextPopPitch && "Population memory pitch doesn't align!");
hiprandStateMtgp32_t *device_randomStates;
mtgp32_kernel_params_t *device_kernelParams;
size_t stateCount = 200; // rngGridDim * rngGridDim;
assert(stateCount <= 200 && "Only 200 state params are prepared by Nvidia.");
CUDA_CALL(hipMalloc((void **)&device_randomStates, stateCount * sizeof(hiprandStateMtgp32_t)));
CUDA_CALL(hipMalloc((void **)&device_kernelParams, sizeof(mtgp32_kernel_params_t)));
CURAND_CALL(hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, device_kernelParams));
CURAND_CALL(hiprandMakeMTGP32KernelState(device_randomStates, mtgp32dc_params_fast_11213, device_kernelParams, stateCount, time(NULL)));
CellGridInfo currPop = {};
currPop.data = device_currPopMemory;
currPop.pitch = currPopPitch;
currPop.width = width;
currPop.height = height;
RandomGeneratorInfo rng = {};
rng.xMin = 0;
rng.yMin = 0;
rng.xMax = textureWidth;
rng.yMax = textureHeight;
printf("RNG interval xMax: %u yMax: %u\n", rng.xMax, rng.yMax);
rng.state = device_randomStates;
uint blockCount = get_number_of_parts(width * height, 200);
CUDA_TIMED_BLOCK_START("InitialPopulationGeneration");
hipLaunchKernelGGL(( generate_random_population), dim3(dim3(blockCount, 1, 1)), dim3(dim3(200, 1, 1)), 0, 0, currPop, rng);
CUDA_TIMED_BLOCK_END(true);
CUDA_CALL(hipFree(device_randomStates));
printf("Grid initialized\n");
}
void CellGrid::print_cell_grid(const Cell *data, const size_t pitch, bool fitness) const
{
if (device_currPopMemory == nullptr)
return;
Cell *tmpMemory;
CUDA_CALL(hipHostMalloc((void **)&tmpMemory, pitch * height, hipHostMallocWriteCombined));
CUDA_CALL(hipMemcpy(tmpMemory, data, pitch * height, hipMemcpyDeviceToHost));
Cell *dataPtr = tmpMemory;
for (size_t row = 0; row < height; row++)
{
for (size_t col = 0; col < width; col++)
{
if (fitness)
printf("%2.1f ", dataPtr[col].fitness);
else
printf("[%i;%i] ", dataPtr[col].x, dataPtr[col].y);
}
printf("\n");
dataPtr = (Cell *)(((char *)dataPtr) + pitch);
}
CUDA_CALL(hipHostFree(tmpMemory));
}
void CellGrid::evolve(float &evolutionTime)
{
CellGridInfo currPop = {};
currPop.data = device_currPopMemory;
currPop.pitch = currPopPitch;
currPop.width = width;
currPop.height = height;
CellGridInfo nextPop = {};
nextPop.data = device_nextPopMemory;
nextPop.pitch = nextPopPitch;
nextPop.width = width;
nextPop.height = height;
// CUDA_CALL(hipMemset2D(device_nextPopMemory, nextPopPitch, 5, width * sizeof(Cell), height));
// Memory needs to be copied only if we decide to take some cells from old population.
// CUDA_CALL(hipMemcpy2D(device_nextPopMemory, nextPopPitch, device_currPopMemory, currPopPitch, width * sizeof(Cell), height, hipMemcpyDeviceToDevice));
CUDA_TIMED_BLOCK_START("Evolve");
hipLaunchKernelGGL(( evolve_kernel<NeighborhoodType_L9>), dim3(kernelSettings.gridDimension), dim3(kernelSettings.blockDimension), 0, 0, currPop, nextPop);
CUDA_TIMED_BLOCK_END(true);
evolutionTime = elapsedTime;
Cell *tmp = device_currPopMemory;
device_currPopMemory = device_nextPopMemory;
device_nextPopMemory = tmp;
}
float CellGrid::get_average_fitness(float &reduceTime) const
{
unsigned int n = width * height;
constexpr unsigned int ReduceTPB = 512;
unsigned int numberOfBlocks = get_number_of_parts(n, ReduceTPB);
//printf("number of blocks %u\n", numberOfBlocks);
dim3 dimGrid = dim3(numberOfBlocks, 1, 1);
dim3 dimBlock = dim3(ReduceTPB, 1, 1);
unsigned int sMemSize = ReduceTPB * sizeof(float);
CellGridInfo gridInfo = {};
gridInfo.data = device_currPopMemory;
gridInfo.pitch = currPopPitch;
gridInfo.width = width;
gridInfo.height = height;
CUDA_TIMED_BLOCK_START("complete_smart_reduce");
float *device_finalSum;
CUDA_CALL(hipMalloc((void **)&device_finalSum, sizeof(float)));
CUDA_CALL(hipMemset(device_finalSum, 0, sizeof(float)));
hipLaunchKernelGGL(( smart_reduce<ReduceTPB>), dim3(dimGrid), dim3(dimBlock), sMemSize, 0, gridInfo, n, device_finalSum);
float sum = 0;
CUDA_CALL(hipMemcpy(&sum, device_finalSum, sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(device_finalSum));
CUDA_CALL(hipDeviceSynchronize());
CUDA_TIMED_BLOCK_END(false);
reduceTime = elapsedTime;
return sum / (float)n;
}
Cell *CellGrid::get_device_population_memory() const
{
return this->device_currPopMemory;
}
KernelSettings CellGrid::get_kernel_settings() const
{
return this->kernelSettings;
} | 556f7a6f599d2bc343e7b2aaacb8df8da77628cc.cu | #include <cell_grid.cuh>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>
////////////////////////////// Fitness texture ////////////////////////////////
struct TextureInfo
{
byte *device_data;
size_t pitch;
cudaChannelFormatDesc textureCFD;
};
// texture<unsigned char, 2, cudaReadModeElementType> fitnessTexRef;
texture<uint16_t, 2, cudaReadModeElementType> fitnessTexRef;
TextureInfo fitnessTex = {};
///////////////////////////////////////////////////////////////////////////////
struct RandomGeneratorInfo
{
curandStateMtgp32 *state;
int xMin;
int yMin;
int xMax;
int yMax;
};
//////////////////////////////////////////////////////// KERNELS /////////////////////////////////////////////////////////////////////
__global__ void generate_random_population(CellGridInfo gridInfo, RandomGeneratorInfo rng)
{
//NOTE: This version, with 200 threads in 1D block inside 1D grid is about 9x faster than
// version with 2D blocks.
size_t n = gridInfo.width * gridInfo.height;
uint rngStateOffset = threadIdx.x;
uint tIdX = (blockIdx.x * blockDim.x) + threadIdx.x;
uint strideX = blockDim.x * gridDim.x;
while (tIdX < n)
{
float f1 = curand_uniform(&rng.state[rngStateOffset]);
float f2 = curand_uniform(&rng.state[rngStateOffset]);
int x = rng.xMin + ((int)(f1 * (rng.xMax - rng.xMin) + 0.999999));
int y = rng.yMin + ((int)(f2 * (rng.yMax - rng.yMin) + 0.999999));
Cell rnd(x, y);
rnd.fitness = tex2D<uint16_t>(fitnessTexRef, x, y);
gridInfo.data[tIdX] = rnd;
tIdX += strideX;
}
}
// This kernel will evole current population into new one.
template <NeighborhoodType neigh>
__global__ void evolve_kernel(const CellGridInfo currPop, CellGridInfo nextPop)
{
uint tIdX = (blockIdx.x * blockDim.x) + threadIdx.x;
uint tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
uint strideX = blockDim.x * gridDim.x;
uint strideY = blockDim.y * gridDim.y;
while (tIdX < currPop.width)
{
tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
while (tIdY < currPop.height)
{
//Cell *cell = ((Cell *)((char *)currPop.data + tIdY * currPop.pitch) + tIdX);
Cell *cell = &currPop.data[(tIdY * currPop.width) + tIdX];
// We can't find the partner in cell code, because we don't know the fitness value.
// We would have to do 2 iterations of these loops: one beforehand just to set up the fitness value,
// then synchronize all threads and find the mating partner.
Cell *partner = nullptr;
Cell *neighArr;
int neighSize;
switch (neigh)
{
case NeighborhoodType_L5:
{
neighSize = 4;
Cell neighborhood[4];
cell->get_neighborhood<neigh>(tIdX, tIdY, currPop, neighborhood);
neighArr = neighborhood;
}
break;
case NeighborhoodType_L9:
case NeighborhoodType_C9:
{
neighSize = 8;
Cell neighborhood[8];
cell->get_neighborhood<neigh>(tIdX, tIdY, currPop, neighborhood);
neighArr = neighborhood;
}
break;
case NeighborhoodType_C13:
{
neighSize = 12;
Cell neighborhood[12];
cell->get_neighborhood<neigh>(tIdX, tIdY, currPop, neighborhood);
neighArr = neighborhood;
}
break;
}
float bestFitness = -1;
{
for (size_t i = 0; i < neighSize; i++)
{
neighArr[i].fitness = (float)tex2D<uint16_t>(fitnessTexRef, neighArr[i].x, neighArr[i].y);
if (neighArr[i].fitness > bestFitness)
{
bestFitness = neighArr[i].fitness;
partner = &neighArr[i];
}
}
}
Cell offspring = Cell(cell, partner);
offspring.fitness = (float)tex2D<uint16_t>(fitnessTexRef, offspring.x, offspring.y);
//offspring.fitness = 1.0f;
//*((Cell *)((char *)nextPop.data + tIdY * nextPop.pitch) + tIdX) = offspring;
nextPop.data[(tIdY * nextPop.width) + tIdX] = offspring;
tIdY += strideY;
}
tIdX += strideX;
}
}
template <unsigned int blockSize>
__device__ void warp_reduce(volatile float *sData, unsigned int tId)
{
if (blockSize >= 64)
sData[tId] += sData[tId + 32];
if (blockSize >= 32)
sData[tId] += sData[tId + 16];
if (blockSize >= 16)
sData[tId] += sData[tId + 8];
if (blockSize >= 8)
sData[tId] += sData[tId + 4];
if (blockSize >= 4)
sData[tId] += sData[tId + 2];
if (blockSize >= 2)
sData[tId] += sData[tId + 1];
}
template <unsigned int blockSize>
__global__ void smart_reduce(CellGridInfo grid, unsigned int n, float *finalSum)
{
extern __shared__ float sData[];
unsigned int tId = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * 2) + tId;
unsigned int gridSize = blockSize * 2 * gridDim.x;
sData[tId] = 0.0f;
while (i < n)
{
sData[tId] += grid.data[i].fitness + grid.data[i + blockSize].fitness;
i += gridSize;
}
__syncthreads();
if (blockSize >= 512)
{
if (tId < 256)
{
sData[tId] += sData[tId + 256];
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tId < 128)
{
sData[tId] += sData[tId + 128];
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tId < 64)
{
sData[tId] += sData[tId + 64];
}
__syncthreads();
}
if (tId < 32)
warp_reduce<blockSize>(sData, tId);
if (tId == 0)
{
atomicAdd(finalSum, sData[0]);
}
}
////////////////////////////////////////////////// END OF KERNELS /////////////////////////////////////////////////////////////////////
CellGrid::CellGrid() {}
CellGrid::CellGrid(const size_t width, const size_t height, KernelSettings kernelSettings)
{
this->width = width;
this->height = height;
this->kernelSettings = kernelSettings;
}
CellGrid::~CellGrid()
{
// Unbind texture and release its memory.
CUDA_CALL(cudaUnbindTexture(fitnessTexRef));
CUDA_CALL(cudaFree(fitnessTex.device_data));
// Release populations memory.
if (device_currPopMemory != nullptr)
cudaFree(device_currPopMemory);
if (device_nextPopMemory != nullptr)
cudaFree(device_nextPopMemory);
}
void CellGrid::create_fitness_texture(const Image &fitnessImage)
{
uint channelCount = fitnessImage.channel_count();
assert((channelCount == 1) ||
(channelCount == 2) ||
(channelCount == 4) &&
"Cuda texture only support 1,2 or 4 sized vectors.");
textureWidth = fitnessImage.width();
textureHeight = fitnessImage.height();
size_t memoryWidth = textureWidth * pixel_byte_size(fitnessImage.image_type());
size_t memoryRowCount = textureHeight;
CUDA_CALL(cudaMallocPitch((void **)&fitnessTex.device_data, &fitnessTex.pitch, memoryWidth, memoryRowCount));
CUDA_CALL(cudaMemcpy2D(fitnessTex.device_data, fitnessTex.pitch, fitnessImage.data(), fitnessImage.pitch(), memoryWidth, memoryRowCount, cudaMemcpyHostToDevice));
fitnessTex.textureCFD = cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsigned);
fitnessTexRef.normalized = false;
fitnessTexRef.filterMode = cudaFilterModePoint;
fitnessTexRef.addressMode[0] = cudaAddressModeClamp;
fitnessTexRef.addressMode[1] = cudaAddressModeClamp;
CUDA_CALL(cudaBindTexture2D(0, &fitnessTexRef, fitnessTex.device_data, &fitnessTex.textureCFD, textureWidth, textureHeight, fitnessTex.pitch));
}
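// Editor's sketch (an addition, not part of the original class): the texture-reference API used
// above (texture<>, cudaBindTexture2D) is deprecated in newer CUDA toolkits; the same pitched 2D
// fitness data could be exposed through a texture object instead. The helper name and parameters
// are illustrative only; kernels would then take the object as an argument and call
// tex2D<uint16_t>(texObj, x, y).
static cudaTextureObject_t make_fitness_texture_object(void *devPtr, size_t pitch,
                                                       size_t width, size_t height)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = devPtr;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsigned);
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.pitchInBytes = pitch;

    cudaTextureDesc texDesc = {};
    texDesc.normalizedCoords = 0;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t texObj = 0;
    CUDA_CALL(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr));
    return texObj;
}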
void CellGrid::initialize_grid(const Image &fitnessImage)
{
create_fitness_texture(fitnessImage);
// Allocate pitched memory for populations of cells.
// CUDA_CALL(cudaMallocPitch((void **)&device_currPopMemory, &currPopPitch, width * sizeof(Cell), height));
// CUDA_CALL(cudaMallocPitch((void **)&device_nextPopMemory, &nextPopPitch, width * sizeof(Cell), height));
//NOTE: For now we are using normal un-pitched memory.
currPopPitch = nextPopPitch = width * sizeof(Cell);
CUDA_CALL(cudaMalloc((void **)&device_currPopMemory, width * height * sizeof(Cell)));
CUDA_CALL(cudaMalloc((void **)&device_nextPopMemory, width * height * sizeof(Cell)));
assert(currPopPitch == nextPopPitch && "Population memory pitch doesn't align!");
curandStateMtgp32 *device_randomStates;
mtgp32_kernel_params *device_kernelParams;
size_t stateCount = 200; // rngGridDim * rngGridDim;
assert(stateCount <= 200 && "Only 200 state params are prepared by Nvidia.");
CUDA_CALL(cudaMalloc((void **)&device_randomStates, stateCount * sizeof(curandStateMtgp32)));
CUDA_CALL(cudaMalloc((void **)&device_kernelParams, sizeof(mtgp32_kernel_params)));
CURAND_CALL(curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, device_kernelParams));
CURAND_CALL(curandMakeMTGP32KernelState(device_randomStates, mtgp32dc_params_fast_11213, device_kernelParams, stateCount, time(NULL)));
CellGridInfo currPop = {};
currPop.data = device_currPopMemory;
currPop.pitch = currPopPitch;
currPop.width = width;
currPop.height = height;
RandomGeneratorInfo rng = {};
rng.xMin = 0;
rng.yMin = 0;
rng.xMax = textureWidth;
rng.yMax = textureHeight;
printf("RNG interval xMax: %u yMax: %u\n", rng.xMax, rng.yMax);
rng.state = device_randomStates;
uint blockCount = get_number_of_parts(width * height, 200);
CUDA_TIMED_BLOCK_START("InitialPopulationGeneration");
generate_random_population<<<dim3(blockCount, 1, 1), dim3(200, 1, 1)>>>(currPop, rng);
CUDA_TIMED_BLOCK_END(true);
CUDA_CALL(cudaFree(device_randomStates));
printf("Grid initialized\n");
}
void CellGrid::print_cell_grid(const Cell *data, const size_t pitch, bool fitness) const
{
if (device_currPopMemory == nullptr)
return;
Cell *tmpMemory;
CUDA_CALL(cudaHostAlloc((void **)&tmpMemory, pitch * height, cudaHostAllocWriteCombined));
CUDA_CALL(cudaMemcpy(tmpMemory, data, pitch * height, cudaMemcpyDeviceToHost));
Cell *dataPtr = tmpMemory;
for (size_t row = 0; row < height; row++)
{
for (size_t col = 0; col < width; col++)
{
if (fitness)
printf("%2.1f ", dataPtr[col].fitness);
else
printf("[%i;%i] ", dataPtr[col].x, dataPtr[col].y);
}
printf("\n");
dataPtr = (Cell *)(((char *)dataPtr) + pitch);
}
CUDA_CALL(cudaFreeHost(tmpMemory));
}
void CellGrid::evolve(float &evolutionTime)
{
CellGridInfo currPop = {};
currPop.data = device_currPopMemory;
currPop.pitch = currPopPitch;
currPop.width = width;
currPop.height = height;
CellGridInfo nextPop = {};
nextPop.data = device_nextPopMemory;
nextPop.pitch = nextPopPitch;
nextPop.width = width;
nextPop.height = height;
// CUDA_CALL(cudaMemset2D(device_nextPopMemory, nextPopPitch, 5, width * sizeof(Cell), height));
// Memory needs to be copied only if we decide to take some cells from old population.
// CUDA_CALL(cudaMemcpy2D(device_nextPopMemory, nextPopPitch, device_currPopMemory, currPopPitch, width * sizeof(Cell), height, cudaMemcpyDeviceToDevice));
CUDA_TIMED_BLOCK_START("Evolve");
evolve_kernel<NeighborhoodType_L9><<<kernelSettings.gridDimension, kernelSettings.blockDimension>>>(currPop, nextPop);
CUDA_TIMED_BLOCK_END(true);
evolutionTime = elapsedTime;
Cell *tmp = device_currPopMemory;
device_currPopMemory = device_nextPopMemory;
device_nextPopMemory = tmp;
}
float CellGrid::get_average_fitness(float &reduceTime) const
{
unsigned int n = width * height;
constexpr unsigned int ReduceTPB = 512;
unsigned int numberOfBlocks = get_number_of_parts(n, ReduceTPB);
//printf("number of blocks %u\n", numberOfBlocks);
dim3 dimGrid = dim3(numberOfBlocks, 1, 1);
dim3 dimBlock = dim3(ReduceTPB, 1, 1);
unsigned int sMemSize = ReduceTPB * sizeof(float);
CellGridInfo gridInfo = {};
gridInfo.data = device_currPopMemory;
gridInfo.pitch = currPopPitch;
gridInfo.width = width;
gridInfo.height = height;
CUDA_TIMED_BLOCK_START("complete_smart_reduce");
float *device_finalSum;
CUDA_CALL(cudaMalloc((void **)&device_finalSum, sizeof(float)));
CUDA_CALL(cudaMemset(device_finalSum, 0, sizeof(float)));
smart_reduce<ReduceTPB><<<dimGrid, dimBlock, sMemSize>>>(gridInfo, n, device_finalSum);
float sum = 0;
CUDA_CALL(cudaMemcpy(&sum, device_finalSum, sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(device_finalSum));
CUDA_CALL(cudaDeviceSynchronize());
CUDA_TIMED_BLOCK_END(false);
reduceTime = elapsedTime;
return sum / (float)n;
}
Cell *CellGrid::get_device_population_memory() const
{
return this->device_currPopMemory;
}
KernelSettings CellGrid::get_kernel_settings() const
{
return this->kernelSettings;
} |
562fdb9fc32a2fcc6ae213160179c678fc704e0a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
#include "common_cuda.h"
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
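// Editor's sketch (an addition, not in the original file): a grid-stride variant of the same
// kernel; it stays correct even if fewer blocks than ceil(N/blockDim.x) are launched.
__global__ void
saxpy_grid_stride_kernel(int N, float alpha, float* x, float* y, float* result) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
        result[i] = alpha * x[i] + y[i];
}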
static inline
int getBlocks(long working_set_size, int threadsPerBlock) {
// TODO: implement and use this interface if necessary
}
void
getArrays(int size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
CHECK_CUDA_ERROR(hipMallocManaged((void **) &(*xarray), size * sizeof(float)));
CHECK_CUDA_ERROR(hipMallocManaged((void **) &(*yarray), size * sizeof(float)));
CHECK_CUDA_ERROR(hipMallocManaged((void **) &(*resultarray), size * sizeof(float)));
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
hipFree(xarray);
hipFree(yarray);
hipFree(resultarray);
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
//
// TODO: do we need to allocate device memory buffers on the GPU here?
// Nope!
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: do we need copy here?
// Nope!
//
// TODO: insert time here to begin timing only the kernel
//
double startGPUTime = CycleTimer::currentSeconds();
// compute number of blocks and threads per block
uint32_t num_blocks = (total_elems + (threadsPerBlock - 1)) / threadsPerBlock;
// run saxpy_kernel on the GPU
hipLaunchKernelGGL(( saxpy_kernel) , dim3(num_blocks), dim3(threadsPerBlock), 0, 0, total_elems, alpha, xarray, yarray, resultarray);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call hipDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
hipDeviceSynchronize();
double endGPUTime = CycleTimer::currentSeconds();
double timeKernel = endGPUTime - startGPUTime;
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
//
// TODO: copy result from GPU using hipMemcpy
// No need
// What would be copy time when we use UVM?
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
timeKernelAvg += timeKernel;
//
// TODO free device memory if you allocate some device memory earlier in this function.
// No need
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| 562fdb9fc32a2fcc6ae213160179c678fc704e0a.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
#include "common_cuda.h"
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N)
result[index] = alpha * x[index] + y[index];
}
static inline
int getBlocks(long working_set_size, int threadsPerBlock) {
    // TODO: implement and use this interface if necessary
    // Ceil-divide so that every element of the working set gets a thread.
    return (int)((working_set_size + threadsPerBlock - 1) / threadsPerBlock);
}
void
getArrays(int size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
CHECK_CUDA_ERROR(cudaMallocManaged((void **) &(*xarray), size * sizeof(float)));
CHECK_CUDA_ERROR(cudaMallocManaged((void **) &(*yarray), size * sizeof(float)));
CHECK_CUDA_ERROR(cudaMallocManaged((void **) &(*resultarray), size * sizeof(float)));
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
cudaFree(xarray);
cudaFree(yarray);
cudaFree(resultarray);
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
//
// TODO: do we need to allocate device memory buffers on the GPU here?
// Nope!
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: do we need copy here?
// Nope!
//
// TODO: insert time here to begin timing only the kernel
//
double startGPUTime = CycleTimer::currentSeconds();
// compute number of blocks and threads per block
uint32_t num_blocks = (total_elems + (threadsPerBlock - 1)) / threadsPerBlock;
// run saxpy_kernel on the GPU
saxpy_kernel <<<num_blocks, threadsPerBlock>>>(total_elems, alpha, xarray, yarray, resultarray);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call cudaDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
cudaDeviceSynchronize();
double endGPUTime = CycleTimer::currentSeconds();
double timeKernel = endGPUTime - startGPUTime;
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occurred: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
//
// TODO: copy result from GPU using cudaMemcpy
// No need
// What would be copy time when we use UVM?
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
timeKernelAvg += timeKernel;
//
// TODO free device memory if you allocate some device memory earlier in this function.
// No need
}
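// A minimal usage sketch (hypothetical; not part of the original assignment file). With
// cudaMallocManaged the host writes the inputs directly and reads the result after the kernel
// completes, with no explicit cudaMemcpy in either direction.
static void
saxpyDemo(long n) {
    float *x, *y, *result;
    getArrays((int)n, &x, &y, &result);                  // managed (UVM) allocations
    for (long i = 0; i < n; i++) { x[i] = 1.0f; y[i] = 2.0f; }
    saxpyCuda(n, 2.0f, x, y, result, 1);                 // result[i] == 2*1 + 2 == 4
    printf("result[0] = %f\n", result[0]);               // host reads managed memory directly
    freeArrays(x, y, result);
}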
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
814b3275bfb15d69ed981613a90bdd3716a34589.hip | // !!! This is a file automatically generated by hipify!!!
//#include "stdafx.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
// Kernel that executes on the CUDA device
__global__ void square_array(float *a, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
}
int main(void)
{
float *a_h, *a_d; // Pointer to host & device arrays
const int N = 10; // Number of elements in arrays
size_t size = N * sizeof(float);
a_h = (float *)malloc(size); // Allocate array on host
hipMalloc((void **) &a_d, size); // Allocate array on device
//Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++) a_h[i] = (float)i;
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
hipLaunchKernelGGL(( square_array) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, N);
// Retrieve result from device and store it in host array
hipMemcpy(a_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost);
// Print results
for (int i=0; i<N; i++) printf("%d %f\n", i, a_h[i]);
// Cleanup
free(a_h); hipFree(a_d);
}
| 814b3275bfb15d69ed981613a90bdd3716a34589.cu | //#include "stdafx.h"
#include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device
__global__ void square_array(float *a, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
}
int main(void)
{
float *a_h, *a_d; // Pointer to host & device arrays
const int N = 10; // Number of elements in arrays
size_t size = N * sizeof(float);
a_h = (float *)malloc(size); // Allocate array on host
cudaMalloc((void **) &a_d, size); // Allocate array on device
//Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++) a_h[i] = (float)i;
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
square_array <<< n_blocks, block_size >>> (a_d, N);
// Retrieve result from device and store it in host array
cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// Print results
for (int i=0; i<N; i++) printf("%d %f\n", i, a_h[i]);
// Cleanup
free(a_h); cudaFree(a_d);
}
|
9d837bfc910d814b9263cc2ef3b4fb7ee0015991.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc evert-cuda.cu -o evert-cuda -use_fast_math -O3 -Xcompiler "-Ofast -march=native" -Xptxas "-O3 --verbose --warn-on-local-memory-usage --warn-on-spills" && ./evert-cuda
#include <stdint.h>
#include <stdio.h>
#include <locale.h>
#define M_TAU 6.283185 // The (approximate) arclength of a circle of radius 1
// GPU general data!
#define EV_NGPUS 2
#define EV_GPU_MAIN 1
#define EV_IMG_DIR "." // "/media/tmp" // "."
#define EV_EPSILON 0.001f
#define EV_RGB_BG 0x080808
#define EV_NLIGHTS 6 // The number of 2-faces of a cube!
// PIXEL SHADER data!
#define EV_NFRAMES 7 // If the number of frames is EVEN, then there's NO MIDDLE FRAME (and vv)! WARN! NFRAMES must be at least 2
#define EV_NSAMPLES (1<<1)
#define EV_NBOUNCES 4
#define EV_IMG_W (1920>>1)
#define EV_IMG_H (1080>>1)
#define EV_CAM_FOV (M_PI/3) // 2: 90 fov, 3: 60 fov, 4: 45 fov, 6: 30 fov
#define EV_CAM_POS { 0, 0, 5.0}
#define EV_CAM_DIR {-0,-0.03,-1.0}
#define EV_CAM_ROT_YZ 0.0 // Camera rotation over the yz-plane
#define EV_CAM_ROT_ZX 0.0 // Camera rotation over the zx-plane
#define EV_CAM_ROT_XY 0.0 // Camera rotation over the xy-plane
// MESH shader data! @theta is the AZIMUTHAL parameter; @v is the POLAR parameter!
#define EV_NSTRIPS 8
#define EV_THETA_MIN (0)
#define EV_PHI_MIN (0 + EV_EPSILON)
#define EV_THETA_MAX ((8./EV_NSTRIPS)*M_TAU) // 8
#define EV_PHI_MAX ((2./2) *M_PI - EV_EPSILON) // 2
#define EV_THETA_NVERTS (30*1*(EV_THETA_MAX-EV_THETA_MIN)/M_TAU*EV_NSTRIPS)
#define EV_PHI_NVERTS (30*2*(EV_PHI_MAX -EV_PHI_MIN) /M_PI *2)
#define EV_RGB_FRONT 0xff9999 // 0xff6666
#define EV_RGB_BACK 0x5eaeff // 0x1188ff
// STAGE times!
#define EV_CORRUGATE_TDEL 1.f
#define EV_PUSH_TDEL 2.f
#define EV_TWIST_TDEL 6.f
#define EV_UNPUSH_TDEL 2.f
#define EV_UNCORRUGATE_TDEL 1.f
#define EV_CORRUGATE_TINI (0.f)
#define EV_PUSH_TINI (EV_CORRUGATE_TINI+EV_CORRUGATE_TDEL)
#define EV_TWIST_TINI (EV_PUSH_TINI +EV_PUSH_TDEL)
#define EV_UNPUSH_TINI (EV_TWIST_TINI +EV_TWIST_TDEL)
#define EV_UNCORRUGATE_TINI (EV_UNPUSH_TINI +EV_UNPUSH_TDEL)
#define EV_TMIN (EV_CORRUGATE_TINI)
#define EV_TMAX (EV_CORRUGATE_TINI + EV_CORRUGATE_TDEL+EV_PUSH_TDEL+EV_TWIST_TDEL+EV_UNPUSH_TDEL+EV_UNCORRUGATE_TDEL)
// ----------------------------------------------------------------------------------------------------------------------------#
typedef uint8_t u8;
typedef float f32;
typedef int32_t i32;
typedef uint32_t u32;
typedef double f64;
typedef uint64_t u64;
// ----------------------------------------------------------------------------------------------------------------------------#
#include <time.h>
struct dt_t{
f64 t0, t1;
};
f64 dt_abs(){ struct timespec tabs; clock_gettime(CLOCK_MONOTONIC,&tabs); return tabs.tv_sec + 1e-9*tabs.tv_nsec; } // m_checksys(st,"clock_gettime");
f64 dt_del(dt_t* dt){ return dt->t1 - dt->t0; } // Get `relative time`, ie. a time delta between 2 absolute times! The time delta is returned in seconds, and its resolution is in nanoseconds!
void dt_ini(dt_t* dt){ dt->t0 = dt_abs(); }
void dt_end(dt_t* dt){ dt->t1 = dt_abs(); }
// ----------------------------------------------------------------------------------------------------------------------------#
#define cuda_check(){ hipError_t err; while((err=hipGetLastError()) != hipSuccess) printf("\x1b[91mFAIL\x1b[0m \x1b[32mCUDA\x1b[0m \x1b[32m%s\x1b[0m:\x1b[94mL%d\x1b[0m \x1b[35m%s\x1b[0m \x1b[33m%s\x1b[0m \x1b[37m%s\x1b[0m\n", __FILE__,__LINE__,__func__, hipGetErrorName(err),hipGetErrorString(err)); }
#define m_divceilu(N, D) (((N)%(D)) ? (N)/(D)+1 : (N)/(D))
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @block */
// ----------------------------------------------------------------------------------------------------------------------------#
struct vec3{ // Just a simple 3D vector!
union{ // Access the `vec3` using array notation of by specifying the name of a component!
f32 data[3];
struct{ f32 x0, x1, x2; };
};
__device__ __host__ vec3(){}
__device__ __host__ vec3(f32 a0, f32 a1, f32 a2){ x0=a0; x1=a1; x2=a2; }
__device__ __host__ f32 operator[](int idx){ return data[idx]; }
};
__device__ __host__ vec3 operator*(f32 s, vec3 v){ return {s*v[0], s*v[1], s*v[2]}; } // Scalar multiplication!
__device__ __host__ vec3 operator+(vec3 v0, vec3 v1){ return {v0[0]+v1[0], v0[1]+v1[1], v0[2]+v1[2]}; } // Vector addition!
__device__ __host__ vec3 operator-(vec3 v0, vec3 v1){ return {v0[0]-v1[0], v0[1]-v1[1], v0[2]-v1[2]}; } // Vector subtraction!
__device__ __host__ vec3 operator*(vec3 v0, vec3 v1){ return {v0[0]*v1[0], v0[1]*v1[1], v0[2]*v1[2]}; } // Vector multiplication!
__device__ __host__ f32 dot(vec3 v0, vec3 v1){ return v0[0]*v1[0] + v0[1]*v1[1] + v0[2]*v1[2]; } // Quite important for triangle intersection and a bit for the path tracer!
__device__ __host__ vec3 cross(vec3 v0, vec3 v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ...
return {v0[1]*v1[2] - v0[2]*v1[1], // 0 --> 1 --> 2
v0[2]*v1[0] - v0[0]*v1[2], // 1 --> 2 --> 0
v0[0]*v1[1] - v0[1]*v1[0]}; // 2 --> 0 --> 1
}
__device__ __host__ vec3 normalize(vec3 v){ return rsqrtf(dot(v,v)) * v; }
// ----------------------------------------------------------------------------------------------------------------------------#
// The mighty quaternions! A real Clifford algebra (aka. a geometric algebra) related to:
// spinors, 3D rotations, the 3-sphere living in 4D, the gauge group SU(2) from quantum flavordynamics,
// the Whitehead tower of the orthogonal group O(3), the string group String(3), the fivebrane group Fivebrane(3), ...
struct quat{
union{
f32 data[4];
struct{ f32 x0, x1, x2, x3; };
};
__device__ __host__ quat(){}
__device__ __host__ quat(f32 a0, f32 a1, f32 a2, f32 a3){ x0=a0; x1=a1; x2=a2; x3=a3; }
__device__ __host__ quat(f32 s, vec3 v){ x0=s; x1=v[0]; x2=v[1]; x3=v[2]; }
__device__ __host__ f32 operator[](int idx){ return data[idx]; }
};
__device__ __host__ quat operator*(quat q0, quat q1){ // The quaternion product is a sort of "twisted" product of 4D vectors
return {q0[0]*q1[0] - q0[1]*q1[1] - q0[2]*q1[2] - q0[3]*q1[3],
q0[0]*q1[1] + q0[1]*q1[0] + q0[2]*q1[3] - q0[3]*q1[2],
q0[0]*q1[2] - q0[1]*q1[3] + q0[2]*q1[0] + q0[3]*q1[1],
q0[0]*q1[3] + q0[1]*q1[2] - q0[2]*q1[1] + q0[3]*q1[0]};
}
__device__ __host__ quat conj(quat q){ return {q[0], -q[1], -q[2], -q[3]}; } // The quaternion inverse of a quaternion `q` is just `conj(q) / quad(q)`, just like for complex numbers!
__device__ __host__ quat versor(f32 angle, vec3 dir){
return {cosf(.5*angle), sinf(.5*angle)*normalize(dir)};
}
__device__ __host__ vec3 qrotl(vec3 v, quat versor){ // WARN! @versor must be a unit-quaternion!
quat p_rot = versor * quat(0,v) * conj(versor); // Left-conjugation by @versor! The quaternion-inverse of a unit-quaternion is its quaternion-conjugate!
return {p_rot[1], p_rot[2], p_rot[3]};
}
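// A quick sanity check of the rotation convention above (hypothetical helper, not used elsewhere in
// this file): qrotl conjugates on the left, q * v * conj(q), so a versor for +pi/2 about the z-axis
// sends the x-axis to the y-axis.
__device__ __host__ vec3 qrotl_example(){
  quat q = versor(M_PI/2, vec3(0,0,1)); // Unit quaternion: rotate by 90 degrees about +z
  return qrotl(vec3(1,0,0), q); // ~= (0,1,0), up to floating-point error
}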
// ----------------------------------------------------------------------------------------------------------------------------#
__forceinline__ __device__ f32 clamp01(f32 x){ return __saturatef(x); }
__forceinline__ __device__ vec3 clamp01(vec3 v){ return {clamp01(v[0]), clamp01(v[1]), clamp01(v[2])}; }
__forceinline__ __device__ f32 rgb_gamma_decode(f32 channel){ return __powf(channel, 2.2/1); }
__forceinline__ __device__ f32 rgb_gamma_encode(f32 channel){ return __powf(channel, 1/2.2); }
__forceinline__ __device__ f32 rgb_u8_to_f32( u8 channel){ return rgb_gamma_decode(channel/255.); }
__forceinline__ __device__ u8 rgb_f32_to_u8( f32 channel){ return 255.*rgb_gamma_encode(channel) + .5; }
__forceinline__ __device__ vec3 bgr8u_to_rgb32f(u32 bgr8u){
return {rgb_u8_to_f32((bgr8u>>0x10)&0xff),
rgb_u8_to_f32((bgr8u>>0x08)&0xff),
rgb_u8_to_f32((bgr8u>>0x00)&0xff)};
}
__forceinline__ __device__ u32 rgb32f_to_bgr8u(vec3 rgbf32){
return (rgb_f32_to_u8(rgbf32[0])<<0x10) |
(rgb_f32_to_u8(rgbf32[1])<<0x08) |
(rgb_f32_to_u8(rgbf32[2])<<0x00);
}
__forceinline__ __device__ u32 rgb32f_to_rgb8u(vec3 rgbf32){
return (rgb_f32_to_u8(rgbf32[0])<<0x00) |
(rgb_f32_to_u8(rgbf32[1])<<0x08) |
(rgb_f32_to_u8(rgbf32[2])<<0x10);
}
__forceinline__ __device__ f32 rand_f32(u32* seed0, u32* seed1){ // Random number generator from https://github.com/gz/rust-raytracer
*seed0 = 36969*(*seed0&0xffff) + (*seed0>>0x10);
*seed1 = 18489*(*seed1&0xffff) + (*seed1>>0x10);
u32 val_u32 = 0x40000000 | (((*seed0<<0x10) + *seed1) & 0x007fffff); // Exponent bits of 0x40000000 place the float in [2,4); the mantissa is filled with random bits
return .5f * (*(f32*)&val_u32) - 1.f; // Map [2,4) to [0,1)
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block Geometric data structures! Each geometric primitive needs its own intersection routine!
// ----------------------------------------------------------------------------------------------------------------------------#
struct light_t{
vec3 vert0; // Geometry/intersection data! vert0 IS the main vertex, edge01 IS vert1 - vert0, edge02 IS vert2 - vert0
vec3 edge01;
vec3 edge02;
vec3 emission; // Lighting/rendering data!
};
struct triangle_t{
vec3 vert0; // Geometry/intersection data! vert0 IS the main vertex, edge01 IS vert1 - vert0, edge02 IS vert2 - vert0
vec3 edge01;
vec3 edge02;
u32 albedo_back; // Lighting/rendering data! Albedo is the base color input, aka. a diffuse map... or something
u32 albedo_front;
};
enum geom_type_t{ GEOM_UNKNOWN=0, GEOM_LIGHT, GEOM_TRIANGLE};
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Path tracing data structures!
struct ray_t{
vec3 pos; // Ray origin!
vec3 dir; // Ray direction!
};
struct intersect_t{ // We return this data structure upon hitting something when path tracing!
f32 t;
int front; // Did we hit the front or the back?
};
struct hit_t{
f32 t; // The position of the hit in RAY COORDINATES. A ray is 1-dimensional, so its coordinates are 1-dimensional, too! Here we record *where* we hit the object!
u32 idx; // The object index, so that we know which object we hit!
u32 type; // What type of object did we hit, and in which mesh?
int front; // Did we hit the front or the back?
};
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @block EVERT code, originally by Nathaniel Thurston (Copyright 1995 Geometry Center, University of Minnesota)
What follows is a fork of Nathaniel Thurston's `evert` code, which implemented the Thurston sphere eversion for the movie Outside In, of meme-tastic fame.
The basic building blocks in `evert` are the so-called jets.
I have no idea what they're supposed to "be", but in `evert` they keep track of differential data associated to a scalar field.
As far as `evert` is concerned, they seem to have been used to implement automatic differentiation,
since partial derivatives are used to concoct the "figure eight" towards the end of the eversion code.
// ----------------------------------------------------------------------------------------------------------------------------#
Copyright (c) 1993
The National Science and Technology Research Center for
Computation and Visualization of Geometric Structures
(The Geometry Center)
University of Minnesota
1300 South Second Street
Minneapolis, MN 55454 USA
email: [email protected]
The software distributed here is copyrighted as noted above.
It is free software and may be obtained via anonymous ftp from
ftp.geom.umn.edu. It may be freely copied, modified, and
redistributed under the following conditions:
1. All copyright notices must remain intact in all files.
2. A copy of this file (COPYING) must be distributed along with any
copies that you redistribute; this includes copies that you have
modified, or copies of programs or other software products that
include this software.
3. If you modify this software, you must include a notice giving the
name of the person performing the modification, the date of
modification, and the reason for such modification.
4. When distributing modified versions of this software, or other
software products that include this software, you must provide
notice that the original source code may be obtained as noted
above.
5. There is no warranty or other guarantee of fitness for this
software, it is provided solely "as is". Bug reports or fixes may
be sent to the email address above; the authors may or may not act
on them as they desire.
If you use an image produced by this software in a publication or
presentation, we request that you credit the Geometry Center with a
notice such as the following:
Figures 1, 2, and 5-300 were generated with software written at the
Geometry Center, University of Minnesota.
*/
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section A 1-jet, aka. a first-order jet, aka. a scalar field (evaluated at some point) together with its 1st-order partial derivatives (evaluated at some point)!
We can think of a k-jet as an "augmented" floating-point number: the first entry in the struct is the value of the number,
and all the following entries are the partial derivatives up to order k.
Since we only need first-order partial derivatives, we'll only implement 1-jets.
*/
struct jet{
f32 f; // Scalar value of a 2D scalar field!
f32 fu, fv; // 1st-order partial derivatives of a 2D scalar field!
__forceinline__ __device__ jet(){}
__forceinline__ __device__ jet(f32 s){ f=s; fu=0; fv=0; }
__forceinline__ __device__ jet(f32 s, f32 su, f32 sv){ f=s; fu=su; fv=sv; }
};
__forceinline__ __device__ jet operator+(jet x0, jet x1){ return {x0.f + x1.f, x0.fu + x1.fu, x0.fv + x1.fv}; } // 1st-order partial derivatives of the addition of two 2D scalar fields!
__forceinline__ __device__ jet operator-(jet x0, jet x1){ return {x0.f - x1.f, x0.fu - x1.fu, x0.fv - x1.fv}; } // 1st-order partial derivatives of the subtraction of two 2D scalar fields!
__forceinline__ __device__ jet operator*(jet x0, jet x1){ // 1st-order partial derivatives of the product of two 2D scalar fields!
return {x0.f *x1.f,
x0.fu*x1.f + x0.f*x1.fu,
x0.fv*x1.f + x0.f*x1.fv};
}
__forceinline__ __device__ jet operator%(jet x, f32 s){
x.f = fmod(x.f, s);
if(x.f<0) x.f += s;
return x;
}
__forceinline__ __device__ jet operator^(jet x, f32 s){ // Derivatives of the n-th power?
f32 f0 = powf(x.f, s);
f32 f1 = x.f==0 ? 0 : s*f0/x.f; // Avoid division by zero
return {f0, f1*x.fu, f1*x.fv};
}
__forceinline__ __device__ jet operator/(jet x0, jet x1){ return x0 * (x1^-1); } // Derivatives of the quotient!
__forceinline__ __device__ jet ev_interpolate(jet x0, jet x1, jet t){ return (jet(1)-t)*x0 + t*x1; }
__forceinline__ __device__ jet ev_partial_diff(jet x, int idx){ return {idx==0 ? x.fu : x.fv, 0,0}; } // Keep the partial WRT u, or the partial WRT v?
__forceinline__ __device__ jet ev_cos(jet x){ f32 c=cosf(x.f); f32 dc=-sinf(x.f); return {c, dc*x.fu, dc*x.fv}; } // Derivatives of the cosine of a scalar field!
__forceinline__ __device__ jet ev_sin(jet x){ f32 s=sinf(x.f); f32 ds= cosf(x.f); return {s, ds*x.fu, ds*x.fv}; } // Derivatives of the sine of a scalar field!
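// A minimal illustration (hypothetical helper, not used elsewhere in this file) of the forward-mode
// automatic differentiation these 1-jets implement: seed each independent variable with unit partials
// and the overloaded operators apply the chain rule. For f(u,v) = sin(u)*v:
__forceinline__ __device__ jet jet_ad_example(f32 u, f32 v){
  jet ju = jet(u, 1,0); // u carries du/du=1, du/dv=0
  jet jv = jet(v, 0,1); // v carries dv/du=0, dv/dv=1
  return ev_sin(ju)*jv; // .f = sin(u)*v, .fu = cos(u)*v, .fv = sin(u)
}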
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section A 3D vector of 1-jets!
If a k-jet can be interpreted as a "fat" floating-point number (a number augmented with differential data),
then a vector of k-jets is a vector of "fat" numbers: each entry in the vector is a "fat" number.
All the complicated bookkeeping needed to implement automatic differentiation happens at the level of k-jets,
so vectors of k-jets need only implement vector-specific stuff.
A 3D vector (of 1-jets) will represent a point in 3D space.
*/
struct vjet{
jet x0, x1, x2;
};
__forceinline__ __device__ vjet operator*(jet s, vjet v){ return {s*v.x0, s*v.x1, s*v.x2}; } // Scalar multiplication!
__forceinline__ __device__ vjet operator+(vjet v0, vjet v1){ return {v0.x0 + v1.x0, v0.x1 + v1.x1, v0.x2 + v1.x2}; } // Vector addition!
__forceinline__ __device__ vjet operator-(vjet v0, vjet v1){ return {v0.x0 - v1.x0, v0.x1 - v1.x1, v0.x2 - v1.x2}; } // Vector subtraction!
__forceinline__ __device__ jet ev_dot( vjet v0, vjet v1){ return v0.x0*v1.x0 + v0.x1*v1.x1 + v0.x2*v1.x2; }
__forceinline__ __device__ vjet ev_cross(vjet v0, vjet v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ...
return {v0.x1*v1.x2 - v0.x2*v1.x1, // 0 --> 1 --> 2
v0.x2*v1.x0 - v0.x0*v1.x2, // 1 --> 2 --> 0
v0.x0*v1.x1 - v0.x1*v1.x0}; // 2 --> 0 --> 1
}
__forceinline__ __device__ vjet ev_normalize(vjet v){
jet s = ev_dot(v,v);
if(s.f>0) s = s^-.5; // Avoid division by zero!
else s = jet(0);
return s*v;
}
__forceinline__ __device__ vjet ev_interpolate( vjet v0, vjet v1, jet t){ return (jet(1)-t)*v0 + t*v1; }
__forceinline__ __device__ vjet ev_partial_diff(vjet v, int idx){ return {ev_partial_diff(v.x0,idx), ev_partial_diff(v.x1,idx), ev_partial_diff(v.x2,idx)}; }
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section A quaternion made of 1-jets!
If a k-jet can be interpreted as a "fat" floating-point number (a number augmented with differential data),
and a vector of k-jets is a vector of "fat" numbers,
then a quaternion of 1-jets is a just quaternion... whose entries are not plain numbers, but "fat" numbers.
All the complicated bookkeeping needed to implement automatic differentiation happens at the level of k-jets,
so quaternions of k-jets need only implement quaternion-specific stuff.
We'll use quaternions to do rotations in 3D.
*/
struct qjet{
jet x0, x1, x2, x3;
__forceinline__ __device__ qjet(jet a0, jet a1, jet a2, jet a3){ x0=a0; x1=a1; x2=a2; x3=a3; }
__forceinline__ __device__ qjet(jet s, vjet v){ x0=s; x1=v.x0; x2=v.x1; x3=v.x2; }
};
__forceinline__ __device__ qjet operator*(qjet q0, qjet q1){
return {q0.x0*q1.x0 - q0.x1*q1.x1 - q0.x2*q1.x2 - q0.x3*q1.x3,
q0.x0*q1.x1 + q0.x1*q1.x0 + q0.x2*q1.x3 - q0.x3*q1.x2,
q0.x0*q1.x2 - q0.x1*q1.x3 + q0.x2*q1.x0 + q0.x3*q1.x1,
q0.x0*q1.x3 + q0.x1*q1.x2 - q0.x2*q1.x1 + q0.x3*q1.x0};
}
__forceinline__ __device__ qjet ev_conj(qjet q){ return {q.x0, -1*q.x1, -1*q.x2, -1*q.x3}; } // The quaternion-inverse of `q` is just `conj(q) / quad(q)`, just like for complex numbers!
__forceinline__ __device__ qjet ev_versor(jet angle, vjet dir){
return {ev_cos(.5*angle), ev_sin(.5*angle)*ev_normalize(dir)};
}
__forceinline__ __device__ vjet ev_qrot3d(vjet v, qjet versor){
qjet p_rot = ev_conj(versor) * qjet(0,v) * versor; // Right-conjugation by @versor! The quaternion-conjugate of a unit-quaternion is its quaternion-inverse!
return {p_rot.x1, p_rot.x2, p_rot.x3};
}
__forceinline__ __device__ vjet ev_qrot_yz(vjet v, jet angle){ return ev_qrot3d(v, ev_versor(angle, {jet(1),jet(0),jet(0)})); } // Rotation over the yz-plane
__forceinline__ __device__ vjet ev_qrot_zx(vjet v, jet angle){ return ev_qrot3d(v, ev_versor(angle, {jet(0),jet(1),jet(0)})); } // Rotation over the zx-plane
__forceinline__ __device__ vjet ev_qrot_xy(vjet v, jet angle){ return ev_qrot3d(v, ev_versor(angle, {jet(0),jet(0),jet(1)})); } // Rotation over the xy-plane
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Sphere parametrization and geometric deformations!
__forceinline__ __device__ vjet ev_sphere_arc(jet phi, f32 radius_x0, f32 radius_x1, f32 radius_x2){ // Trace out a meridian, since the horizontal angle is fixed!
jet s0 = radius_x0 * ev_sin(jet(0,0,1)) * ev_sin(phi); // Keep the horizontal angle constant, vary the vertical angle!
jet s1 = radius_x1 * ev_cos(jet(0,0,1)) * ev_sin(phi); // Keep the horizontal angle constant, vary the vertical angle!
jet s2 = radius_x2 * ev_cos(phi);
return {s0, s1, s2};
}
__forceinline__ __device__ jet ev_phi_deform0(jet phi){ // Map the (0..pi) interval to itself, but with some curvature!
if(phi.f <= M_PI/2) return -2/M_PI*(phi^2) + 2*phi;
else return 2/M_PI*(phi^2) - 2*phi + jet(M_PI);
}
__forceinline__ __device__ jet ev_phi_deform1(jet phi){ // Map (0..pi/2) to (0..pi/2) with some curvature, and map (pi/2..pi) to (5pi/2..3pi) with some curvature!
if(phi.f <= M_PI/2) return 2/M_PI*(phi^2);
else return -2/M_PI*(phi^2) + 4*phi + jet(M_PI);
}
__forceinline__ __device__ jet ev_phi_deform2(jet phi){ // A smooth bump used as an interpolation weight: 0 at the poles (phi=0 and phi=pi), 1 at the equator (phi=pi/2)!
if(phi.f > M_PI/2) phi = jet(M_PI) - phi;
return -16/(M_PI*M_PI*M_PI)*(phi^3) + 12/(M_PI*M_PI)*(phi^2);
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section Thurston-eversion stages! A 3D vjet (of 1-jets, 2-jets, or 3-jets, or k-jets) always represents a point in R3!
So, if the output of a function is a vjet, then we can always render its output as a vertex in R3!
*/
// ----------------------------------------------------------------------------------------------------------------------------#
// Low-level stages!
__forceinline__ __device__ vjet ev_stage1(jet phi){ return ev_sphere_arc(phi,+1,+1,+1); }
__forceinline__ __device__ vjet ev_stage2(jet phi){ return ev_interpolate(ev_sphere_arc(ev_phi_deform0(phi),+.9,+.9,-1), ev_sphere_arc(ev_phi_deform1(phi),+1,+1,+.5), ev_phi_deform2(phi)); }
__forceinline__ __device__ vjet ev_stage3(jet phi){ return ev_interpolate(ev_sphere_arc(ev_phi_deform0(phi),-.9,-.9,-1), ev_sphere_arc(ev_phi_deform1(phi),-1,+1,-.5), ev_phi_deform2(phi)); }
__forceinline__ __device__ vjet ev_stage4(jet phi){ return ev_sphere_arc(phi,-1,-1,-1); }
// ----------------------------------------------------------------------------------------------------------------------------#
// Mid-level stages!
__forceinline__ __device__ vjet ev_scene12(jet phi, f32 t){ return ev_interpolate(ev_stage1(phi), ev_stage2(phi), jet(t)); }
__forceinline__ __device__ vjet ev_scene23(jet phi, f32 t){ // The heart of the TWIST stage! Notice the rotations here! =D
t *= .5;
f32 tt = (phi.f<=M_PI/2) ? t : -t;
vjet rot_xy = ev_qrot_xy(ev_sphere_arc(ev_phi_deform0(phi),+0.9,+0.9,-1.0), M_TAU*jet(tt));
vjet rot_zx = ev_qrot_zx(ev_sphere_arc(ev_phi_deform1(phi),+1.0,+1.0,+0.5), M_TAU*jet(t));
return ev_interpolate(rot_xy, rot_zx, ev_phi_deform2(phi));
}
__forceinline__ __device__ vjet ev_scene34(jet phi, f32 t){ return ev_interpolate(ev_stage3(phi), ev_stage4(phi), jet(t)); }
// ----------------------------------------------------------------------------------------------------------------------------#
// High-level stages!
__forceinline__ __device__ vjet ev_figure8(vjet w,vjet h, vjet bend, jet form, jet theta){ // At the end of the twisting phase, the corrugations have nearly become figure eights!
theta = theta%1;
jet height = 1 - ev_cos(2*M_TAU*theta);
if(.25<theta.f && theta.f<.75) height = 4-height;
height = .6*height;
h = h + (height*height)/(8*8) * bend;
form = 2*form - form*form;
return ev_sin(2*M_TAU*theta)*w + ev_interpolate(2-2*ev_cos(M_TAU*theta), height, form)*h;
}
__forceinline__ __device__ vjet ev_add_figure8(vjet p, jet theta, jet phi, jet form, i32 nstrips){
jet size = -0.2 * ev_phi_deform2(phi) * form; // 0.2 is like a scale constant?
vjet du = ev_normalize(ev_partial_diff(p,0)); // Is this the partial with respect to theta, or with respect to phi?
vjet dv = ev_normalize(ev_partial_diff(p,1)); // Is this the partial with respect to theta, or with respect to phi?
vjet h = 1.0*size * ev_normalize(ev_cross(du,dv));
vjet w = 1.1*size * ev_normalize(ev_cross(h, du)); // The 1.1 factor gives more thickness/width to the corrugations?
vjet bend = ev_partial_diff(size,0)/ev_partial_diff(phi,0) * du;
vjet fig8 = ev_figure8(w,h, bend, form, (f32)nstrips/(f32)M_TAU*theta);
return ev_qrot_xy(p+fig8, theta);
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Thurston-eversion phases!
// @theta goes from 0 to TAU. It describes a full circle on the xy-plane
// @phi goes from 0 to PI. It describes half-a-circle on the zx-plane
__device__ vjet ev_corrugate( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_stage1( phi ); return ev_add_figure8(p, theta,phi, jet(t) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_push( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_scene12(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_twist( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_scene23(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_unpush( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_scene34(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_uncorrugate(jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_stage4( phi ); return ev_add_figure8(p, theta,phi, jet(1-t)*ev_phi_deform2(phi), nstrips); }
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Thurston-eversion geometry driver!
// @t must be in the [0..1] interval!
// @theta can be in any interval, although its range should be at most TAU (unless you want to cover the sphere multiple times)! Eg. [0..TAU) is good. [-PI..PI) is good.
// @phi must be in the (0..phi) interval, unless you want to hit those pesky singularities at the poles of the standard sphere parametrization!
__device__ void ev_quad(f32 t, f32 theta,f32 dtheta, f32 phi,f32 dphi, i32 nstrips, vjet* vert0_jet,vjet* vert1_jet,vjet* vert2_jet,vjet* vert3_jet){
f32 t_ev = t;
if(t_ev-EV_EPSILON < EV_CORRUGATE_TINI+EV_CORRUGATE_TDEL){ // WARN! For some reason we need to subtract the time by EV_EPSILON?
*vert0_jet = ev_corrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
*vert1_jet = ev_corrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
*vert2_jet = ev_corrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
*vert3_jet = ev_corrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_PUSH_TINI+EV_PUSH_TDEL){
*vert0_jet = ev_push(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
*vert1_jet = ev_push(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
*vert2_jet = ev_push(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
*vert3_jet = ev_push(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_TWIST_TINI+EV_TWIST_TDEL){
*vert0_jet = ev_twist(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
*vert1_jet = ev_twist(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
*vert2_jet = ev_twist(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
*vert3_jet = ev_twist(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_UNPUSH_TINI+EV_UNPUSH_TDEL){
*vert0_jet = ev_unpush(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
*vert1_jet = ev_unpush(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
*vert2_jet = ev_unpush(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
*vert3_jet = ev_unpush(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_UNCORRUGATE_TINI+EV_UNCORRUGATE_TDEL){
*vert0_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
*vert1_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
*vert2_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
*vert3_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
}
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block CUDA kernels for doing sexy stuff to the mesh
// CUDA kernel for the lights!
__global__ void ker_lights_init(light_t* lights){
f32 z = vec3(EV_CAM_POS).x2 + EV_EPSILON;
f32 x = 1024;
lights[0] = {{-x/4, -2.2, -x/4}, {+x, 0, 0}, { 0, 0,+x}, {1.4,1.4,1.8}}; // Bottom face?
lights[1] = {{-x/4, +1.8, -x/4}, {+x, 0, 0}, { 0, 0,+x}, {1.4,1.4,1.8}}; // Top face?
lights[2] = {{-3.7, -x/4, +x/4}, { 0,+x, 0}, { 0, 0,-x}, {1.4,1.4,1.8}}; // Left face?
lights[3] = {{+3.7, +x/4, -x/4}, { 0, 0,+x}, { 0,-x, 0}, {1.4,1.4,1.8}}; // Right face?
lights[4] = {{+x/4, +x/4, +z}, { 0,-x, 0}, {-x, 0, 0}, {1.4,1.4,1.8}}; // Front face?
lights[5] = {{-x/4, -x/4, -2}, {+x, 0, 0}, { 0,+x, 0}, bgr8u_to_rgb32f(EV_RGB_BG)}; // Back face!
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section This is the "mesh driver", ie. it takes Nathaniel Thurston's jet/evert stuff and actually creates the sphere eversion animation!
It creates vertex coordinates OUT OF THIN AIR (ie. out of kernel coordinates), per FRAME! How sexy is that? 0.5M triangles in 0.5ms!
*/
__global__ void ker_mesh_shader(f32 t, quat rot, u32 theta_nverts, u32 phi_nverts, triangle_t* triangles){
u32 x = blockIdx.x*blockDim.x + threadIdx.x;
u32 y = blockIdx.y*blockDim.y + threadIdx.y;
u32 thr_idx = (blockIdx.y*gridDim.x + blockIdx.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x; // Global thread index, see richiesams blogspot!
// ----------------------------------------------------------------------------------------------------------------------------#
f32 theta_gap = +.06f; // +.06f -.01f
f32 phi_gap = +.06f; // +.06f -.01f
i32 theta_idx = x;
i32 phi_idx = y;
f32 dtheta = (EV_THETA_MAX-EV_THETA_MIN) / theta_nverts; // "Vanilla" delta-theta!
f32 dphi = (EV_PHI_MAX -EV_PHI_MIN) / phi_nverts; // "Vanilla" delta-phi!
f32 theta = dtheta*(f32)theta_idx + EV_THETA_MIN; // Now theta is in [0 .. theta_max)
f32 phi = dphi *(f32)phi_idx + EV_PHI_MIN; // Now phi is in (0 .. phi_max)
f32 dtheta_gap = (EV_THETA_MAX-EV_THETA_MIN) / (theta_nverts + theta_gap*theta_nverts); // Delta-theta w/ a gap!
f32 dphi_gap = (EV_PHI_MAX -EV_PHI_MIN) / (phi_nverts + phi_gap *phi_nverts); // Delta-phi w/ a gap!
vjet vert0_jet, vert1_jet, vert2_jet, vert3_jet; ev_quad(t, theta,dtheta_gap, phi,dphi_gap, EV_NSTRIPS, &vert0_jet,&vert1_jet,&vert2_jet,&vert3_jet);
// ----------------------------------------------------------------------------------------------------------------------------#
vec3 vert0 = qrotl(vec3(vert0_jet.x0.f, vert0_jet.x1.f, vert0_jet.x2.f), rot);
vec3 vert1 = qrotl(vec3(vert1_jet.x0.f, vert1_jet.x1.f, vert1_jet.x2.f), rot);
vec3 vert2 = qrotl(vec3(vert2_jet.x0.f, vert2_jet.x1.f, vert2_jet.x2.f), rot);
vec3 vert3 = qrotl(vec3(vert3_jet.x0.f, vert3_jet.x1.f, vert3_jet.x2.f), rot);
vec3 color0 = bgr8u_to_rgb32f(EV_RGB_FRONT); // sin(theta): as `theta` goes from 0 to TAU, `sin(theta)` goes from 0 to 0
vec3 color1 = bgr8u_to_rgb32f(EV_RGB_BACK); // sin(2*phi): as `phi` goes from 0 to PI, `sin(2*phi)` goes from 0 to 0
vec3 dcolor0 = .2f * vec3(0,0,(sinf(theta)+1)/2);
vec3 dcolor1 = .3f * vec3((sinf(theta)+1)/2,0,0);
triangle_t triangle;
triangle.albedo_back = rgb32f_to_bgr8u(clamp01(color1 + dcolor1));
triangle.albedo_front = rgb32f_to_bgr8u(clamp01(color0 + dcolor0));
triangle.vert0=vert0; triangle.edge01=vert1-vert0; triangle.edge02=vert3-vert0; triangles[2*thr_idx+0]=triangle;
triangle.vert0=vert2; triangle.edge01=vert3-vert2; triangle.edge02=vert1-vert2; triangles[2*thr_idx+1]=triangle;
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @block CUDA renderer: a path tracer!
This is a CUDA path tracer, originally by Sam Lapere.
I just stole it and repurposed it.
https://github.com/straaljager/GPU-path-tracing-tutorial-2
*/
// ----------------------------------------------------------------------------------------------------------------------------#
// CUDA device code for the geometry intersection, used when path tracing!
__forceinline__ __device__ intersect_t pt_triangle_intersect(triangle_t self, ray_t ray){ // Möller-Trumbore ray-triangle intersection! Return the distance (in ray coordinates), or 0 if there's no hit!
if(self.edge01[0]==0.f && self.edge01[1]==0.f && self.edge01[2]==0.f) return {0.f,0}; // This allows us to have "trivial" primitives in the mesh and not break the path tracer!
vec3 ray_dir = ray.dir;
vec3 edge01 = self.edge01;
vec3 edge02 = self.edge02;
vec3 op = ray.pos - self.vert0;
vec3 pvec = cross(ray_dir,edge02);
f32 det = __fdividef(1.f, dot(edge01,pvec)); // CUDA intrinsic! // I think __fdividef(1/x) is FASTER than __frcp_r*() for computing multiplicative inverses / reciprocals!
f32 u = det * dot(op,pvec); if(u<0.f || u >1.f) return {0.f,0}; // No intersection! Early exit DOES help!
vec3 qvec = cross(op,edge01);
f32 v = det * dot(ray_dir,qvec); if(v<0.f || u+v>1.f) return {0.f,0}; // No intersection!
f32 t = det * dot(edge02,qvec);
return {t, det>0.f};
}
__forceinline__ __device__ vec3 pt_triangle_normal(triangle_t self, vec3 x){ // A triangle has no curvature, so its normal vector field is a CONSTANT vector field: its value is constant!
return normalize(cross(self.edge01,self.edge02)); // Cross product of two triangle edges yields a vector orthogonal to the triangle plane! Weee! A normal MUST be a unit vector!
}
__forceinline__ __device__ intersect_t pt_light_intersect(light_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit!
return pt_triangle_intersect({self.vert0, self.edge01, self.edge02}, ray);
}
__forceinline__ __device__ vec3 pt_light_normal(light_t self, vec3 x){ // A triangle has no curvature, so its normal vector field is a CONSTANT vector field: its value is constant!
return normalize(cross(self.edge01,self.edge02)); // Cross product of two triangle edges yields a vector orthogonal to the triangle plane! Weee! A normal MUST be a unit vector!
}
// ----------------------------------------------------------------------------------------------------------------------------#
__forceinline__ __device__ hit_t pt_scene_intersect(ray_t ray, u32 nlights,light_t* lights, u32 ntriangles,triangle_t* triangles){
hit_t hit = {t:1e38f, idx:0, type:GEOM_UNKNOWN, front:-1}; // hit.t is the ray coordinate of the closest intersection found so far; 1e38f means "no hit yet"
for(int i=0; i<nlights; ++i){
intersect_t intersect = pt_light_intersect(lights[i], ray);
f32 t = intersect.t; if(t<EV_EPSILON || t>hit.t) continue;
hit.t = t;
hit.idx = i;
hit.type = GEOM_LIGHT;
} // Record the position of the closest intersection point in RAY COORDINATES (which are 1-dimensional, so you need a single number), and also the ID of the object in question
for(int i=0; i<ntriangles; ++i){
intersect_t intersect = pt_triangle_intersect(triangles[i], ray);
f32 t = intersect.t; if(t<EV_EPSILON || t>hit.t) continue;
hit.t = t;
hit.idx = i;
hit.type = GEOM_TRIANGLE;
hit.front = intersect.front;
} // Record the position of the closest intersection point in RAY COORDINATES (which are 1-dimensional, so you need a single number), and also the ID of the object in question
return hit;
}
// ----------------------------------------------------------------------------------------------------------------------------#
__forceinline__ __device__ vec3 pt_normal_out(vec3 normal, vec3 ray_dir){
return dot(normal,ray_dir)<0 ? normal : -1*normal; // "Outwards" normal, to create a "bounce"!
}
// Sample a random direction on the dome/hemisphere around the hitpoint base on the normal at that point!
__forceinline__ __device__ vec3 pt_dome_randdir(vec3 normal_out, uint* seed_x, uint* seed_y){
// Compute local orthonormal basis/basis uvw at hitpoint, to compute the (random) ray direction.
// 1st vector is the normal at the hitpoint, 2nd vector is orthogonal to the 1st, 3rd vector is orthogonal to the first two
vec3 basis_w = normal_out;
vec3 axis = fabs(basis_w[0])<.1f ? vec3(1,0,0) : vec3(0,1,0);
vec3 basis_u = normalize(cross(axis, basis_w)); // We shouldn't need to normalize this, but, if we don't, then we introduce artifacts!
vec3 basis_v = cross(basis_w, basis_u); // Right-handed uvw-basis! The homology is: u -> v -> w -> u -> ...
// All our geometric primitives (just triangles) are diffuse, which reflect light uniformly in all directions!
// Generate random direction in hemisphere above hitpoint (see "Realistic Ray Tracing", P. Shirley)
f32 rand_tau = rand_f32(seed_x,seed_y) * M_TAU; // Get random number on unit circle for azimuth
f32 rand_one = rand_f32(seed_x,seed_y); // Get random number for elevation
f32 rand_sqrt = sqrtf(rand_one); // No FAST intrinsic for sqrt?
f32 cos_tau, sin_tau; __sincosf(rand_tau, &sin_tau,&cos_tau);
return cos_tau*rand_sqrt*basis_u + sin_tau*rand_sqrt*basis_v + sqrtf(1.f-rand_one)*basis_w; // Random ray direction on the hemisphere/dome around a point! Cosine-weighted importance sampling, favors ray directions closer to normal direction!
}
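// A self-contained restatement (hypothetical helper, not called anywhere in this file) of the sampling
// scheme above, in the local frame where the normal is +z: drawing sin(theta) = sqrt(r2) gives
// P(theta' <= theta) = sin^2(theta), ie. a solid-angle density p(omega) = cos(theta)/pi, which is the
// cosine weighting mentioned above (directions near the normal are favored).
__forceinline__ __device__ vec3 pt_cosine_sample_local(f32 r1, f32 r2){ // @r1,@r2 uniform in [0,1)
  f32 azimuth = r1 * M_TAU;
  f32 sin_theta = sqrtf(r2);
  f32 cos_theta = sqrtf(1.f - r2);
  return {cosf(azimuth)*sin_theta, sinf(azimuth)*sin_theta, cos_theta};
}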
// ----------------------------------------------------------------------------------------------------------------------------#
// Here we solve the rendering equation: outgoing_radiance (at x) == emitted_radiance (at x) + reflected_radiance (at x).
// Reflected radiance is the sum/integral of incoming radiance from all directions in the hemisphere above the point, multiplied by the reflectance function of the material (BRDF) and the cosine of the incident angle:
// L_o(x,w_o) == L_e(x,w_o) + integral over the hemisphere of f_r(x,w_i,w_o) * L_i(x,w_i) * cos(theta_i) dw_i
__device__ vec3 pt_radiance_integral(ray_t ray, uint* seed_x,uint* seed_y, u32 nlights,light_t* lights, u32 ntriangles,triangle_t* triangles){
vec3 rgb = vec3(0,0,0); // This will integrate/sum/accumulate the color over all bounces!
vec3 fade = vec3(1,1,1);
vec3 rgb_bg = bgr8u_to_rgb32f(EV_RGB_BG);
for(int bounce=0; bounce<EV_NBOUNCES; ++bounce){ // Iteration up to N bounces: replaces recursion in CPU code!
hit_t hit = pt_scene_intersect(ray, nlights,lights, ntriangles,triangles); if(hit.t==1e38f) return vec3(0,0,0); // No intersection! Return black!
vec3 hit_pos = ray.pos + hit.t*ray.dir; // @hit_pos is the hit position in WORLD COORDINATES! @hit.t is the hit position in RAY COORDINATES!
// ----------------------------------------------------------------
vec3 obj_normal, obj_rgb, obj_emi;
switch(hit.type){ // Retrieve the geometric data of the object we hit!
case GEOM_LIGHT:{
light_t obj = lights[hit.idx];
obj_normal = pt_light_normal(obj, hit_pos);
obj_rgb = vec3(0,0,0);
obj_emi = obj.emission;
}break;
case GEOM_TRIANGLE:{
triangle_t obj = triangles[hit.idx];
obj_normal = pt_triangle_normal(obj, hit_pos);
obj_rgb = hit.front ? bgr8u_to_rgb32f(obj.albedo_front) : bgr8u_to_rgb32f(obj.albedo_back);
obj_emi = vec3(0,0,0);
}break;
}
rgb = rgb + fade*obj_emi; // Add emission of current object to accumulated color (first term in rendering equation sum)
// ----------------------------------------------------------------
vec3 obj_normal_out = pt_normal_out(obj_normal, ray.dir); // "Outwards" normal, to create a "bounce"!
vec3 dome_dir = pt_dome_randdir(obj_normal_out, seed_x,seed_y);
fade = dot(obj_normal_out, dome_dir) * obj_rgb * fade; // 0) Integrate/sum/accumulate the fade! Weigh light/color energy using cosine of angle between normal and incident light!
ray.pos = hit_pos + EV_EPSILON*obj_normal_out; // 1) Launch a new raw starting by "bouncing" it from the object! Offset ray position slightly to prevent self intersection
ray.dir = dome_dir; // "Bounce" the ray from the surface at the hit position, oriented by the surface normal!
}
return rgb;
}
// ----------------------------------------------------------------------------------------------------------------------------# Map a CUDA thread to each pixel!
__global__ void ker_pixel_shader(u32 img_w,u32 img_h, u32 img_w_min,u32 img_w_max,u32 img_h_min,u32 img_h_max, u32* img_data, u32 nlights,light_t* lights, u32 ntriangles,triangle_t* triangles, f32 cam_fov,vec3 cam_pos,vec3 cam_dir,quat cam_rot, u32 seed){
u32 px_x = blockIdx.x*blockDim.x + threadIdx.x;
u32 px_y = blockIdx.y*blockDim.y + threadIdx.y; if(px_x>=(img_w_max-img_w_min) || px_y>=(img_h_max-img_h_min)) return;
u32 seed_x = px_x + seed;
u32 seed_y = px_y + seed;
// ----------------------------------------------------------------
cam_dir = qrotl(cam_dir, cam_rot);
vec3 cam_dir_x = qrotl((.5*cam_fov) * vec3((f32)img_w/img_h, 0, 0), cam_rot); // Cam ray is directed at the lower-left corner of the screen!
vec3 cam_dir_y = (.5*cam_fov) * normalize(cross(cam_dir,cam_dir_x)); // Cam ray is directed at the lower-left corner of the screen!
// ----------------------------------------------------------------
vec3 px_rgb = vec3(0,0,0); // Final pixel color! Init to zero for each pixel!
for(int sample=0; sample<EV_NSAMPLES; ++sample){ // Samples per pixel! Camera rays are pushed forward to start in interior
f32 cam_dx = (px_x + rand_f32(&seed_x,&seed_y)) / img_w - .5;
f32 cam_dy = (px_y + rand_f32(&seed_x,&seed_y)) / img_h - .5 + (f32)img_h_min/img_h;
vec3 px_pos = cam_pos;
vec3 px_dir = cam_dir + cam_dx*cam_dir_x + cam_dy*cam_dir_y;
ray_t px_ray = {px_pos, normalize(px_dir)};
px_rgb = px_rgb + 1.f/EV_NSAMPLES * pt_radiance_integral(px_ray, &seed_x,&seed_y, nlights,lights, ntriangles,triangles);
}
img_data[px_y*img_w + px_x] = rgb32f_to_rgb8u(clamp01(px_rgb));
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block Multi-GPU computing data structure!
struct gpu_work_t{
// General parameters!
u32 id; // Device ID!
f32 t; // Time!
quat rot; // Rotation quaternion!
// Mesh shader stuff!
u32 theta_nverts, phi_nverts;
u32 nlights;
u32 ntriangles;
light_t* lights_gpu;
triangle_t* triangles_gpu;
// Pixel shader stuff!
u32 img_w, img_h;
u32 img_w_min, img_w_max;
u32 img_h_min, img_h_max;
u32 img_tile_nelems;
u32 img_tile_stride;
u32* img_tile_gpu;
u32* img_full_cpu;
};
gpu_work_t* gpu_work_init(u32 gpu_id, u32 img_w,u32 img_h, u32 img_w_min,u32 img_w_max,u32 img_h_min,u32 img_h_max, u32 img_tile_stride){
gpu_work_t* gpu = (gpu_work_t*)malloc(sizeof(gpu_work_t));
gpu->id = gpu_id;
hipSetDevice(gpu->id); cuda_check();
// ---------------------------------------------------------------- Mesh shader parameters!
gpu->theta_nverts = ceilf(EV_THETA_NVERTS);
gpu->phi_nverts = ceilf(EV_PHI_NVERTS);
gpu->nlights = EV_NLIGHTS;
gpu->ntriangles = gpu->theta_nverts * gpu->phi_nverts * 2;
// ---------------------------------------------------------------- Pixel shader parameters!
gpu->img_w = img_w;
gpu->img_h = img_h;
gpu->img_w_min = img_w_min;
gpu->img_w_max = img_w_max;
gpu->img_h_min = img_h_min;
gpu->img_h_max = img_h_max;
gpu->img_tile_nelems = (img_w_max-img_w_min) * (img_h_max-img_h_min);
gpu->img_tile_stride = img_tile_stride;
// ---------------------------------------------------------------- Mesh shader buffers!
hipMalloc(&gpu->lights_gpu, sizeof(light_t) *gpu->nlights);
hipMalloc(&gpu->triangles_gpu, sizeof(triangle_t)*gpu->ntriangles);
// ---------------------------------------------------------------- Pixel shader buffers!
hipMalloc(&gpu->img_tile_gpu, sizeof(u32)*gpu->img_tile_nelems);
if(gpu->id==EV_GPU_MAIN) hipHostMalloc(&gpu->img_full_cpu, sizeof(u32)*gpu->img_w*gpu->img_h);
cuda_check();
return gpu;
}
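// A minimal sketch of how the tile fields above are presumably filled; the code that actually builds
// the gpu_work_t array is not in this part of the file, and the name gpus_init_sketch is hypothetical.
// The horizontal-band split assumed here is consistent with how img_h_min is used in ker_pixel_shader
// and how img_tile_stride offsets the copy in gpu_pixel_shader: each GPU renders rows [h_min, h_max)
// at full width and lands in the shared CPU buffer starting at row h_min.
gpu_work_t** gpus_init_sketch(u32 img_w, u32 img_h){
  gpu_work_t** gpus = (gpu_work_t**)malloc(sizeof(gpu_work_t*) * EV_NGPUS);
  for(u32 id=0; id<EV_NGPUS; ++id){
    u32 h_min = (id+0)*img_h/EV_NGPUS;
    u32 h_max = (id+1)*img_h/EV_NGPUS;
    gpus[id] = gpu_work_init(id, img_w,img_h, 0,img_w, h_min,h_max, img_w*h_min);
  }
  return gpus;
}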
void gpu_work_free(gpu_work_t* gpu){
hipSetDevice(gpu->id);
hipFree(gpu->triangles_gpu);
hipFree(gpu->lights_gpu);
hipFree(gpu->img_tile_gpu);
if(gpu->id==EV_GPU_MAIN) hipHostFree(gpu->img_full_cpu);
hipDeviceReset(); cuda_check();
free(gpu);
}
void gpu_sync(gpu_work_t* gpu){ // Always sync (only) stream zero
hipSetDevice(gpu->id);
hipStreamSynchronize(0); cuda_check();
}
void gpu_mesh_shader(gpu_work_t* gpu){
hipSetDevice(gpu->id);
dim3 block_dim = {1,1,1}; // Launch `block_dim.x * block_dim.y * block_dim.z` nthreads per block (here a single thread per block)! Max nthreads per block on Titan V is 1024!
dim3 grid_dim = {m_divceilu(gpu->theta_nverts,block_dim.x), m_divceilu(gpu->phi_nverts,block_dim.y), 1}; // Launch ` grid_dim.x * grid_dim.y * grid_dim.z` nblocks per grid!
hipLaunchKernelGGL(( ker_lights_init), dim3(1),dim3(1), 0, 0, gpu->lights_gpu); cuda_check();
hipLaunchKernelGGL(( ker_mesh_shader), dim3(grid_dim),dim3(block_dim), 0, 0, gpu->t,gpu->rot, gpu->theta_nverts,gpu->phi_nverts, gpu->triangles_gpu); cuda_check();
}
void gpu_pixel_shader(gpu_work_t* gpu, u32* img_cpu, u32 seed){
quat cam_rot_yz = versor(EV_CAM_ROT_YZ, vec3(1,0,0));
quat cam_rot_zx = versor(EV_CAM_ROT_ZX, vec3(0,1,0));
quat cam_rot_xy = versor(EV_CAM_ROT_XY, vec3(0,0,1));
quat cam_rot = cam_rot_xy * cam_rot_zx * cam_rot_yz;
hipSetDevice(gpu->id);
dim3 block_dim = {8,8,1};
dim3 grid_dim = {m_divceilu((gpu->img_w_max-gpu->img_w_min), block_dim.x), m_divceilu((gpu->img_h_max-gpu->img_h_min), block_dim.y), 1};
hipLaunchKernelGGL(( ker_pixel_shader), dim3(grid_dim),dim3(block_dim), 0, 0, gpu->img_w,gpu->img_h, gpu->img_w_min,gpu->img_w_max, gpu->img_h_min,gpu->img_h_max, gpu->img_tile_gpu, gpu->nlights,gpu->lights_gpu, gpu->ntriangles,gpu->triangles_gpu, EV_CAM_FOV,vec3(EV_CAM_POS),normalize(EV_CAM_DIR),cam_rot, seed); cuda_check();
hipMemcpyAsync(img_cpu + gpu->img_tile_stride, gpu->img_tile_gpu, sizeof(u32)*gpu->img_tile_nelems, hipMemcpyDeviceToHost, 0); cuda_check(); // Default stream!
}
// ----------------------------------------------------------------------------------------------------------------------------#
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
void gpu_img_save(gpu_work_t* gpu, const char* img_dir, i32 frame){ // PPM+fwrite u32 saves 1920x1024 @ 60ms! PPM+fwrite float3 saves 1920x1024 @ 150ms! PNG+fwrite saves 1920x1024 @ 700ms!
char img_path[PATH_MAX]={}; snprintf(img_path,PATH_MAX-1, "%s/%04d.ppm",img_dir,frame);
char header_txt[256]={};
int header_nbytes = snprintf(header_txt,sizeof(header_txt)-1,"P6\n%d %d\n%d\n", gpu->img_w, gpu->img_h, 255);
u32 pixel_nbytes = 3*gpu->img_w*gpu->img_h;
// open/ftruncate/mmap/close/memcpy/munmap 1920x1080: 7ms! fwrite: 80ms! write: 1380ms!
int fd = open(img_path, O_RDWR|O_CREAT|O_TRUNC, 0b110100100); if(fd==-1){ printf("\x1b[31mWARN\x1b[0m Can't \x1b[35mopen\x1b[0m \x1b[33m%s\x1b[0m! Discarding rendered image \x1b[31m=(\x1b[0m ",img_path); return; } // O_RDONLY O_WRONLY O_RDWR
int st = ftruncate(fd, header_nbytes+pixel_nbytes); if(st==-1) printf("\x1b[31mWARN\x1b[0m Can't \x1b[35mftruncate\x1b[0m \x1b[33m%s\x1b[0m! ",img_path);
u8* data8_dst = (u8*)mmap(NULL, header_nbytes+pixel_nbytes, PROT_READ|PROT_WRITE,MAP_SHARED, fd,0); if(data8_dst==MAP_FAILED) printf("\x1b[31mWARN\x1b[0m Can't \x1b[35mmmap\x1b[0m \x1b[33m%s\x1b[0m! ",img_path);
st=close(fd); if(st==-1) printf("\x1b[31mWARN\x1b[0m Can't close file descriptor!"); // `mmap` adds an extra reference to the file associated with the file descriptor which is not removed by a subsequent `close` on that file descriptor!
memcpy(data8_dst, header_txt, header_nbytes);
data8_dst += header_nbytes;
i32 height = gpu->img_h; // Caching these guys takes us from 7ms to 5ms!
i32 width = gpu->img_w;
u32* data32_src = gpu->img_full_cpu;
for(i32 i=height; 0<i; --i){ // The vertical index iterates backwards! =D
for(i32 j=0; j<width; ++j){
i32 lidx = (i-1)*width + j;
memcpy(data8_dst + 3*lidx, data32_src + lidx, 3); // memcpy 1920x1080: 6-7ms, single-byte addressing 1920x1080: 7-8ms
}
}
munmap(data8_dst, header_nbytes+pixel_nbytes);
}
// ----------------------------------------------------------------------------------------------------------------------------#
void gpus_render_to_disk(gpu_work_t** gpus){ // Driver of GPU work!
u64 nintersections = (u64)EV_IMG_W*EV_IMG_H * EV_NSAMPLES*EV_NBOUNCES * gpus[EV_GPU_MAIN]->ntriangles;
u32 seed = time(NULL);
f32 t = EV_TMIN;
f32 dt = (EV_TMAX-EV_TMIN) / max(1,EV_NFRAMES-1);
f64 spf; dt_t tdel;
putchar(0x0a);
printf("\x1b[94mimg_w\x1b[0m \x1b[0m%d\x1b[0m\n", gpus[EV_GPU_MAIN]->img_w);
printf("\x1b[35mimg_h\x1b[0m \x1b[0m%d\x1b[0m\n", gpus[EV_GPU_MAIN]->img_h);
printf("\x1b[31mnframes\x1b[0m \x1b[0m%d\x1b[0m\n", EV_NFRAMES);
printf("\x1b[32mnsamples\x1b[0m \x1b[0m%d\x1b[0m\n", EV_NSAMPLES);
printf("\x1b[94mnbounces\x1b[0m \x1b[0m%d\x1b[0m\n", EV_NBOUNCES);
putchar(0x0a);
printf("\x1b[32mimg\x1b[0m dir \x1b[33m%s\x1b[0m\n", EV_IMG_DIR);
printf("\x1b[32mtriangles\x1b[0m nelems \x1b[94m%'u\x1b[0m\n", gpus[EV_GPU_MAIN]->ntriangles);
printf("\x1b[32mtriangles\x1b[0m nbytes \x1b[35m%'lu\x1b[0m\n", gpus[EV_GPU_MAIN]->ntriangles*sizeof(triangle_t));
putchar(0x0a);
printf("\x1b[32mnintersections\x1b[0m any frame \x1b[94m%'lu\x1b[0m\n", nintersections);
printf("\x1b[32mnintersections\x1b[0m all frames \x1b[35m%'lu\x1b[0m\n", nintersections * EV_NFRAMES);
// ----------------------------------------------------------------------------------------------------------------------------#
puts("");
for(int frame=0; frame<EV_NFRAMES; ++frame){
printf("\x1b[35m%04d\x1b[0m \x1b[31m%6.3f\x1b[0m ", frame, t); fflush(stdout);
// ----------------------------------------------------------------
quat rot_yz = versor(-.09*M_TAU, vec3(1,0,0));
quat rot_zx = versor(-.03*M_TAU, vec3(0,1,0));
quat rot_xy = versor(+.01*t, vec3(0,0,1));
for(int gpu=0; gpu<EV_NGPUS; ++gpu){
gpus[gpu]->t = t;
gpus[gpu]->rot = rot_xy * rot_zx * rot_yz;
}
// ----------------------------------------------------------------
dt_ini(&tdel);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_mesh_shader(gpus[gpu]);
    for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_sync(gpus[gpu]);  // Not strictly needed for correctness (each GPU's stream 0 serializes its own work), but sync so the mesh-shader timing below is accurate!
dt_end(&tdel); spf=dt_del(&tdel); printf("mesh_shdr \x1b[32m%.6f\x1b[0m ", spf); fflush(stdout);
// ----------------------------------------------------------------
dt_ini(&tdel);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_pixel_shader(gpus[gpu], gpus[EV_GPU_MAIN]->img_full_cpu, seed);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_sync(gpus[gpu]);
dt_end(&tdel); spf=dt_del(&tdel); printf("px_shdr \x1b[32m%.3f\x1b[0m px/s \x1b[94m%'.0f\x1b[0m ", spf, (gpus[EV_GPU_MAIN]->img_w*gpus[EV_GPU_MAIN]->img_h)/spf); fflush(stdout);
printf("%s \x1b[31m%'.0f\x1b[0m ", "prim/s", (f64)gpus[EV_GPU_MAIN]->ntriangles/spf); fflush(stdout);
printf("%s \x1b[32m%'.0f\x1b[0m ", "rays/s", ((f64)nintersections/gpus[EV_GPU_MAIN]->ntriangles)/spf); fflush(stdout);
printf("%s \x1b[94m%'.0f\x1b[0m ", "ints/s", (f64)nintersections/spf); fflush(stdout);
// ----------------------------------------------------------------
dt_ini(&tdel); // 1920x1024 @ 80ms!
gpu_img_save(gpus[EV_GPU_MAIN], EV_IMG_DIR, frame);
dt_end(&tdel); spf=dt_del(&tdel); printf("%s \x1b[32m%.3f\x1b[0m ", "ppm", spf); fflush(stdout);
// ----------------------------------------------------------------
putchar(0x0a);
t += dt;
}
// ----------------------------------------------------------------------------------------------------------------------------#
puts(""); dt_ini(&tdel);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_work_free(gpus[gpu]);
dt_end(&tdel); printf("%s \x1b[33m%.6f\x1b[0m\n", "gpus_free", dt_del(&tdel));
}
// ----------------------------------------------------------------------------------------------------------------------------#
int main(){
setlocale(LC_NUMERIC, "");
gpu_work_t* gpus[EV_NGPUS];
u32 img_w_min,img_w_max, img_h_min,img_h_max, img_tile_stride;
dt_t tdel; dt_ini(&tdel);
img_w_min = 0;
img_w_max = EV_IMG_W;
img_h_min = 0*EV_IMG_H/EV_NGPUS;
img_h_max = 1*EV_IMG_H/EV_NGPUS + 64;
img_tile_stride = img_h_min*EV_IMG_W; // This is for the final copy of the rendered tiles from all GPUs to CPU memory!
gpus[0] = gpu_work_init(0, EV_IMG_W,EV_IMG_H, img_w_min,img_w_max,img_h_min,img_h_max, img_tile_stride);
img_w_min = 0;
img_w_max = EV_IMG_W;
img_h_min = 1*EV_IMG_H/EV_NGPUS + 64;
img_h_max = 2*EV_IMG_H/EV_NGPUS;
img_tile_stride = img_h_min*EV_IMG_W; // This is for the final copy of the rendered tiles from all GPUs to CPU memory!
gpus[1] = gpu_work_init(1, EV_IMG_W,EV_IMG_H, img_w_min,img_w_max,img_h_min,img_h_max, img_tile_stride);
dt_end(&tdel); printf("%s \x1b[33m%.6f\x1b[0m\n", "gpus_init", dt_del(&tdel));
gpus_render_to_disk(gpus);
}
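// ----------------------------------------------------------------------------------------------------------------------------#
// NOTE A minimal sketch (not part of the original build) of how the hard-coded 2-GPU row split in main() above could be generalized
// to N GPUs. The `+64`-row bias that enlarges GPU 0's tile is kept as an explicit parameter; the helper name `gpu_tile_rows` is ours.
static void gpu_tile_rows(u32 img_h, u32 ngpus, u32 gpu, u32 bias_rows, u32* img_h_min, u32* img_h_max){
  u32 h0 = (gpu+0)*img_h/ngpus;  // Plain row partition...
  u32 h1 = (gpu+1)*img_h/ngpus;
  if(gpu>0)       h0 += bias_rows;  // ...shifted so that GPU 0 gets `bias_rows` extra rows, like the `+64` in main()!
  if(gpu+1<ngpus) h1 += bias_rows;
  *img_h_min = h0;
  *img_h_max = h1;
}
// Usage sketch: gpu_tile_rows(EV_IMG_H, EV_NGPUS, gpu_id, 64, &img_h_min, &img_h_max);  img_tile_stride = img_h_min*EV_IMG_W;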
| 9d837bfc910d814b9263cc2ef3b4fb7ee0015991.cu | // nvcc evert-cuda.cu -o evert-cuda -use_fast_math -O3 -Xcompiler "-Ofast -march=native" -Xptxas "-O3 --verbose --warn-on-local-memory-usage --warn-on-spills" && ./evert-cuda
#include <stdint.h>
#include <stdio.h>
#include <locale.h>
#define M_TAU 6.283185 // The (approximate) arclength of a circle of radius 1
// GPU general data!
#define EV_NGPUS 2
#define EV_GPU_MAIN 1
#define EV_IMG_DIR "." // "/media/tmp" // "."
#define EV_EPSILON 0.001f
#define EV_RGB_BG 0x080808
#define EV_NLIGHTS 6 // The number of 2-faces of a cube!
// PIXEL SHADER data!
#define EV_NFRAMES 7 // If the number of frame is EVEN, then there's NO MIDDLE FRAME (and vv)! WARN! NFRAMES must be at least 2
#define EV_NSAMPLES (1<<1)
#define EV_NBOUNCES 4
#define EV_IMG_W (1920>>1)
#define EV_IMG_H (1080>>1)
#define EV_CAM_FOV (M_PI/3) // 2: 90 fov, 3: 60 fov, 4: 45 fov, 6: 30 fov
#define EV_CAM_POS { 0, 0, 5.0}
#define EV_CAM_DIR {-0,-0.03,-1.0}
#define EV_CAM_ROT_YZ 0.0 // Camera rotation over the yz-plane
#define EV_CAM_ROT_ZX 0.0 // Camera rotation over the zx-plane
#define EV_CAM_ROT_XY 0.0 // Camera rotation over the xy-plane
// MESH shader data! @theta is the AZIMUTHAL parameter; @v is the POLAR parameter!
#define EV_NSTRIPS 8
#define EV_THETA_MIN (0)
#define EV_PHI_MIN (0 + EV_EPSILON)
#define EV_THETA_MAX ((8./EV_NSTRIPS)*M_TAU) // 8
#define EV_PHI_MAX ((2./2) *M_PI - EV_EPSILON) // 2
#define EV_THETA_NVERTS (30*1*(EV_THETA_MAX-EV_THETA_MIN)/M_TAU*EV_NSTRIPS)
#define EV_PHI_NVERTS (30*2*(EV_PHI_MAX -EV_PHI_MIN) /M_PI *2)
#define EV_RGB_FRONT 0xff9999 // 0xff6666
#define EV_RGB_BACK 0x5eaeff // 0x1188ff
// STAGE times!
#define EV_CORRUGATE_TDEL 1.f
#define EV_PUSH_TDEL 2.f
#define EV_TWIST_TDEL 6.f
#define EV_UNPUSH_TDEL 2.f
#define EV_UNCORRUGATE_TDEL 1.f
#define EV_CORRUGATE_TINI (0.f)
#define EV_PUSH_TINI (EV_CORRUGATE_TINI+EV_CORRUGATE_TDEL)
#define EV_TWIST_TINI (EV_PUSH_TINI +EV_PUSH_TDEL)
#define EV_UNPUSH_TINI (EV_TWIST_TINI +EV_TWIST_TDEL)
#define EV_UNCORRUGATE_TINI (EV_UNPUSH_TINI +EV_UNPUSH_TDEL)
#define EV_TMIN (EV_CORRUGATE_TINI)
#define EV_TMAX (EV_CORRUGATE_TINI + EV_CORRUGATE_TDEL+EV_PUSH_TDEL+EV_TWIST_TDEL+EV_UNPUSH_TDEL+EV_UNCORRUGATE_TDEL)
// ----------------------------------------------------------------------------------------------------------------------------#
typedef uint8_t u8;
typedef float f32;
typedef int32_t i32;
typedef uint32_t u32;
typedef double f64;
typedef uint64_t u64;
// ----------------------------------------------------------------------------------------------------------------------------#
#include <time.h>
struct dt_t{
f64 t0, t1;
};
f64 dt_abs(){ struct timespec tabs; clock_gettime(CLOCK_MONOTONIC,&tabs); return tabs.tv_sec + 1e-9*tabs.tv_nsec; } // m_checksys(st,"clock_gettime");
f64 dt_del(dt_t* dt){ return dt->t1 - dt->t0; } // Get `relative time`, ie. a time delta between 2 absolute times! The time delta is returned in seconds, and its resolution is in nanoseconds!
void dt_ini(dt_t* dt){ dt->t0 = dt_abs(); }
void dt_end(dt_t* dt){ dt->t1 = dt_abs(); }
// ----------------------------------------------------------------------------------------------------------------------------#
#define cuda_check(){ cudaError_t err; while((err=cudaGetLastError()) != cudaSuccess) printf("\x1b[91mFAIL\x1b[0m \x1b[32mCUDA\x1b[0m \x1b[32m%s\x1b[0m:\x1b[94mL%d\x1b[0m \x1b[35m%s\x1b[0m \x1b[33m%s\x1b[0m \x1b[37m%s\x1b[0m\n", __FILE__,__LINE__,__func__, cudaGetErrorName(err),cudaGetErrorString(err)); }
#define m_divceilu(N, D) (((N)%(D)) ? (N)/(D)+1 : (N)/(D))
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @block */
// ----------------------------------------------------------------------------------------------------------------------------#
struct vec3{ // Just a simple 3D vector!
union{ // Access the `vec3` using array notation of by specifying the name of a component!
f32 data[3];
struct{ f32 x0, x1, x2; };
};
__device__ __host__ vec3(){}
__device__ __host__ vec3(f32 a0, f32 a1, f32 a2){ x0=a0; x1=a1; x2=a2; }
__device__ __host__ f32 operator[](int idx){ return data[idx]; }
};
__device__ __host__ vec3 operator*(f32 s, vec3 v){ return {s*v[0], s*v[1], s*v[2]}; } // Scalar multiplication!
__device__ __host__ vec3 operator+(vec3 v0, vec3 v1){ return {v0[0]+v1[0], v0[1]+v1[1], v0[2]+v1[2]}; } // Vector addition!
__device__ __host__ vec3 operator-(vec3 v0, vec3 v1){ return {v0[0]-v1[0], v0[1]-v1[1], v0[2]-v1[2]}; } // Vector subtraction!
__device__ __host__ vec3 operator*(vec3 v0, vec3 v1){ return {v0[0]*v1[0], v0[1]*v1[1], v0[2]*v1[2]}; } // Vector multiplication!
__device__ __host__ f32 dot(vec3 v0, vec3 v1){ return v0[0]*v1[0] + v0[1]*v1[1] + v0[2]*v1[2]; } // Quite important for triangle intersection and a bit for the path tracer!
__device__ __host__ vec3 cross(vec3 v0, vec3 v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ...
return {v0[1]*v1[2] - v0[2]*v1[1], // 0 --> 1 --> 2
v0[2]*v1[0] - v0[0]*v1[2], // 1 --> 2 --> 0
v0[0]*v1[1] - v0[1]*v1[0]}; // 2 --> 0 --> 1
}
__device__ __host__ vec3 normalize(vec3 v){ return rsqrtf(dot(v,v)) * v; }
// ----------------------------------------------------------------------------------------------------------------------------#
// The mighty quaternions! A real Clifford algebra (aka. a geometric algebra) related to:
// spinors, 3D rotations, the 3-sphere living in 4D, the gauge group SU(2) from quantum flavordynamics,
// the Whitehead tower of the orthogonal group O(3), the string group String(3), the fivebrane group Fivebrane(3), ...
struct quat{
union{
f32 data[4];
struct{ f32 x0, x1, x2, x3; };
};
__device__ __host__ quat(){}
__device__ __host__ quat(f32 a0, f32 a1, f32 a2, f32 a3){ x0=a0; x1=a1; x2=a2; x3=a3; }
__device__ __host__ quat(f32 s, vec3 v){ x0=s; x1=v[0]; x2=v[1]; x3=v[2]; }
__device__ __host__ f32 operator[](int idx){ return data[idx]; }
};
__device__ __host__ quat operator*(quat q0, quat q1){ // The quaternion product is a sort of "twisted" product of 4D vectors
return {q0[0]*q1[0] - q0[1]*q1[1] - q0[2]*q1[2] - q0[3]*q1[3],
q0[0]*q1[1] + q0[1]*q1[0] + q0[2]*q1[3] - q0[3]*q1[2],
q0[0]*q1[2] - q0[1]*q1[3] + q0[2]*q1[0] + q0[3]*q1[1],
q0[0]*q1[3] + q0[1]*q1[2] - q0[2]*q1[1] + q0[3]*q1[0]};
}
__device__ __host__ quat conj(quat q){ return {q[0], -q[1], -q[2], -q[3]}; } // The quaternion inverse of a quaternion `q` is just `conj(q) / quad(q)`, just like for complex numbers!
__device__ __host__ quat versor(f32 angle, vec3 dir){
return {cosf(.5*angle), sinf(.5*angle)*normalize(dir)};
}
__device__ __host__ vec3 qrotl(vec3 v, quat versor){ // WARN! @versor must be a unit-quaternion!
quat p_rot = versor * quat(0,v) * conj(versor); // Left-conjugation by @versor! The quaternion-inverse of a unit-quaternion is its quaternion-conjugate!
return {p_rot[1], p_rot[2], p_rot[3]};
}
// ----------------------------------------------------------------------------------------------------------------------------#
__forceinline__ __device__ f32 clamp01(f32 x){ return __saturatef(x); }
__forceinline__ __device__ vec3 clamp01(vec3 v){ return {clamp01(v[0]), clamp01(v[1]), clamp01(v[2])}; }
__forceinline__ __device__ f32 rgb_gamma_decode(f32 channel){ return __powf(channel, 2.2/1); }
__forceinline__ __device__ f32 rgb_gamma_encode(f32 channel){ return __powf(channel, 1/2.2); }
__forceinline__ __device__ f32 rgb_u8_to_f32( u8 channel){ return rgb_gamma_decode(channel/255.); }
__forceinline__ __device__ u8 rgb_f32_to_u8( f32 channel){ return 255.*rgb_gamma_encode(channel) + .5; }
__forceinline__ __device__ vec3 bgr8u_to_rgb32f(u32 bgr8u){
return {rgb_u8_to_f32((bgr8u>>0x10)&0xff),
rgb_u8_to_f32((bgr8u>>0x08)&0xff),
rgb_u8_to_f32((bgr8u>>0x00)&0xff)};
}
__forceinline__ __device__ u32 rgb32f_to_bgr8u(vec3 rgbf32){
return (rgb_f32_to_u8(rgbf32[0])<<0x10) |
(rgb_f32_to_u8(rgbf32[1])<<0x08) |
(rgb_f32_to_u8(rgbf32[2])<<0x00);
}
__forceinline__ __device__ u32 rgb32f_to_rgb8u(vec3 rgbf32){
return (rgb_f32_to_u8(rgbf32[0])<<0x00) |
(rgb_f32_to_u8(rgbf32[1])<<0x08) |
(rgb_f32_to_u8(rgbf32[2])<<0x10);
}
__forceinline__ __device__ f32 rand_f32(u32* seed0, u32* seed1){ // Random number generator from https://github.com/gz/rust-raytracer
*seed0 = 36969*(*seed0&0xffff) + (*seed0>>0x10);
*seed1 = 18489*(*seed1&0xffff) + (*seed1>>0x10);
u32 val_u32 = 0x40000000 | (((*seed0<<0x10) + *seed1) & 0x007fffff);
return .5f * (*(f32*)&val_u32) - 1.f;
}
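// ----------------------------------------------------------------------------------------------------------------------------#
// NOTE A small sanity-check sketch (not used by the renderer): the gamma helpers above should round-trip a byte through linear space
// and back (up to fast-math rounding), and rand_f32() yields values in [0,1) by construction (it builds a float in [2,4) and maps it
// affinely). The kernel name `ker_color_rng_selftest` is ours, not part of the original code.
__global__ void ker_color_rng_selftest(){
  int bad = 0;
  for(int c=0; c<256; ++c)  // Gamma decode/encode round trip!
    if(rgb_f32_to_u8(rgb_u8_to_f32((u8)c)) != (u8)c) ++bad;
  uint sx=123456789u, sy=362436069u;
  f32 lo=1.f, hi=0.f;
  for(int i=0; i<4096; ++i){ f32 r=rand_f32(&sx,&sy); lo=fminf(lo,r); hi=fmaxf(hi,r); }
  printf("gamma round-trip mismatches %d (expect 0, or a few w/ fast math)  rand range [% .4f, % .4f) in [0,1)\n", bad, lo, hi);
}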
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block Geometric data structures! Each geometric primitive needs its own intersection routine!
// ----------------------------------------------------------------------------------------------------------------------------#
struct light_t{
vec3 vert0; // Geometry/intersection data! vert0 IS the main vertex, edge01 IS vert1 - vert0, edge02 IS vert2 - vert0
vec3 edge01;
vec3 edge02;
vec3 emission; // Lighting/rendering data!
};
struct triangle_t{
vec3 vert0; // Geometry/intersection data! vert0 IS the main vertex, edge01 IS vert1 - vert0, edge02 IS vert2 - vert0
vec3 edge01;
vec3 edge02;
u32 albedo_back; // Lighting/rendering data! Albedo is the base color input, aka. a diffuse map... or something
u32 albedo_front;
};
enum geom_type_t{ GEOM_UNKNOWN=0, GEOM_LIGHT, GEOM_TRIANGLE};
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Path tracing data structures!
struct ray_t{
vec3 pos; // Ray origin!
vec3 dir; // Ray direction!
};
struct intersect_t{ // We return this data structure upon hitting something when path tracing!
f32 t;
int front; // Did we hit the front or the back?
};
struct hit_t{
f32 t; // The position of the hit in RAY COORDINATES. A ray is 1-dimensional, so its coordinates are 1-dimensional, too! Here we record *where* we hit the object!
u32 idx; // The object index, so that we know which object we hit!
u32 type; // What type of object did we hit, and in which mesh?
int front; // Did we hit the front or the back?
};
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @block EVERT code, originally by Nathaniel Thurston (Copyright 1995 Geometry Center, University of Minnesota)
What follows is a fork of Nathaniel Thurston's `evert` code, which implemented the Thurston sphere eversion for the movie Outside In, of meme-tastic fame.
The basic building blocks in `evert` are the so-called jets.
I have no idea what they're supposed to "be", but in `evert` they keep track of differential data associated to a scalar field.
As far as `evert` is concerned, they seem to have been used to implement automatic differentiation,
since partial derivatives are used to concoct the "figure eight" towards the end of the eversion code.
// ----------------------------------------------------------------------------------------------------------------------------#
Copyright (c) 1993
The National Science and Technology Research Center for
Computation and Visualization of Geometric Structures
(The Geometry Center)
University of Minnesota
1300 South Second Street
Minneapolis, MN 55454 USA
email: [email protected]
The software distributed here is copyrighted as noted above.
It is free software and may be obtained via anonymous ftp from
ftp.geom.umn.edu. It may be freely copied, modified, and
redistributed under the following conditions:
1. All copyright notices must remain intact in all files.
2. A copy of this file (COPYING) must be distributed along with any
copies that you redistribute; this includes copies that you have
modified, or copies of programs or other software products that
include this software.
3. If you modify this software, you must include a notice giving the
name of the person performing the modification, the date of
modification, and the reason for such modification.
4. When distributing modified versions of this software, or other
software products that include this software, you must provide
notice that the original source code may be obtained as noted
above.
5. There is no warranty or other guarantee of fitness for this
software, it is provided solely "as is". Bug reports or fixes may
be sent to the email address above; the authors may or may not act
on them as they desire.
If you use an image produced by this software in a publication or
presentation, we request that you credit the Geometry Center with a
notice such as the following:
Figures 1, 2, and 5-300 were generated with software written at the
Geometry Center, University of Minnesota.
*/
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section A 1-jet, aka. a first-order jet, aka. a scalar field (evaluated at some point) together with its 1st-order partial derivatives (evaluated at some point)!
We can think of a k-jet as an "augmented" floating-point number: the first entry in the struct is the value of the number,
and all the following entries are the partial derivatives up to order k.
Since we only need first-order partial derivatives, we'll only implement 1-jets.
*/
struct jet{
f32 f; // Scalar value of a 2D scalar field!
f32 fu, fv; // 1st-order partial derivatives of a 2D scalar field!
__forceinline__ __device__ jet(){}
__forceinline__ __device__ jet(f32 s){ f=s; fu=0; fv=0; }
__forceinline__ __device__ jet(f32 s, f32 su, f32 sv){ f=s; fu=su; fv=sv; }
};
__forceinline__ __device__ jet operator+(jet x0, jet x1){ return {x0.f + x1.f, x0.fu + x1.fu, x0.fv + x1.fv}; } // 1st-order partial derivatives of the addition of two 2D scalar fields!
__forceinline__ __device__ jet operator-(jet x0, jet x1){ return {x0.f - x1.f, x0.fu - x1.fu, x0.fv - x1.fv}; } // 1st-order partial derivatives of the subtraction of two 2D scalar fields!
__forceinline__ __device__ jet operator*(jet x0, jet x1){ // 1st-order partial derivatives of the product of two 2D scalar fields!
return {x0.f *x1.f,
x0.fu*x1.f + x0.f*x1.fu,
x0.fv*x1.f + x0.f*x1.fv};
}
__forceinline__ __device__ jet operator%(jet x, f32 s){
x.f = fmod(x.f, s);
if(x.f<0) x.f += s;
return x;
}
__forceinline__ __device__ jet operator^(jet x, f32 s){ // Derivatives of the n-th power?
f32 f0 = powf(x.f, s);
f32 f1 = x.f==0 ? 0 : s*f0/x.f; // Avoid division by zero
return {f0, f1*x.fu, f1*x.fv};
}
__forceinline__ __device__ jet operator/(jet x0, jet x1){ return x0 * (x1^-1); } // Derivatives of the quotient!
__forceinline__ __device__ jet ev_interpolate(jet x0, jet x1, jet t){ return (jet(1)-t)*x0 + t*x1; }
__forceinline__ __device__ jet ev_partial_diff(jet x, int idx){ return {idx==0 ? x.fu : x.fv, 0,0}; } // Keep the partial WRT u, or the partial WRT v?
__forceinline__ __device__ jet ev_cos(jet x){ f32 c=cosf(x.f); f32 dc=-sinf(x.f); return {c, dc*x.fu, dc*x.fv}; } // Derivatives of the cosine of a scalar field!
__forceinline__ __device__ jet ev_sin(jet x){ f32 s=sinf(x.f); f32 ds= cosf(x.f); return {s, ds*x.fu, ds*x.fv}; } // Derivatives of the sine of a scalar field!
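// ----------------------------------------------------------------------------------------------------------------------------#
// NOTE A small sanity-check sketch (not used by the renderer) showing that 1-jet arithmetic is forward-mode automatic differentiation:
// seeding u=jet(u0,1,0) and v=jet(v0,0,1), the jet of sin(u*v) carries fu = v0*cos(u0*v0) and fv = u0*cos(u0*v0), ie. the true partials.
// The kernel name `ker_jet_selftest` is ours, not part of the original code.
__global__ void ker_jet_selftest(f32 u0, f32 v0){
  jet u = jet(u0, 1,0);  // d/du seed!
  jet v = jet(v0, 0,1);  // d/dv seed!
  jet f = ev_sin(u*v);   // Product rule + chain rule happen automatically inside the jet operators!
  printf("jet:   f % .6f fu % .6f fv % .6f\nexact: f % .6f fu % .6f fv % .6f\n",
         f.f, f.fu, f.fv, sinf(u0*v0), v0*cosf(u0*v0), u0*cosf(u0*v0));
}
// Usage sketch: ker_jet_selftest<<<1,1>>>(0.3f, 1.7f);  cudaDeviceSynchronize();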
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section A 3D vector of 1-jets!
If a k-jet can be interpreted as a "fat" floating-point number (a number augmented with differential data),
then a vector of k-jets is a vector of "fat" numbers: each entry in the vector is a "fat" number.
All the complicated bookkeeping needed to implement automatic differentiation happens at the level of k-jets,
so vectors of k-jets need only implement vector-specific stuff.
A 3D vector (of 1-jets) will represent a point in 3D space.
*/
struct vjet{
jet x0, x1, x2;
};
__forceinline__ __device__ vjet operator*(jet s, vjet v){ return {s*v.x0, s*v.x1, s*v.x2}; } // Scalar multiplication!
__forceinline__ __device__ vjet operator+(vjet v0, vjet v1){ return {v0.x0 + v1.x0, v0.x1 + v1.x1, v0.x2 + v1.x2}; } // Vector addition!
__forceinline__ __device__ vjet operator-(vjet v0, vjet v1){ return {v0.x0 - v1.x0, v0.x1 - v1.x1, v0.x2 - v1.x2}; } // Vector subtraction!
__forceinline__ __device__ jet ev_dot( vjet v0, vjet v1){ return v0.x0*v1.x0 + v0.x1*v1.x1 + v0.x2*v1.x2; }
__forceinline__ __device__ vjet ev_cross(vjet v0, vjet v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ...
return {v0.x1*v1.x2 - v0.x2*v1.x1, // 0 --> 1 --> 2
v0.x2*v1.x0 - v0.x0*v1.x2, // 1 --> 2 --> 0
v0.x0*v1.x1 - v0.x1*v1.x0}; // 2 --> 0 --> 1
}
__forceinline__ __device__ vjet ev_normalize(vjet v){
jet s = ev_dot(v,v);
if(s.f>0) s = s^-.5; // Avoid division by zero!
else s = jet(0);
return s*v;
}
__forceinline__ __device__ vjet ev_interpolate( vjet v0, vjet v1, jet t){ return (jet(1)-t)*v0 + t*v1; }
__forceinline__ __device__ vjet ev_partial_diff(vjet v, int idx){ return {ev_partial_diff(v.x0,idx), ev_partial_diff(v.x1,idx), ev_partial_diff(v.x2,idx)}; }
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section A quaternion made of 1-jets!
If a k-jet can be interpreted as a "fat" floating-point number (a number augmented with differential data),
and a vector of k-jets is a vector of "fat" numbers,
then a quaternion of 1-jets is just a quaternion... whose entries are not plain numbers, but "fat" numbers.
All the complicated bookkeeping needed to implement automatic differentiation happens at the level of k-jets,
so quaternions of k-jets need only implement quaternion-specific stuff.
We'll use quaternions to do rotations in 3D.
*/
struct qjet{
jet x0, x1, x2, x3;
__forceinline__ __device__ qjet(jet a0, jet a1, jet a2, jet a3){ x0=a0; x1=a1; x2=a2; x3=a3; }
__forceinline__ __device__ qjet(jet s, vjet v){ x0=s; x1=v.x0; x2=v.x1; x3=v.x2; }
};
__forceinline__ __device__ qjet operator*(qjet q0, qjet q1){
return {q0.x0*q1.x0 - q0.x1*q1.x1 - q0.x2*q1.x2 - q0.x3*q1.x3,
q0.x0*q1.x1 + q0.x1*q1.x0 + q0.x2*q1.x3 - q0.x3*q1.x2,
q0.x0*q1.x2 - q0.x1*q1.x3 + q0.x2*q1.x0 + q0.x3*q1.x1,
q0.x0*q1.x3 + q0.x1*q1.x2 - q0.x2*q1.x1 + q0.x3*q1.x0};
}
__forceinline__ __device__ qjet ev_conj(qjet q){ return {q.x0, -1*q.x1, -1*q.x2, -1*q.x3}; } // The quaternion-inverse of `q` is just `conj(q) / quad(q)`, just like for complex numbers!
__forceinline__ __device__ qjet ev_versor(jet angle, vjet dir){
return {ev_cos(.5*angle), ev_sin(.5*angle)*ev_normalize(dir)};
}
__forceinline__ __device__ vjet ev_qrot3d(vjet v, qjet versor){
qjet p_rot = ev_conj(versor) * qjet(0,v) * versor; // Right-conjugation by @versor! The quaternion-conjugate of a unit-quaternion is its quaternion-inverse!
return {p_rot.x1, p_rot.x2, p_rot.x3};
}
__forceinline__ __device__ vjet ev_qrot_yz(vjet v, jet angle){ return ev_qrot3d(v, ev_versor(angle, {jet(1),jet(0),jet(0)})); } // Rotation over the yz-plane
__forceinline__ __device__ vjet ev_qrot_zx(vjet v, jet angle){ return ev_qrot3d(v, ev_versor(angle, {jet(0),jet(1),jet(0)})); } // Rotation over the zx-plane
__forceinline__ __device__ vjet ev_qrot_xy(vjet v, jet angle){ return ev_qrot3d(v, ev_versor(angle, {jet(0),jet(0),jet(1)})); } // Rotation over the xy-plane
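// ----------------------------------------------------------------------------------------------------------------------------#
// NOTE A tiny sketch (not used by the renderer) of the rotation convention above: ev_qrot3d conjugates on the RIGHT (conj(q)*v*q), so
// by direct computation ev_qrot_xy(x_axis, +pi/2) lands on -y (the host-side qrotl conjugates on the LEFT and would send x to +y).
// The kernel name `ker_qjet_selftest` is ours, not part of the original code.
__global__ void ker_qjet_selftest(){
  vjet x_axis = {jet(1), jet(0), jet(0)};
  vjet r = ev_qrot_xy(x_axis, jet(M_PI/2));  // Quarter turn over the xy-plane!
  printf("ev_qrot_xy(x, pi/2) = (% .3f, % .3f, % .3f)  // expect (0,-1,0)\n", r.x0.f, r.x1.f, r.x2.f);
}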
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Sphere parametrization and geometric deformations!
__forceinline__ __device__ vjet ev_sphere_arc(jet phi, f32 radius_x0, f32 radius_x1, f32 radius_x2){ // Trace out a meridian, since the horizontal angle is fixed!
jet s0 = radius_x0 * ev_sin(jet(0,0,1)) * ev_sin(phi); // Keep the horizontal angle constant, vary the vertical angle!
jet s1 = radius_x1 * ev_cos(jet(0,0,1)) * ev_sin(phi); // Keep the horizontal angle constant, vary the vertical angle!
jet s2 = radius_x2 * ev_cos(phi);
return {s0, s1, s2};
}
__forceinline__ __device__ jet ev_phi_deform0(jet phi){ // Map the (0..pi) interval to itself, but with some curvature!
if(phi.f <= M_PI/2) return -2/M_PI*(phi^2) + 2*phi;
else return 2/M_PI*(phi^2) - 2*phi + jet(M_PI);
}
__forceinline__ __device__ jet ev_phi_deform1(jet phi){  // With xi = pi/2: map (0..xi) to (0..xi) with some curvature, and map (xi..pi) to (5xi..6xi) with some curvature!
if(phi.f <= M_PI/2) return 2/M_PI*(phi^2);
else return -2/M_PI*(phi^2) + 4*phi + jet(M_PI);
}
__forceinline__ __device__ jet ev_phi_deform2(jet phi){  // Smoothstep-like bump: 0 at the poles, 1 at the equator (phi = pi/2), symmetric about phi = pi/2!
if(phi.f > M_PI/2) phi = jet(M_PI) - phi;
return -16/(M_PI*M_PI*M_PI)*(phi^3) + 12/(M_PI*M_PI)*(phi^2);
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section Thurston-eversion stages! A 3D vjet (of 1-jets, 2-jets, or 3-jets, or k-jets) always represents a point in R3!
So, if the output of a function is a vjet, then we can always render its output as a vertex in R3!
*/
// ----------------------------------------------------------------------------------------------------------------------------#
// Low-level stages!
__forceinline__ __device__ vjet ev_stage1(jet phi){ return ev_sphere_arc(phi,+1,+1,+1); }
__forceinline__ __device__ vjet ev_stage2(jet phi){ return ev_interpolate(ev_sphere_arc(ev_phi_deform0(phi),+.9,+.9,-1), ev_sphere_arc(ev_phi_deform1(phi),+1,+1,+.5), ev_phi_deform2(phi)); }
__forceinline__ __device__ vjet ev_stage3(jet phi){ return ev_interpolate(ev_sphere_arc(ev_phi_deform0(phi),-.9,-.9,-1), ev_sphere_arc(ev_phi_deform1(phi),-1,+1,-.5), ev_phi_deform2(phi)); }
__forceinline__ __device__ vjet ev_stage4(jet phi){ return ev_sphere_arc(phi,-1,-1,-1); }
// ----------------------------------------------------------------------------------------------------------------------------#
// Mid-level stages!
__forceinline__ __device__ vjet ev_scene12(jet phi, f32 t){ return ev_interpolate(ev_stage1(phi), ev_stage2(phi), jet(t)); }
__forceinline__ __device__ vjet ev_scene23(jet phi, f32 t){ // The heart of the TWIST stage! Notice the rotations here! =D
t *= .5;
f32 tt = (phi.f<=M_PI/2) ? t : -t;
vjet rot_xy = ev_qrot_xy(ev_sphere_arc(ev_phi_deform0(phi),+0.9,+0.9,-1.0), M_TAU*jet(tt));
vjet rot_zx = ev_qrot_zx(ev_sphere_arc(ev_phi_deform1(phi),+1.0,+1.0,+0.5), M_TAU*jet(t));
return ev_interpolate(rot_xy, rot_zx, ev_phi_deform2(phi));
}
__forceinline__ __device__ vjet ev_scene34(jet phi, f32 t){ return ev_interpolate(ev_stage3(phi), ev_stage4(phi), jet(t)); }
// ----------------------------------------------------------------------------------------------------------------------------#
// High-level stages!
__forceinline__ __device__ vjet ev_figure8(vjet w,vjet h, vjet bend, jet form, jet theta){ // At the end of the twisting phase, the corrugations have nearly become figure eights!
theta = theta%1;
jet height = 1 - ev_cos(2*M_TAU*theta);
if(.25<theta.f && theta.f<.75) height = 4-height;
height = .6*height;
h = h + (height*height)/(8*8) * bend;
form = 2*form - form*form;
return ev_sin(2*M_TAU*theta)*w + ev_interpolate(2-2*ev_cos(M_TAU*theta), height, form)*h;
}
__forceinline__ __device__ vjet ev_add_figure8(vjet p, jet theta, jet phi, jet form, i32 nstrips){
jet size = -0.2 * ev_phi_deform2(phi) * form; // 0.2 is like a scale constant?
vjet du = ev_normalize(ev_partial_diff(p,0)); // Is this the partial with respect to theta, or with respect to phi?
vjet dv = ev_normalize(ev_partial_diff(p,1)); // Is this the partial with respect to theta, or with respect to phi?
vjet h = 1.0*size * ev_normalize(ev_cross(du,dv));
vjet w = 1.1*size * ev_normalize(ev_cross(h, du)); // The 1.1 factor gives more thickness/width to the corrugations?
vjet bend = ev_partial_diff(size,0)/ev_partial_diff(phi,0) * du;
vjet fig8 = ev_figure8(w,h, bend, form, (f32)nstrips/(f32)M_TAU*theta);
return ev_qrot_xy(p+fig8, theta);
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Thurston-eversion phases!
// @theta goes from 0 to TAU. It describes a full circle on the xy-plane
// @phi goes from 0 to PI. It describes half-a-circle on the zx-plane
__device__ vjet ev_corrugate( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_stage1( phi ); return ev_add_figure8(p, theta,phi, jet(t) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_push( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_scene12(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_twist( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_scene23(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_unpush( jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_scene34(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi), nstrips); }
__device__ vjet ev_uncorrugate(jet theta, jet phi, f32 t, i32 nstrips){ vjet p=ev_stage4( phi ); return ev_add_figure8(p, theta,phi, jet(1-t)*ev_phi_deform2(phi), nstrips); }
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Thurston-eversion geometry driver!
// @t is the absolute animation time, in the [EV_TMIN..EV_TMAX] interval; each branch below normalizes it to [0..1] before calling the phase functions!
// @theta can be in any interval, although its range should be at most TAU (unless you want to cover the sphere multiple times)! Eg. [0..TAU) is good. [-PI..PI) is good.
// @phi must be in the (0..PI) interval, unless you want to hit those pesky singularities at the poles of the standard sphere parametrization!
__device__ void ev_quad(f32 t, f32 theta,f32 dtheta, f32 phi,f32 dphi, i32 nstrips, vjet* vert0_jet,vjet* vert1_jet,vjet* vert2_jet,vjet* vert3_jet){
f32 t_ev = t;
if(t_ev-EV_EPSILON < EV_CORRUGATE_TINI+EV_CORRUGATE_TDEL){ // WARN! For some reason we need to subtract the time by EV_EPSILON?
*vert0_jet = ev_corrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
*vert1_jet = ev_corrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
*vert2_jet = ev_corrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
*vert3_jet = ev_corrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_PUSH_TINI+EV_PUSH_TDEL){
*vert0_jet = ev_push(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
*vert1_jet = ev_push(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
*vert2_jet = ev_push(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
*vert3_jet = ev_push(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_PUSH_TINI)/EV_PUSH_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_TWIST_TINI+EV_TWIST_TDEL){
*vert0_jet = ev_twist(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
*vert1_jet = ev_twist(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
*vert2_jet = ev_twist(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
*vert3_jet = ev_twist(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_TWIST_TINI)/EV_TWIST_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_UNPUSH_TINI+EV_UNPUSH_TDEL){
*vert0_jet = ev_unpush(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
*vert1_jet = ev_unpush(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
*vert2_jet = ev_unpush(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
*vert3_jet = ev_unpush(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL, nstrips);
}else if(t_ev-EV_EPSILON < EV_UNCORRUGATE_TINI+EV_UNCORRUGATE_TDEL){
*vert0_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
*vert1_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
*vert2_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
*vert3_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), (t_ev-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL, nstrips);
}
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block CUDA kernels for doing sexy stuff to the mesh
// CUDA kernel for the lights!
__global__ void ker_lights_init(light_t* lights){
f32 z = vec3(EV_CAM_POS).x2 + EV_EPSILON;
f32 x = 1024;
lights[0] = {{-x/4, -2.2, -x/4}, {+x, 0, 0}, { 0, 0,+x}, {1.4,1.4,1.8}}; // Bottom face?
lights[1] = {{-x/4, +1.8, -x/4}, {+x, 0, 0}, { 0, 0,+x}, {1.4,1.4,1.8}}; // Top face?
lights[2] = {{-3.7, -x/4, +x/4}, { 0,+x, 0}, { 0, 0,-x}, {1.4,1.4,1.8}}; // Left face?
lights[3] = {{+3.7, +x/4, -x/4}, { 0, 0,+x}, { 0,-x, 0}, {1.4,1.4,1.8}}; // Right face?
lights[4] = {{+x/4, +x/4, +z}, { 0,-x, 0}, {-x, 0, 0}, {1.4,1.4,1.8}}; // Front face?
lights[5] = {{-x/4, -x/4, -2}, {+x, 0, 0}, { 0,+x, 0}, bgr8u_to_rgb32f(EV_RGB_BG)}; // Back face!
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @section This is the "mesh driver", ie. it takes Nathaniel Thurton's jet/evert stuff and actually creates the sphere eversion animation!
It creates vertex coordinates OUT OF THIN AIR (ie. out of kernel coordinates), per FRAME! How sexy is that? 0.5M triangles in 0.5ms!
*/
__global__ void ker_mesh_shader(f32 t, quat rot, u32 theta_nverts, u32 phi_nverts, triangle_t* triangles){
u32 x = blockIdx.x*blockDim.x + threadIdx.x;
u32 y = blockIdx.y*blockDim.y + threadIdx.y;
u32 thr_idx = (blockIdx.y*gridDim.x + blockIdx.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x; // Global thread index, see richiesams blogspot!
// ----------------------------------------------------------------------------------------------------------------------------#
f32 theta_gap = +.06f; // +.06f -.01f
f32 phi_gap = +.06f; // +.06f -.01f
i32 theta_idx = x;
i32 phi_idx = y;
f32 dtheta = (EV_THETA_MAX-EV_THETA_MIN) / theta_nverts; // "Vanilla" delta-theta!
f32 dphi = (EV_PHI_MAX -EV_PHI_MIN) / phi_nverts; // "Vanilla" delta-phi!
f32 theta = dtheta*(f32)theta_idx + EV_THETA_MIN; // Now theta is in [0 .. theta_max)
f32 phi = dphi *(f32)phi_idx + EV_PHI_MIN; // Now phi is in (0 .. phi_max)
f32 dtheta_gap = (EV_THETA_MAX-EV_THETA_MIN) / (theta_nverts + theta_gap*theta_nverts); // Delta-theta w/ a gap!
f32 dphi_gap = (EV_PHI_MAX -EV_PHI_MIN) / (phi_nverts + phi_gap *phi_nverts); // Delta-phi w/ a gap!
vjet vert0_jet, vert1_jet, vert2_jet, vert3_jet; ev_quad(t, theta,dtheta_gap, phi,dphi_gap, EV_NSTRIPS, &vert0_jet,&vert1_jet,&vert2_jet,&vert3_jet);
// ----------------------------------------------------------------------------------------------------------------------------#
vec3 vert0 = qrotl(vec3(vert0_jet.x0.f, vert0_jet.x1.f, vert0_jet.x2.f), rot);
vec3 vert1 = qrotl(vec3(vert1_jet.x0.f, vert1_jet.x1.f, vert1_jet.x2.f), rot);
vec3 vert2 = qrotl(vec3(vert2_jet.x0.f, vert2_jet.x1.f, vert2_jet.x2.f), rot);
vec3 vert3 = qrotl(vec3(vert3_jet.x0.f, vert3_jet.x1.f, vert3_jet.x2.f), rot);
vec3 color0 = bgr8u_to_rgb32f(EV_RGB_FRONT); // sin(theta): as `theta` goes from 0 to TAU, `sin(theta)` goes from 0 to 0
vec3 color1 = bgr8u_to_rgb32f(EV_RGB_BACK); // sin(2*phi): as `phi` goes from 0 to PI, `sin(2*phi)` goes from 0 to 0
vec3 dcolor0 = .2f * vec3(0,0,(sinf(theta)+1)/2);
vec3 dcolor1 = .3f * vec3((sinf(theta)+1)/2,0,0);
triangle_t triangle;
triangle.albedo_back = rgb32f_to_bgr8u(clamp01(color1 + dcolor1));
triangle.albedo_front = rgb32f_to_bgr8u(clamp01(color0 + dcolor0));
triangle.vert0=vert0; triangle.edge01=vert1-vert0; triangle.edge02=vert3-vert0; triangles[2*thr_idx+0]=triangle;
triangle.vert0=vert2; triangle.edge01=vert3-vert2; triangle.edge02=vert1-vert2; triangles[2*thr_idx+1]=triangle;
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
/* @block CUDA renderer: a path tracer!
This is a CUDA path tracer, originally by Sam Lapere.
I just stole it and repurposed it.
https://github.com/straaljager/GPU-path-tracing-tutorial-2
*/
// ----------------------------------------------------------------------------------------------------------------------------#
// CUDA device code for the geometry intersection, used when path tracing!
__forceinline__ __device__ intersect_t pt_triangle_intersect(triangle_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit!
if(self.edge01[0]==0.f && self.edge01[1]==0.f && self.edge01[2]==0.f) return {0.f,0}; // This allows us to have "trivial" primitives in the mesh and not break the path tracer!
vec3 ray_dir = ray.dir;
vec3 edge01 = self.edge01;
vec3 edge02 = self.edge02;
vec3 op = ray.pos - self.vert0;
vec3 pvec = cross(ray_dir,edge02);
f32 det = __fdividef(1.f, dot(edge01,pvec)); // CUDA intrinsic! // I think __fdividef(1/x) is FASTER than __frcp_r*() for computing multiplicative inverses / reciprocals!
f32 u = det * dot(op,pvec); if(u<0.f || u >1.f) return {0.f,0}; // No intersection! Early exit DOES help!
vec3 qvec = cross(op,edge01);
f32 v = det * dot(ray_dir,qvec); if(v<0.f || u+v>1.f) return {0.f,0}; // No intersection!
f32 t = det * dot(edge02,qvec);
return {t, det>0.f};
}
__forceinline__ __device__ vec3 pt_triangle_normal(triangle_t self, vec3 x){  // A triangle has no curvature, so its normal vector field is a CONSTANT vector field: its value is constant!
return normalize(cross(self.edge01,self.edge02)); // Cross product of two triangle edges yields a vector orthogonal to the triangle plane! Weee! A normal MUST be a unit vector!
}
__forceinline__ __device__ intersect_t pt_light_intersect(light_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit!
return pt_triangle_intersect({self.vert0, self.edge01, self.edge02}, ray);
}
__forceinline__ __device__ vec3 pt_light_normal(light_t self, vec3 x){  // A triangle has no curvature, so its normal vector field is a CONSTANT vector field: its value is constant!
return normalize(cross(self.edge01,self.edge02)); // Cross product of two triangle edges yields a vector orthogonal to the triangle plane! Weee! A normal MUST be a unit vector!
}
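// ----------------------------------------------------------------------------------------------------------------------------#
// NOTE A minimal sketch (not used by the renderer) exercising the Moller-Trumbore test above: a unit right triangle in the z=0 plane,
// hit head-on from z=+1, should report t=1 and front=1 (front==1 when the ray hits the side that the geometric normal edge01 x edge02
// points toward). The kernel name `ker_triangle_selftest` is ours, not part of the original code.
__global__ void ker_triangle_selftest(){
  triangle_t tri;  tri.vert0=vec3(0,0,0);  tri.edge01=vec3(1,0,0);  tri.edge02=vec3(0,1,0);  tri.albedo_back=0;  tri.albedo_front=0;
  ray_t ray = {vec3(.25f,.25f,1), vec3(0,0,-1)};
  intersect_t isect = pt_triangle_intersect(tri, ray);
  printf("triangle hit: t % .3f front %d  // expect t 1.000 front 1\n", isect.t, isect.front);
}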
// ----------------------------------------------------------------------------------------------------------------------------#
__forceinline__ __device__ hit_t pt_scene_intersect(ray_t ray, u32 nlights,light_t* lights, u32 ntriangles,triangle_t* triangles){
  hit_t hit = {t:1e38f, idx:0, type:GEOM_UNKNOWN, front:-1};  // @t is the ray coordinate of the closest intersection
for(int i=0; i<nlights; ++i){
intersect_t intersect = pt_light_intersect(lights[i], ray);
f32 t = intersect.t; if(t<EV_EPSILON || t>hit.t) continue;
hit.t = t;
hit.idx = i;
hit.type = GEOM_LIGHT;
} // Record the position of the closest intersection point in RAY COORDINATES (which are 1-dimensional, so you need a single number), and also the ID of the object in question
for(int i=0; i<ntriangles; ++i){
intersect_t intersect = pt_triangle_intersect(triangles[i], ray);
f32 t = intersect.t; if(t<EV_EPSILON || t>hit.t) continue;
hit.t = t;
hit.idx = i;
hit.type = GEOM_TRIANGLE;
hit.front = intersect.front;
} // Record the position of the closest intersection point in RAY COORDINATES (which are 1-dimensional, so you need a single number), and also the ID of the object in question
return hit;
}
// ----------------------------------------------------------------------------------------------------------------------------#
__forceinline__ __device__ vec3 pt_normal_out(vec3 normal, vec3 ray_dir){
return dot(normal,ray_dir)<0 ? normal : -1*normal; // "Outwards" normal, to create a "bounce"!
}
// Sample a random direction on the dome/hemisphere around the hitpoint, based on the normal at that point!
__forceinline__ __device__ vec3 pt_dome_randdir(vec3 normal_out, uint* seed_x, uint* seed_y){
// Compute local orthonormal basis/basis uvw at hitpoint, to compute the (random) ray direction.
  // 1st vector is the normal at the hitpoint, 2nd vector is orthogonal to the 1st, 3rd vector is orthogonal to the first two
vec3 basis_w = normal_out;
vec3 axis = fabs(basis_w[0])<.1f ? vec3(1,0,0) : vec3(0,1,0);
vec3 basis_u = normalize(cross(axis, basis_w)); // We shouldn't need to normalize this, but, if we don't, then we introduce artifacts!
vec3 basis_v = cross(basis_w, basis_u); // Right-handed uvw-basis! The homology is: u -> v -> w -> u -> ...
// All our geometric primitives (just triangles) are diffuse, which reflect light uniformly in all directions!
// Generate random direction in hemisphere above hitpoint (see "Realistic Ray Tracing", P. Shirley)
f32 rand_tau = rand_f32(seed_x,seed_y) * M_TAU; // Get random number on unit circle for azimuth
f32 rand_one = rand_f32(seed_x,seed_y); // Get random number for elevation
f32 rand_sqrt = sqrtf(rand_one); // No FAST intrinsic for sqrt?
f32 cos_tau, sin_tau; __sincosf(rand_tau, &sin_tau,&cos_tau);
return cos_tau*rand_sqrt*basis_u + sin_tau*rand_sqrt*basis_v + sqrtf(1.f-rand_one)*basis_w; // Random ray direction on the hemisphere/dome around a point! Cosine-weighted importance sampling, favors ray directions closer to normal direction!
}
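// ----------------------------------------------------------------------------------------------------------------------------#
// NOTE A small sketch (not used by the renderer) checking the cosine-weighted sampling above: under the pdf p(w) = cos(theta)/pi the
// expectation of cos(theta) is 2/3, so averaging dot(normal, dome_dir) over many samples should print roughly 0.667.
// The kernel name `ker_dome_selftest` is ours, not part of the original code.
__global__ void ker_dome_selftest(u32 nsamples, u32 seed){
  vec3 normal = vec3(0,0,1);
  uint seed_x = 0x12345678u + seed;
  uint seed_y = 0x87654321u + seed;
  f32 acc = 0.f;
  for(u32 i=0; i<nsamples; ++i){
    vec3 dir = pt_dome_randdir(normal, &seed_x, &seed_y);
    acc += dot(normal, dir);
  }
  printf("mean cos(theta) % .4f  // expect ~0.6667\n", acc/nsamples);
}
// Usage sketch: ker_dome_selftest<<<1,1>>>(1<<20, time(NULL));  cudaDeviceSynchronize();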
// ----------------------------------------------------------------------------------------------------------------------------#
// Here we solve the rendering equation: outgoing_radiance (at x) == emitted_radiance (at x) + reflected_radiance (at x).
// Reflected radiance is sum/integral of incoming radiance from all directions in hemisphere above point, multiplied by reflectance function of material (BRDF) and cosine incident angle
__device__ vec3 pt_radiance_integral(ray_t ray, uint* seed_x,uint* seed_y, u32 nlights,light_t* lights, u32 ntriangles,triangle_t* triangles){
vec3 rgb = vec3(0,0,0); // This will integrate/sum/accumulate the color over all bounces!
vec3 fade = vec3(1,1,1);
vec3 rgb_bg = bgr8u_to_rgb32f(EV_RGB_BG);
for(int bounce=0; bounce<EV_NBOUNCES; ++bounce){ // Iteration up to N bounces: replaces recursion in CPU code!
hit_t hit = pt_scene_intersect(ray, nlights,lights, ntriangles,triangles); if(hit.t==1e38f) return vec3(0,0,0); // No intersection! Return black!
vec3 hit_pos = ray.pos + hit.t*ray.dir; // @hit_pos is the hit position in WORLD COORDINATES! @hit.t is the hit position in RAY COORDINATES!
// ----------------------------------------------------------------
vec3 obj_normal, obj_rgb, obj_emi;
switch(hit.type){ // Retrieve the geometric data of the object we hit!
case GEOM_LIGHT:{
light_t obj = lights[hit.idx];
obj_normal = pt_light_normal(obj, hit_pos);
obj_rgb = vec3(0,0,0);
obj_emi = obj.emission;
}break;
case GEOM_TRIANGLE:{
triangle_t obj = triangles[hit.idx];
obj_normal = pt_triangle_normal(obj, hit_pos);
obj_rgb = hit.front ? bgr8u_to_rgb32f(obj.albedo_front) : bgr8u_to_rgb32f(obj.albedo_back);
obj_emi = vec3(0,0,0);
}break;
}
rgb = rgb + fade*obj_emi; // Add emission of current object to accumulated color (first term in rendering equation sum)
// ----------------------------------------------------------------
vec3 obj_normal_out = pt_normal_out(obj_normal, ray.dir); // "Outwards" normal, to create a "bounce"!
vec3 dome_dir = pt_dome_randdir(obj_normal_out, seed_x,seed_y);
fade = dot(obj_normal_out, dome_dir) * obj_rgb * fade; // 0) Integrate/sum/accumulate the fade! Weigh light/color energy using cosine of angle between normal and incident light!
ray.pos = hit_pos + EV_EPSILON*obj_normal_out; // 1) Launch a new raw starting by "bouncing" it from the object! Offset ray position slightly to prevent self intersection
ray.dir = dome_dir; // "Bounce" the ray from the surface at the hit position, oriented by the surface normal!
}
return rgb;
}
// ----------------------------------------------------------------------------------------------------------------------------# Map a CUDA thread to each pixel!
__global__ void ker_pixel_shader(u32 img_w,u32 img_h, u32 img_w_min,u32 img_w_max,u32 img_h_min,u32 img_h_max, u32* img_data, u32 nlights,light_t* lights, u32 ntriangles,triangle_t* triangles, f32 cam_fov,vec3 cam_pos,vec3 cam_dir,quat cam_rot, u32 seed){
u32 px_x = blockIdx.x*blockDim.x + threadIdx.x;
u32 px_y = blockIdx.y*blockDim.y + threadIdx.y; if(px_x>=(img_w_max-img_w_min) || px_y>=(img_h_max-img_h_min)) return;
u32 seed_x = px_x + seed;
u32 seed_y = px_y + seed;
// ----------------------------------------------------------------
cam_dir = qrotl(cam_dir, cam_rot);
vec3 cam_dir_x = qrotl((.5*cam_fov) * vec3((f32)img_w/img_h, 0, 0), cam_rot); // Cam ray is directed at the lower-left corner of the screen!
vec3 cam_dir_y = (.5*cam_fov) * normalize(cross(cam_dir,cam_dir_x)); // Cam ray is directed at the lower-left corner of the screen!
// ----------------------------------------------------------------
vec3 px_rgb = vec3(0,0,0); // Final pixel color! Init to zero for each pixel!
for(int sample=0; sample<EV_NSAMPLES; ++sample){ // Samples per pixel! Camera rays are pushed forward to start in interior
f32 cam_dx = (px_x + rand_f32(&seed_x,&seed_y)) / img_w - .5;
f32 cam_dy = (px_y + rand_f32(&seed_x,&seed_y)) / img_h - .5 + (f32)img_h_min/img_h;
vec3 px_pos = cam_pos;
vec3 px_dir = cam_dir + cam_dx*cam_dir_x + cam_dy*cam_dir_y;
ray_t px_ray = {px_pos, normalize(px_dir)};
px_rgb = px_rgb + 1.f/EV_NSAMPLES * pt_radiance_integral(px_ray, &seed_x,&seed_y, nlights,lights, ntriangles,triangles);
}
img_data[px_y*img_w + px_x] = rgb32f_to_rgb8u(clamp01(px_rgb));
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block Multi-GPU computing data structure!
struct gpu_work_t{
// General parameters!
u32 id; // Device ID!
f32 t; // Time!
quat rot; // Rotation quaternion!
// Mesh shader stuff!
u32 theta_nverts, phi_nverts;
u32 nlights;
u32 ntriangles;
light_t* lights_gpu;
triangle_t* triangles_gpu;
// Pixel shader stuff!
u32 img_w, img_h;
u32 img_w_min, img_w_max;
u32 img_h_min, img_h_max;
u32 img_tile_nelems;
u32 img_tile_stride;
u32* img_tile_gpu;
u32* img_full_cpu;
};
gpu_work_t* gpu_work_init(u32 gpu_id, u32 img_w,u32 img_h, u32 img_w_min,u32 img_w_max,u32 img_h_min,u32 img_h_max, u32 img_tile_stride){
gpu_work_t* gpu = (gpu_work_t*)malloc(sizeof(gpu_work_t));
gpu->id = gpu_id;
cudaSetDevice(gpu->id); cuda_check();
  // ---------------------------------------------------------------- Mesh shader parameters!
gpu->theta_nverts = ceilf(EV_THETA_NVERTS);
gpu->phi_nverts = ceilf(EV_PHI_NVERTS);
gpu->nlights = EV_NLIGHTS;
gpu->ntriangles = gpu->theta_nverts * gpu->phi_nverts * 2;
// ---------------------------------------------------------------- Pixel shader parameters!
gpu->img_w = img_w;
gpu->img_h = img_h;
gpu->img_w_min = img_w_min;
gpu->img_w_max = img_w_max;
gpu->img_h_min = img_h_min;
gpu->img_h_max = img_h_max;
gpu->img_tile_nelems = (img_w_max-img_w_min) * (img_h_max-img_h_min);
gpu->img_tile_stride = img_tile_stride;
// ---------------------------------------------------------------- Mesh shader buffers!
cudaMalloc(&gpu->lights_gpu, sizeof(light_t) *gpu->nlights);
cudaMalloc(&gpu->triangles_gpu, sizeof(triangle_t)*gpu->ntriangles);
// ---------------------------------------------------------------- Pixel shader buffers!
cudaMalloc(&gpu->img_tile_gpu, sizeof(u32)*gpu->img_tile_nelems);
if(gpu->id==EV_GPU_MAIN) cudaMallocHost(&gpu->img_full_cpu, sizeof(u32)*gpu->img_w*gpu->img_h);
cuda_check();
return gpu;
}
void gpu_work_free(gpu_work_t* gpu){
cudaSetDevice(gpu->id);
cudaFree(gpu->triangles_gpu);
cudaFree(gpu->lights_gpu);
cudaFree(gpu->img_tile_gpu);
if(gpu->id==EV_GPU_MAIN) cudaFreeHost(gpu->img_full_cpu);
cudaDeviceReset(); cuda_check();
free(gpu);
}
void gpu_sync(gpu_work_t* gpu){ // Always sync (only) stream zero
cudaSetDevice(gpu->id);
cudaStreamSynchronize(0); cuda_check();
}
void gpu_mesh_shader(gpu_work_t* gpu){
cudaSetDevice(gpu->id);
  dim3 block_dim = {1,1,1};  // Launch `block_dim.x * block_dim.y * block_dim.z` nthreads per block! So, `1 * 1 * 1` nthreads per block here! Max nthreads per block on Titan V is 1024!
dim3 grid_dim = {m_divceilu(gpu->theta_nverts,block_dim.x), m_divceilu(gpu->phi_nverts,block_dim.y), 1}; // Launch ` grid_dim.x * grid_dim.y * grid_dim.z` nblocks per grid!
ker_lights_init<<<1,1>>>(gpu->lights_gpu); cuda_check();
ker_mesh_shader<<<grid_dim,block_dim>>>(gpu->t,gpu->rot, gpu->theta_nverts,gpu->phi_nverts, gpu->triangles_gpu); cuda_check();
}
void gpu_pixel_shader(gpu_work_t* gpu, u32* img_cpu, u32 seed){
quat cam_rot_yz = versor(EV_CAM_ROT_YZ, vec3(1,0,0));
quat cam_rot_zx = versor(EV_CAM_ROT_ZX, vec3(0,1,0));
quat cam_rot_xy = versor(EV_CAM_ROT_XY, vec3(0,0,1));
quat cam_rot = cam_rot_xy * cam_rot_zx * cam_rot_yz;
cudaSetDevice(gpu->id);
dim3 block_dim = {8,8,1};
dim3 grid_dim = {m_divceilu((gpu->img_w_max-gpu->img_w_min), block_dim.x), m_divceilu((gpu->img_h_max-gpu->img_h_min), block_dim.y), 1};
ker_pixel_shader<<<grid_dim,block_dim>>>(gpu->img_w,gpu->img_h, gpu->img_w_min,gpu->img_w_max, gpu->img_h_min,gpu->img_h_max, gpu->img_tile_gpu, gpu->nlights,gpu->lights_gpu, gpu->ntriangles,gpu->triangles_gpu, EV_CAM_FOV,vec3(EV_CAM_POS),normalize(EV_CAM_DIR),cam_rot, seed); cuda_check();
cudaMemcpyAsync(img_cpu + gpu->img_tile_stride, gpu->img_tile_gpu, sizeof(u32)*gpu->img_tile_nelems, cudaMemcpyDeviceToHost, 0); cuda_check(); // Default stream!
}
// ----------------------------------------------------------------------------------------------------------------------------#
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
void gpu_img_save(gpu_work_t* gpu, const char* img_dir, i32 frame){ // PPM+fwrite u32 saves 1920x1024 @ 60ms! PPM+fwrite float3 saves 1920x1024 @ 150ms! PNG+fwrite saves 1920x1024 @ 700ms!
char img_path[PATH_MAX]={}; snprintf(img_path,PATH_MAX-1, "%s/%04d.ppm",img_dir,frame);
char header_txt[256]={};
int header_nbytes = snprintf(header_txt,sizeof(header_txt)-1,"P6\n%d %d\n%d\n", gpu->img_w, gpu->img_h, 255);
u32 pixel_nbytes = 3*gpu->img_w*gpu->img_h;
// open/ftruncate/mmap/close/memcpy/munmap 1920x1080: 7ms! fwrite: 80ms! write: 1380ms!
int fd = open(img_path, O_RDWR|O_CREAT|O_TRUNC, 0b110100100); if(fd==-1){ printf("\x1b[31mWARN\x1b[0m Can't \x1b[35mopen\x1b[0m \x1b[33m%s\x1b[0m! Discarding rendered image \x1b[31m=(\x1b[0m ",img_path); return; } // O_RDONLY O_WRONLY O_RDWR
int st = ftruncate(fd, header_nbytes+pixel_nbytes); if(st==-1) printf("\x1b[31mWARN\x1b[0m Can't \x1b[35mftruncate\x1b[0m \x1b[33m%s\x1b[0m! ",img_path);
u8* data8_dst = (u8*)mmap(NULL, header_nbytes+pixel_nbytes, PROT_READ|PROT_WRITE,MAP_SHARED, fd,0); if(data8_dst==MAP_FAILED) printf("\x1b[31mWARN\x1b[0m Can't \x1b[35mmmap\x1b[0m \x1b[33m%s\x1b[0m! ",img_path);
st=close(fd); if(st==-1) printf("\x1b[31mWARN\x1b[0m Can't close file descriptor!"); // `mmap` adds an extra reference to the file associated with the file descriptor which is not removed by a subsequent `close` on that file descriptor!
memcpy(data8_dst, header_txt, header_nbytes);
data8_dst += header_nbytes;
i32 height = gpu->img_h; // Caching these guys takes us from 7ms to 5ms!
i32 width = gpu->img_w;
u32* data32_src = gpu->img_full_cpu;
for(i32 i=height; 0<i; --i){ // The vertical index iterates backwards! =D
for(i32 j=0; j<width; ++j){
i32 lidx = (i-1)*width + j;
memcpy(data8_dst + 3*lidx, data32_src + lidx, 3); // memcpy 1920x1080: 6-7ms, single-byte addressing 1920x1080: 7-8ms
}
}
  munmap(data8_dst - header_nbytes, header_nbytes+pixel_nbytes); // Unmap from the mapping's base address: data8_dst was advanced past the header above!
}
// ----------------------------------------------------------------------------------------------------------------------------#
void gpus_render_to_disk(gpu_work_t** gpus){ // Driver of GPU work!
u64 nintersections = (u64)EV_IMG_W*EV_IMG_H * EV_NSAMPLES*EV_NBOUNCES * gpus[EV_GPU_MAIN]->ntriangles;
u32 seed = time(NULL);
f32 t = EV_TMIN;
f32 dt = (EV_TMAX-EV_TMIN) / max(1,EV_NFRAMES-1);
f64 spf; dt_t tdel;
putchar(0x0a);
printf("\x1b[94mimg_w\x1b[0m \x1b[0m%d\x1b[0m\n", gpus[EV_GPU_MAIN]->img_w);
printf("\x1b[35mimg_h\x1b[0m \x1b[0m%d\x1b[0m\n", gpus[EV_GPU_MAIN]->img_h);
printf("\x1b[31mnframes\x1b[0m \x1b[0m%d\x1b[0m\n", EV_NFRAMES);
printf("\x1b[32mnsamples\x1b[0m \x1b[0m%d\x1b[0m\n", EV_NSAMPLES);
printf("\x1b[94mnbounces\x1b[0m \x1b[0m%d\x1b[0m\n", EV_NBOUNCES);
putchar(0x0a);
printf("\x1b[32mimg\x1b[0m dir \x1b[33m%s\x1b[0m\n", EV_IMG_DIR);
printf("\x1b[32mtriangles\x1b[0m nelems \x1b[94m%'u\x1b[0m\n", gpus[EV_GPU_MAIN]->ntriangles);
printf("\x1b[32mtriangles\x1b[0m nbytes \x1b[35m%'lu\x1b[0m\n", gpus[EV_GPU_MAIN]->ntriangles*sizeof(triangle_t));
putchar(0x0a);
printf("\x1b[32mnintersections\x1b[0m any frame \x1b[94m%'lu\x1b[0m\n", nintersections);
printf("\x1b[32mnintersections\x1b[0m all frames \x1b[35m%'lu\x1b[0m\n", nintersections * EV_NFRAMES);
// ----------------------------------------------------------------------------------------------------------------------------#
puts("");
for(int frame=0; frame<EV_NFRAMES; ++frame){
printf("\x1b[35m%04d\x1b[0m \x1b[31m%6.3f\x1b[0m ", frame, t); fflush(stdout);
// ----------------------------------------------------------------
quat rot_yz = versor(-.09*M_TAU, vec3(1,0,0));
quat rot_zx = versor(-.03*M_TAU, vec3(0,1,0));
quat rot_xy = versor(+.01*t, vec3(0,0,1));
for(int gpu=0; gpu<EV_NGPUS; ++gpu){
gpus[gpu]->t = t;
gpus[gpu]->rot = rot_xy * rot_zx * rot_yz;
}
// ----------------------------------------------------------------
dt_ini(&tdel);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_mesh_shader(gpus[gpu]);
    for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_sync(gpus[gpu]); // Not needed for correctness (the pixel shader runs on the same stream), but it makes the mesh-shader timing below meaningful!
dt_end(&tdel); spf=dt_del(&tdel); printf("mesh_shdr \x1b[32m%.6f\x1b[0m ", spf); fflush(stdout);
// ----------------------------------------------------------------
dt_ini(&tdel);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_pixel_shader(gpus[gpu], gpus[EV_GPU_MAIN]->img_full_cpu, seed);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_sync(gpus[gpu]);
dt_end(&tdel); spf=dt_del(&tdel); printf("px_shdr \x1b[32m%.3f\x1b[0m px/s \x1b[94m%'.0f\x1b[0m ", spf, (gpus[EV_GPU_MAIN]->img_w*gpus[EV_GPU_MAIN]->img_h)/spf); fflush(stdout);
printf("%s \x1b[31m%'.0f\x1b[0m ", "prim/s", (f64)gpus[EV_GPU_MAIN]->ntriangles/spf); fflush(stdout);
printf("%s \x1b[32m%'.0f\x1b[0m ", "rays/s", ((f64)nintersections/gpus[EV_GPU_MAIN]->ntriangles)/spf); fflush(stdout);
printf("%s \x1b[94m%'.0f\x1b[0m ", "ints/s", (f64)nintersections/spf); fflush(stdout);
// ----------------------------------------------------------------
dt_ini(&tdel); // 1920x1024 @ 80ms!
gpu_img_save(gpus[EV_GPU_MAIN], EV_IMG_DIR, frame);
dt_end(&tdel); spf=dt_del(&tdel); printf("%s \x1b[32m%.3f\x1b[0m ", "ppm", spf); fflush(stdout);
// ----------------------------------------------------------------
putchar(0x0a);
t += dt;
}
// ----------------------------------------------------------------------------------------------------------------------------#
puts(""); dt_ini(&tdel);
for(int gpu=0; gpu<EV_NGPUS; ++gpu) gpu_work_free(gpus[gpu]);
dt_end(&tdel); printf("%s \x1b[33m%.6f\x1b[0m\n", "gpus_free", dt_del(&tdel));
}
// ----------------------------------------------------------------------------------------------------------------------------#
int main(){
setlocale(LC_NUMERIC, "");
gpu_work_t* gpus[EV_NGPUS];
u32 img_w_min,img_w_max, img_h_min,img_h_max, img_tile_stride;
dt_t tdel; dt_ini(&tdel);
img_w_min = 0;
img_w_max = EV_IMG_W;
img_h_min = 0*EV_IMG_H/EV_NGPUS;
img_h_max = 1*EV_IMG_H/EV_NGPUS + 64;
img_tile_stride = img_h_min*EV_IMG_W; // This is for the final copy of the rendered tiles from all GPUs to CPU memory!
gpus[0] = gpu_work_init(0, EV_IMG_W,EV_IMG_H, img_w_min,img_w_max,img_h_min,img_h_max, img_tile_stride);
img_w_min = 0;
img_w_max = EV_IMG_W;
img_h_min = 1*EV_IMG_H/EV_NGPUS + 64;
img_h_max = 2*EV_IMG_H/EV_NGPUS;
img_tile_stride = img_h_min*EV_IMG_W; // This is for the final copy of the rendered tiles from all GPUs to CPU memory!
gpus[1] = gpu_work_init(1, EV_IMG_W,EV_IMG_H, img_w_min,img_w_max,img_h_min,img_h_max, img_tile_stride);
dt_end(&tdel); printf("%s \x1b[33m%.6f\x1b[0m\n", "gpus_init", dt_del(&tdel));
gpus_render_to_disk(gpus);
}
|
91039749c024233bc07967823b1d19e99024dd55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Neural network convolutional layer.
// Created: 01/03/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/convolutionallayer.cuh"
const uint ConvolutionalLayer::c_biasesGradientsSumsPerThread = 128;
const uint ConvolutionalLayer::c_biasesGradientsPartialSumThreadsPerBlock = 128;
ConvolutionalLayer::ConvolutionalLayer(ParallelismMode parallelismMode, hipStream_t deviceCalculationStream, hipStream_t deviceMemoryStream, uint indexInTier,
uint tierSize, uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, bool holdsInputData, uint numFilters, uint filterWidth,
uint filterHeight, uint numFilterChannels, bool initializeWeights, float weightsDeviation, bool initializeBiases, float biasesInitialValue,
float filtersUpdateMomentum, float filtersUpdateDecay, float filtersUpdateLearningRateProgressStep, float filtersUpdateStartingLearningRate,
float filtersUpdateLearningRateUpdateFactor, float biasesUpdateMomentum, float biasesUpdateDecay, float biasesUpdateLearningRateProgressStep,
float biasesUpdateStartingLearningRate, float biasesUpdateLearningRateUpdateFactor, int paddingX, int paddingY, uint stride,
ActivationType activationType, bool holdsActivationGradients)
{
m_layerType = LayerType::Convolutional;
m_parallelismMode = parallelismMode;
m_deviceCalculationStream = deviceCalculationStream;
m_deviceMemoryStream = deviceMemoryStream;
m_indexInTier = indexInTier;
m_tierSize = tierSize;
m_activationType = activationType;
m_inputNumChannels = inputNumChannels;
m_inputDataWidth = inputDataWidth;
m_inputDataHeight = inputDataHeight;
m_inputDataSize = m_inputDataWidth * m_inputDataHeight;
m_inputDataCount = inputDataCount;
m_holdsInputData = holdsInputData;
m_numFilters = numFilters;
m_filterWidth = filterWidth;
m_filterHeight = filterHeight;
m_filterSize = m_filterWidth * m_filterHeight;
m_numFilterChannels = numFilterChannels;
m_filtersUpdateMomentum = filtersUpdateMomentum;
m_filtersUpdateDecay = filtersUpdateDecay;
m_filtersUpdateLearningRateProgressStep = filtersUpdateLearningRateProgressStep;
m_filtersUpdateStartingLearningRate = filtersUpdateStartingLearningRate;
m_filtersUpdateLearningRateUpdateFactor = filtersUpdateLearningRateUpdateFactor;
m_biasesUpdateMomentum = biasesUpdateMomentum;
m_biasesUpdateDecay = biasesUpdateDecay;
m_biasesUpdateLearningRateProgressStep = biasesUpdateLearningRateProgressStep;
m_biasesUpdateStartingLearningRate = biasesUpdateStartingLearningRate;
m_biasesUpdateLearningRateUpdateFactor = biasesUpdateLearningRateUpdateFactor;
m_paddingX = paddingX;
m_paddingY = paddingY;
m_stride = stride;
m_numPatchesX = 1 + (uint)ceil((double)(2 * paddingX + m_inputDataWidth - m_filterWidth) / m_stride);
m_numPatchesY = 1 + (uint)ceil((double)(2 * paddingY + m_inputDataHeight - m_filterHeight) / m_stride);
m_activationNumChannels = m_numFilters;
m_activationDataWidth = m_numPatchesX;
m_activationDataHeight = m_numPatchesY;
m_activationDataSize = m_activationDataWidth * m_activationDataHeight;
// Allocating input data buffer.
m_inputBufferSize = m_inputNumChannels * m_inputDataSize * m_inputDataCount * sizeof(float);
if (m_holdsInputData)
{
CudaAssert(hipMalloc<float>(&m_inputDataBuffer, m_inputBufferSize));
}
// Allocating input gradients buffer.
CudaAssert(hipMalloc<float>(&m_inputGradientsBuffer, m_inputBufferSize));
// Allocating filters buffers.
m_filtersBufferSize = m_numFilters * m_filterSize * m_numFilterChannels * sizeof(float);
CudaAssert(hipMalloc<float>(&m_filtersBuffer, m_filtersBufferSize));
CudaAssert(hipMalloc<float>(&m_filtersGradientsBuffer, m_filtersBufferSize));
m_preactivationGradientsPerChunkWidth = m_inputNumChannels > 3 ? 3 : 4;
m_filtersGradientsPerChunkBufferSize = DivideUp(m_numPatchesX, m_preactivationGradientsPerChunkWidth) *
DivideUp(m_numPatchesY, m_preactivationGradientsPerChunkWidth) * m_filtersBufferSize;
CudaAssert(hipMalloc<float>(&m_filtersGradientsPerChunkBuffer, m_filtersGradientsPerChunkBufferSize));
CudaAssert(hipMalloc<float>(&m_filtersUpdateBuffer, m_filtersBufferSize));
// Initializing filter weights.
if (initializeWeights)
{
InitializeParamsFromDistribution(m_filtersBuffer, m_filtersBufferSize, weightsDeviation);
InitializeParamsToValue(m_filtersUpdateBuffer, m_filtersBufferSize, 0.f);
}
// Allocating biases buffers.
m_biasesBufferSize = m_numFilters * sizeof(float);
CudaAssert(hipMalloc<float>(&m_biasesBuffer, m_biasesBufferSize));
CudaAssert(hipMalloc<float>(&m_biasesGradientsBuffer, m_biasesBufferSize));
CudaAssert(hipMalloc<float>(&m_biasesUpdateBuffer, m_biasesBufferSize));
// Allocating buffer for holding partial sums for calculating biases gradients.
m_biasesGradientsPartialSumBlocks = DivideUp(DivideUp(m_inputDataCount * m_numPatchesY * m_numPatchesX, c_biasesGradientsSumsPerThread),
c_biasesGradientsPartialSumThreadsPerBlock);
	CudaAssert(hipMalloc<float>(&m_biasesGradientsPartialSumsBuffer,
		m_biasesGradientsPartialSumBlocks * c_biasesGradientsPartialSumThreadsPerBlock * m_biasesBufferSize)); // One partial sum per launched thread, per filter, matching what the partial-sum kernel writes.
// Initializing biases.
if (initializeBiases)
{
InitializeParamsToValue(m_biasesBuffer, m_biasesBufferSize, biasesInitialValue);
InitializeParamsToValue(m_biasesUpdateBuffer, m_biasesBufferSize, 0.f);
}
// Allocating preactivation and activation data buffers.
m_activationBufferSize = m_numFilters * m_activationDataSize * m_inputDataCount * sizeof(float);
CudaAssert(hipMalloc<float>(&m_preactivationDataBuffer, m_activationBufferSize));
CudaAssert(hipMalloc<float>(&m_activationDataBuffer, m_activationBufferSize));
// Allocating preactivation gradients buffer.
CudaAssert(hipMalloc<float>(&m_preactivationGradientsBuffer, m_activationBufferSize));
// Allocating activation gradients buffer.
m_holdsActivationGradients = holdsActivationGradients;
if (m_holdsActivationGradients)
{
CudaAssert(hipMalloc<float>(&m_activationGradientsBuffer, m_activationBufferSize));
}
}
void ConvolutionalLayer::Reinitialize(uint newInputDataCount)
{
Layer::Reinitialize(newInputDataCount);
m_biasesGradientsPartialSumBlocks = DivideUp(DivideUp(m_inputDataCount * m_numPatchesY * m_numPatchesX, c_biasesGradientsSumsPerThread),
c_biasesGradientsPartialSumThreadsPerBlock);
m_activationBufferSize = m_numFilters * m_activationDataSize * m_inputDataCount * sizeof(float);
}
void ConvolutionalLayer::CopyFiltersFromHost(float* hostFiltersBuffer)
{
CudaAssert(hipMemcpyAsync(m_filtersBuffer, hostFiltersBuffer, m_filtersBufferSize, hipMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
void ConvolutionalLayer::CopyFiltersUpdateFromHost(float* hostFiltersUpdateBuffer)
{
CudaAssert(hipMemcpyAsync(m_filtersUpdateBuffer, hostFiltersUpdateBuffer, m_filtersBufferSize, hipMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
void ConvolutionalLayer::CopyBiasesFromHost(float* hostBiasesBuffer)
{
CudaAssert(hipMemcpyAsync(m_biasesBuffer, hostBiasesBuffer, m_biasesBufferSize, hipMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
void ConvolutionalLayer::CopyBiasesUpdateFromHost(float* hostBiasesUpdateBuffer)
{
CudaAssert(hipMemcpyAsync(m_biasesUpdateBuffer, hostBiasesUpdateBuffer, m_biasesBufferSize, hipMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
ConvolutionalLayer::~ConvolutionalLayer()
{
CudaAssert(hipFree(m_filtersBuffer));
CudaAssert(hipFree(m_filtersGradientsBuffer));
CudaAssert(hipFree(m_filtersGradientsPerChunkBuffer));
CudaAssert(hipFree(m_filtersUpdateBuffer));
CudaAssert(hipFree(m_biasesBuffer));
CudaAssert(hipFree(m_biasesGradientsBuffer));
CudaAssert(hipFree(m_biasesGradientsPartialSumsBuffer));
CudaAssert(hipFree(m_biasesUpdateBuffer));
CudaAssert(hipFree(m_preactivationDataBuffer));
CudaAssert(hipFree(m_preactivationGradientsBuffer));
}
void ConvolutionalLayer::LoadInputs()
{
CommonLoadInputs();
}
/*
Applies filters to the input data (which has at most 3 channels).
Each thread applies a specified number of filters to a specified number of input images.
The grid is organized so that different columns work on different data, and different rows work on different filters, or on the same filters but a different patch.
Rows are sorted first by patch, then by filters.
Needs to be a function with template parameters, since loops must have constant bounds to be unrolled.
*/
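/*
Illustrative decoding of blockIdx.y into (patch, filter group), using hypothetical sizes rather than a real
configuration: with numFilters = 96 and c_filtersPerBlock = c_blockHeight * c_filtersPerThread = 4 * 12 = 48,
c_blocksPerPatch = 96 / 48 = 2, so blockIdx.y = 5 maps to patch index 5 / 2 = 2 and filter offset (5 % 2) * 48 = 48.
CalculatePreactivations below sizes the grid to match: gridDim.y = (numPatches * numFilters) / c_filtersPerBlock.
*/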
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_filtersPerThread, uint c_numChannels, uint c_cacheLength, bool c_lastBatch>
__global__ void ApplyFiltersOnInputData(float* dataBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount, const int paddingX,
const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters, const uint stride,
const uint numPatchesX, const uint numPatchesY, float* preactivationsBuffer)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
const uint c_filtersPerBlock = c_blockHeight * c_filtersPerThread;
	// Since the same filters are used across threads in the same block row, and the same data across threads in the same block column,
	// we benefit from caching data and filters into shared memory.
	// In each pass we cache c_cacheLength pixels from each channel of the filters/data.
const uint c_cacheSize = c_cacheLength * c_numChannels;
__shared__ float dataCache[c_cacheSize][c_dataPerBlock];
__shared__ float filtersCache[c_cacheSize][c_filtersPerBlock];
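	// Shared-memory footprint for the <32, 4, 4, 16, 3, 4, ...> instantiation used in CalculatePreactivations
	// (numbers derived from those template arguments, shown only as an illustration):
	// c_dataPerBlock = 32 * 4 = 128, c_filtersPerBlock = 4 * 16 = 64, c_cacheSize = 4 * 3 = 12,
	// so dataCache holds 12 * 128 floats (6 KB) and filtersCache holds 12 * 64 floats (3 KB), roughly 9 KB per block.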
	// Positioning the filters buffer; it will be loaded into the cache window by window, where a window has dimensions FiltersPerBlock x CacheLength.
const uint c_blocksPerPatch = numFilters / c_filtersPerBlock;
const uint c_filtersOffset = (blockIdx.y % c_blocksPerPatch) * c_filtersPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
// Filter cache index represents column in filters data cache window, i.e. which filter are we caching.
const uint c_filtersCacheIndex = c_threadIndex % c_filtersPerBlock;
// Filter cache position represents row in filters data cache window, i.e. which filter pixel are we caching.
const uint c_filtersCachePosition = c_threadIndex / c_filtersPerBlock;
filtersBuffer += c_filtersOffset + c_filtersCachePosition * numFilters + c_filtersCacheIndex;
// Positioning data buffer.
const uint c_dataOffset = blockIdx.x * c_dataPerBlock + threadIdx.x;
dataBuffer += c_dataOffset;
// Positioning preactivations buffer.
const uint c_numPatches = numPatchesX * numPatchesY;
const uint c_patchIndex = blockIdx.y / c_blocksPerPatch;
preactivationsBuffer += (c_filtersOffset + threadIdx.y * c_filtersPerThread) * dataCount * c_numPatches + c_patchIndex * dataCount + c_dataOffset;
// Initializing buffer for this thread calculated preactivations.
float threadPreactivations[c_filtersPerThread][c_dataPerThread];
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadPreactivations[filterIndex][dataIndex] = 0.f;
}
}
// Calculating this thread preactivations.
const uint c_filtersCacheMaxPosition = c_blockWidth / c_filtersPerThread;
const bool c_blockFitsInFilters = c_blockWidth % c_filtersPerThread == 0;
const bool c_blockCoversFilters = c_cacheLength % c_filtersCacheMaxPosition == 0;
const bool c_blockCoversCache = c_cacheLength % c_blockHeight == 0;
const int c_dataPositionX = -paddingX + (c_patchIndex % numPatchesX) * stride;
const int c_dataPositionY = -paddingY + (c_patchIndex / numPatchesX) * stride;
for (uint filterPosition = 0; filterPosition < filterSize; filterPosition += c_cacheLength)
{
// Loading filters cache from filter position.
// Each thread in block loads some of the filters data cache window pixels (window with FiltersPerBlock x CacheLength dimensions).
		// The best case is when the block exactly covers the filters data cache window,
		// a worse case is when the block is smaller than the window, so we have to cover it in a couple of passes,
		// and the worst case is when the block doesn't evenly divide the window, so some of the block's threads sit idle during the load.
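		// For example (illustration only), with c_blockWidth = 32 and c_filtersPerThread = 16:
		// c_filtersCacheMaxPosition = 32 / 16 = 2 and c_filtersPerBlock = 64, so the 128 block threads map to
		// cache positions 0..1, every thread participates, and each covers c_cacheLength / 2 = 2 rows of the window.
		// With c_filtersPerThread = 4 instead, c_filtersCacheMaxPosition = 8 exceeds c_cacheLength = 4, so threads
		// whose cache position is 4..7 skip the load entirely (the worst case described above).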
if (c_blockFitsInFilters || c_filtersCachePosition < c_filtersCacheMaxPosition)
{
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_filtersCacheMaxPosition)
{
const uint c_currCachePosition = passedCachePosition + c_filtersCachePosition;
if (c_blockCoversFilters || c_currCachePosition < c_cacheLength)
{
if (filterPosition + c_currCachePosition < filterSize)
{
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
filtersCache[channel * c_cacheLength + c_currCachePosition][c_filtersCacheIndex] =
filtersBuffer[(channel * filterSize + filterPosition + passedCachePosition) * numFilters];
}
}
else
{
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
filtersCache[channel * c_cacheLength + c_currCachePosition][c_filtersCacheIndex] = 0.f;
}
}
}
}
}
// Loading data cache from filter position in data patch.
		// Each thread in the block loads a data pixel into the data cache; threads in the same column load
		// different pixels from the same data, while threads in the same row load the pixel at the same position
		// but from different data.
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_blockHeight)
{
const uint c_currCachePosition = passedCachePosition + threadIdx.y;
const uint c_currFilterPosition = filterPosition + c_currCachePosition;
if (c_currFilterPosition < filterSize && (c_blockCoversCache || c_currCachePosition < c_cacheLength))
{
const int c_currDataPositionX = c_dataPositionX + c_currFilterPosition % filterWidth;
const int c_currDataPositionY = c_dataPositionY + c_currFilterPosition / filterWidth;
if (c_currDataPositionX >= 0 && c_currDataPositionX < dataWidth && c_currDataPositionY >= 0 && c_currDataPositionY < dataHeight)
{
float* currDataBufferPosition = dataBuffer + (c_currDataPositionY * dataWidth + c_currDataPositionX) * dataCount;
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
dataCache[c_currCachePosition + channel * c_cacheLength][threadIdx.x * c_dataPerThread + dataIndex] =
currDataBufferPosition[channel * dataCount * dataSize + dataIndex * c_blockWidth];
}
else
{
dataCache[c_currCachePosition + channel * c_cacheLength][threadIdx.x * c_dataPerThread + dataIndex] = 0.f;
}
}
}
}
else
{
// Fill padded positions with zeros.
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
dataCache[c_currCachePosition + channel * c_cacheLength][threadIdx.x * c_dataPerThread + dataIndex] = 0.f;
}
}
}
}
}
__syncthreads();
// Applying loaded filter cache to loaded data cache.
#pragma unroll
for (uint cacheIndex = 0; cacheIndex < c_cacheSize; ++cacheIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadPreactivations[filterIndex][dataIndex] += dataCache[cacheIndex][threadIdx.x * c_dataPerThread + dataIndex] *
filtersCache[cacheIndex][threadIdx.y * c_filtersPerThread + filterIndex];
}
}
}
__syncthreads();
}
// Writing this thread calculated preactivations into preactivations buffer.
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
preactivationsBuffer[dataIndex * c_blockWidth + filterIndex * dataCount * c_numPatches] = threadPreactivations[filterIndex][dataIndex];
}
}
}
}
/*
Applies filters to already-filtered data (i.e. the output of previously applied filters).
Each thread applies a specified number of filters to a specified number of input images.
The grid is organized so that different columns work on different data, and different rows work on different filters, or on the same filters but a different patch.
Rows are sorted first by patch, then by filters.
Needs to be a function with template parameters, since loops must have constant bounds to be unrolled.
*/
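/*
Illustrative channel-chunking for this kernel, with hypothetical sizes: with numFilterChannels = 32 and
c_cacheLength = 8 (as in the <32, 1, 8, 16, 8, ...> instantiation used below), the channel loop makes
32 / 8 = 4 passes per overlapping filter pixel, and in each pass every thread accumulates 8 cached products
for each of its (filter, data) pairs. Unlike ApplyFiltersOnInputData above, the cache here spans channels
rather than filter pixels.
*/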
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_filtersPerThread, uint c_cacheLength, bool c_lastBatch>
__global__ void ApplyFiltersOnFilteredData(float* dataBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint numFilterChannels, const uint stride, const uint numPatchesX, const uint numPatchesY, float* preactivationsBuffer)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
const uint c_filtersPerBlock = c_blockHeight * c_filtersPerThread;
	// Since the same filters are used across threads in the same block row, and the same data across threads in the same block column,
	// we benefit from caching data and filters into shared memory.
	// In each pass we cache one pixel from c_cacheLength channels of the filters/data.
__shared__ float dataCache[c_cacheLength][c_dataPerBlock];
__shared__ float filtersCache[c_cacheLength][c_filtersPerBlock];
	// Positioning the filters buffer; it will be loaded into the cache window by window, where a window has dimensions FiltersPerBlock x CacheLength.
const uint c_blocksPerPatch = numFilters / c_filtersPerBlock;
const uint c_filtersOffset = (blockIdx.y % c_blocksPerPatch) * c_filtersPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
// Filter cache index represents column in filters data cache window, i.e. which filter are we caching.
const uint c_filtersCacheIndex = c_threadIndex % c_filtersPerBlock;
// Filter cache position represents row in filters data cache window, i.e. which filter channel are we caching.
const uint c_filtersCachePosition = c_threadIndex / c_filtersPerBlock;
filtersBuffer += c_filtersOffset + c_filtersCachePosition * numFilters * filterSize + c_filtersCacheIndex;
// Positioning data buffer.
const uint c_dataOffset = blockIdx.x * c_dataPerBlock + threadIdx.x;
dataBuffer += threadIdx.y * dataCount * dataSize + c_dataOffset;
// Positioning preactivations buffer.
const uint c_numPatches = numPatchesX * numPatchesY;
const uint c_patchIndex = blockIdx.y / c_blocksPerPatch;
preactivationsBuffer += (c_filtersOffset + threadIdx.y) * dataCount * c_numPatches + c_patchIndex * dataCount + c_dataOffset;
// Initializing buffer for this thread calculated preactivations.
float threadPreactivations[c_filtersPerThread][c_dataPerThread];
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadPreactivations[filterIndex][dataIndex] = 0.f;
}
}
// Calculating this thread preactivations.
const uint c_filtersCacheMaxPosition = c_blockWidth / c_filtersPerThread;
const bool c_blockCoversFilters = c_cacheLength % c_filtersCacheMaxPosition == 0;
const bool c_blockCoversCache = c_cacheLength % c_blockHeight == 0;
const int c_dataPositionX = -paddingX + (c_patchIndex % numPatchesX) * stride;
const int c_dataPositionY = -paddingY + (c_patchIndex / numPatchesX) * stride;
const uint c_dataStartPositionX = max(0, c_dataPositionX);
const uint c_dataEndPositionX = min(c_dataPositionX + filterWidth, dataWidth);
const uint c_dataStartPositionY = max(0, c_dataPositionY);
const uint c_dataEndPositionY = min(c_dataPositionY + filterHeight, dataHeight);
for (uint currDataPositionY = c_dataStartPositionY; currDataPositionY < c_dataEndPositionY; ++currDataPositionY)
{
const uint c_currFilterPositionY = currDataPositionY - c_dataPositionY;
for (uint currDataPositionX = c_dataStartPositionX; currDataPositionX < c_dataEndPositionX; ++currDataPositionX)
{
const uint c_currFilterPositionX = currDataPositionX - c_dataPositionX;
const uint c_currFilterPosition = c_currFilterPositionY * filterWidth + c_currFilterPositionX;
const uint c_currDataPosition = currDataPositionY * dataWidth + currDataPositionX;
for (uint currChannelPosition = 0; currChannelPosition < numFilterChannels; currChannelPosition += c_cacheLength)
{
// Loading filters cache from filter position.
if (c_filtersCachePosition < c_filtersCacheMaxPosition)
{
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_filtersCacheMaxPosition)
{
const uint c_currCachePosition = passedCachePosition + c_filtersCachePosition;
if (c_blockCoversFilters || c_currCachePosition < c_cacheLength)
{
filtersCache[c_currCachePosition][c_filtersCacheIndex] =
filtersBuffer[((currChannelPosition + passedCachePosition)* filterSize + c_currFilterPosition) * numFilters];
}
}
}
// Loading data cache from filter position in data patch.
float* currDataBufferPosition = dataBuffer + (currChannelPosition * dataSize + c_currDataPosition) * dataCount;
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_blockHeight)
{
const uint c_currCachePosition = passedCachePosition + threadIdx.y;
if (c_blockCoversCache || c_currCachePosition < c_cacheLength)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
dataCache[c_currCachePosition][threadIdx.x + dataIndex * c_blockWidth] =
currDataBufferPosition[passedCachePosition * dataCount * dataSize + dataIndex * c_blockWidth];
}
else
{
dataCache[c_currCachePosition][threadIdx.x + dataIndex * c_blockWidth] = 0.f;
}
}
}
}
__syncthreads();
// Applying loaded filter cache to loaded data cache.
#pragma unroll
for (uint cacheIndex = 0; cacheIndex < c_cacheLength; ++cacheIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadPreactivations[filterIndex][dataIndex] += dataCache[cacheIndex][dataIndex * c_blockWidth + threadIdx.x] *
filtersCache[cacheIndex][filterIndex * c_blockHeight + threadIdx.y];
}
}
}
__syncthreads();
}
}
}
// Writing this thread calculated preactivations into preactivations buffer.
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
preactivationsBuffer[dataIndex * c_blockWidth + filterIndex * c_blockHeight * dataCount * c_numPatches] = threadPreactivations[filterIndex][dataIndex];
}
}
}
}
void ConvolutionalLayer::CalculatePreactivations()
{
uint dataPerThread = m_inputDataCount % 128 == 0 ? 4 : (m_inputDataCount % 64 == 0 ? 2 : 1);
uint filtersPerThread = m_numFilters % 64 == 0 ? 16 : ((m_inputNumChannels <= 3 && m_numFilters % 48 == 0) ? 12 : (m_numFilters % 32 == 0 ? 8 : 4));
uint blockWidth = 32;
uint blockHeight = (m_numFilters % 128 == 0 && m_numFilterChannels % 8 == 0 && dataPerThread < 4) ? 8 : 4;
dim3 blockDimensions(blockWidth, blockHeight);
dim3 gridDimensions(DivideUp(m_inputDataCount, blockWidth * dataPerThread), (m_activationDataSize * m_numFilters) / (blockHeight * filtersPerThread));
bool lastBatch = m_inputDataCount % (blockWidth * dataPerThread) != 0;
if (lastBatch)
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 16, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 12, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 8, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 4, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputNumChannels % 8 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 8, 16, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputNumChannels % 4 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
else
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_inputDataCount % 128 == 0)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 16, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 12, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 8, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 4, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 64 == 0)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 16, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 12, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 8, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 4, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 32 == 0)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 16, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 12, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 8, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 4, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
else if (m_inputNumChannels % 8 == 0)
{
if (m_inputDataCount % 128 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 8, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 4, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 8, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 8, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 4, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 8, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
else if (m_inputNumChannels % 4 == 0)
{
if (m_inputDataCount % 128 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 8, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 4, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 8, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 4, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
}
CudaAssert(hipGetLastError());
}
/*
Does grid stride and adds biases to preactivations.
*/
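/*
A minimal single-threaded reference of what this kernel computes, shown only as an illustration and assuming
the same row-major layout (row y holds all width = dataCount * numPatchesX * numPatchesY preactivations of filter y):

	for (uint y = 0; y < height; ++y)        // height == numFilters
		for (uint x = 0; x < width; ++x)
			preactivations[y * width + x] += biases[y];

The kernel distributes this over a grid-stride loop and reads biases[y] with lane 0 only, broadcasting it
across the warp with __shfl to avoid redundant global loads.
*/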
__global__ void AddFilterBiases(float* preactivations, float* biases, const uint width, const uint height)
{
for (uint y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += gridDim.y * blockDim.y)
{
int laneId = threadIdx.x % warpSize;
		float biasValue; // Biases are floats; an int here would truncate them.
if (laneId == 0)
{
biasValue = biases[y];
}
biasValue = __shfl(biasValue, 0);
for (uint x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += gridDim.x * blockDim.x)
{
preactivations[y * width + x] += biasValue;
}
}
}
void ConvolutionalLayer::AddBiases()
{
dim3 blockDimensions(Config::MAX_NUM_THREADS, 1);
const uint c_width = m_inputDataCount * m_numPatchesX * m_numPatchesY;
const uint c_blocksPerWidth = max((uint)1, c_width / (uint)Config::MAX_NUM_THREADS);
uint gridX = c_blocksPerWidth;
if (c_blocksPerWidth >= 128)
{
gridX = 128;
}
else if (c_blocksPerWidth >= 64)
{
gridX = 64;
}
else if (c_blocksPerWidth >= 32)
{
gridX = 32;
}
dim3 gridDimensions(gridX, 64);
LAUNCH_KERNEL_ASYNC(AddFilterBiases, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationDataBuffer, m_biasesBuffer, c_width, m_numFilters);
CudaAssert(hipGetLastError());
}
void ConvolutionalLayer::CalculateActivations()
{
ApplyActivation(m_activationType, m_preactivationDataBuffer, (uint)(m_activationBufferSize / sizeof(float)), m_activationDataBuffer, m_deviceCalculationStream);
}
void ConvolutionalLayer::DoForwardProp(PropagationMode propagationMode)
{
CalculatePreactivations();
AddBiases();
CalculateActivations();
}
/*
Calculates partial sums for biases gradients.
*/
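/*
Worked sizing example for the first stage of the two-stage bias-gradient reduction (hypothetical numbers):
with numElementsToSum = inputDataCount * numPatchesY * numPatchesX = 100000, the constructor picks
DivideUp(DivideUp(100000, 128), 128) = DivideUp(782, 128) = 7 blocks of 128 threads per filter, so this
kernel writes 7 * 128 = 896 partial sums per filter (gridDim.y = numFilters), each thread grid-striding
over roughly 100000 / 896 ~ 112 elements.
*/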
__global__ void __CalculateBiasesGradientsPartialSums(float* preactivationGradients, const uint numElementsToSum, float* partialSumsBuffer)
{
float partialSum = 0.f;
const uint c_preactivationsGradientsOffset = blockIdx.y * numElementsToSum;
for (uint partialSumIndex = blockIdx.x * blockDim.x + threadIdx.x; partialSumIndex < numElementsToSum; partialSumIndex += gridDim.x * blockDim.x)
{
partialSum += preactivationGradients[c_preactivationsGradientsOffset + partialSumIndex];
}
partialSumsBuffer[blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x] = partialSum;
}
/*
Calculates biases gradients, with each thread calculating the gradient for one bias.
*/
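/*
Continuing the hypothetical example above: each active thread (one per filter, extra threads are masked by the
bounds check) serially sums its 896 partial sums and divides by batchSize, which CalculateBiasesGradients sets
to inputDataCount for model parallelism and to tierSize * inputDataCount for data parallelism.
*/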
__global__ void __CalculateConvolutionalBiasesGradients(float* partialSumsBuffer, const uint numFilters, const uint numPartialSums, const uint batchSize,
float* biasesGradients)
{
const uint c_filterIndex = blockIdx.x * blockDim.x + threadIdx.x;
const uint c_filterPartialSumsOffset = c_filterIndex * numPartialSums;
if (c_filterIndex < numFilters)
{
float biasGradient = 0.f;
for (uint partialSumIndex = 0; partialSumIndex < numPartialSums; ++partialSumIndex)
{
biasGradient += partialSumsBuffer[c_filterPartialSumsOffset + partialSumIndex];
}
biasesGradients[c_filterIndex] = biasGradient / (float)batchSize;
}
}
void ConvolutionalLayer::CalculateBiasesGradients()
{
// Summing biases into temp buffer.
const uint c_width = m_inputDataCount * m_numPatchesY * m_numPatchesX;
dim3 blockDimensions(c_biasesGradientsPartialSumThreadsPerBlock);
dim3 gridDimensions(m_biasesGradientsPartialSumBlocks, m_numFilters);
LAUNCH_KERNEL_ASYNC(__CalculateBiasesGradientsPartialSums, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
c_width, m_biasesGradientsPartialSumsBuffer);
CudaAssert(hipGetLastError());
// Summing from partial sums buffer to biases gradients buffer.
const uint c_numThreadsPerBlock = 128;
const uint c_numBlocks = DivideUp(m_numFilters, c_numThreadsPerBlock);
const uint c_batchSize = m_parallelismMode == ParallelismMode::Model ? m_inputDataCount : m_tierSize * m_inputDataCount;
LAUNCH_KERNEL_ASYNC(__CalculateConvolutionalBiasesGradients, dim3(c_numBlocks), dim3(c_numThreadsPerBlock), m_deviceCalculationStream)(m_biasesGradientsPartialSumsBuffer,
m_numFilters, m_biasesGradientsPartialSumBlocks * c_biasesGradientsPartialSumThreadsPerBlock, c_batchSize, m_biasesGradientsBuffer);
CudaAssert(hipGetLastError());
}
/*
Calculates weights gradients on the input data.
Each thread calculates gradients for a specified number of filter pixels and a specified number of filters,
over one chunk of the preactivation gradients.
Needs to be a function with template parameters, since loops must have constant bounds to be unrolled.
*/
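/*
Illustrative block-index decoding for this kernel (hypothetical sizes): with numFilters = 64 and
c_filtersPerBlock = c_blockWidth * c_filtersPerThread = 32 * 2 = 64, c_blocksPerChunk = 64 / 64 = 1, so
blockIdx.x directly selects the preactivation-gradient chunk, while blockIdx.y * c_pixelsPerBlock selects
which filter pixels this block accumulates. Per-chunk results land in filtersGradientsPerChunkBuffer at
offset chunkIndex * numChannels * numFilters * filterSize, which implies gridDim.x = numChunks * (numFilters / c_filtersPerBlock)
for full coverage.
*/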
template <uint c_blockWidth, uint c_filtersPerThread, uint c_blockHeight, uint c_pixelsPerThread, uint c_dataPerLoad, uint c_pixelsPerLoad, uint c_numChannels, bool c_lastBatch>
__global__ void CalculateInputDataWeightsGradients(float* inputBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* preactivationGradients, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint stride, const uint numPatchesX, const uint numPatchesY, const uint preactivationGradientsPerChunkWidth, float* filtersGradientsPerChunkBuffer)
{
const uint c_filtersPerBlock = c_blockWidth * c_filtersPerThread;
const uint c_pixelsPerBlock = c_blockHeight * c_pixelsPerThread;
const uint c_pixelsPerCache = c_blockHeight * c_pixelsPerLoad;
	// Since the same filters are used across threads in the same block row, and the same gradients across threads in the same block column,
	// we benefit from caching the input and gradients into shared memory.
__shared__ float inputCache[c_pixelsPerCache * c_numChannels][c_dataPerLoad];
__shared__ int inputPixelOffsetsCache[c_pixelsPerBlock];
__shared__ float gradientsCache[c_filtersPerBlock][c_dataPerLoad + 1];
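	// Illustrative shared-memory footprint, assuming hypothetical template arguments
	// <c_blockWidth=32, c_filtersPerThread=2, c_blockHeight=16, c_pixelsPerThread=2, c_dataPerLoad=32, c_pixelsPerLoad=1, c_numChannels=3>:
	// inputCache is (16 * 1 * 3) x 32 = 1536 floats (6 KB) and gradientsCache is 64 x 33 = 2112 floats (~8.3 KB);
	// the extra "+ 1" column in gradientsCache is presumably padding to avoid shared-memory bank conflicts.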
// Positioning inputs buffer.
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_cacheLoadIndex = c_threadIndex / c_dataPerLoad;
const uint c_dataLoadIndex = c_threadIndex % c_dataPerLoad;
inputBuffer += c_dataLoadIndex;
// Positioning preactivation gradients buffer.
const uint c_blocksPerChunk = numFilters / c_filtersPerBlock;
const uint c_filterOffset = (blockIdx.x % c_blocksPerChunk) * c_filtersPerBlock;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += c_filterOffset * c_numPatches * dataCount + c_dataLoadIndex;
// Positioning gradients buffer.
const uint c_chunkIndex = blockIdx.x / c_blocksPerChunk;
const uint c_pixelOffset = blockIdx.y * c_pixelsPerBlock;
filtersGradientsPerChunkBuffer += c_chunkIndex * c_numChannels * numFilters * filterSize + (c_pixelOffset + threadIdx.y) * numFilters + c_filterOffset + threadIdx.x;
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_numChannels][c_pixelsPerThread][c_filtersPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint pixelIndex = 0; pixelIndex < c_pixelsPerThread; ++pixelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadGradients[channelIndex][pixelIndex][filterIndex] = 0.f;
}
}
}
// Calculating this thread's gradients.
const uint c_numChunksX = (numPatchesX + preactivationGradientsPerChunkWidth - 1) / preactivationGradientsPerChunkWidth;
const uint c_chunkY = c_chunkIndex / c_numChunksX;
const uint c_chunkX = c_chunkIndex % c_numChunksX;
const uint c_patchY = c_chunkY * preactivationGradientsPerChunkWidth;
const uint c_patchX = c_chunkX * preactivationGradientsPerChunkWidth;
const uint c_firstPatchY = c_patchY;
const uint c_firstPatchX = c_patchX;
const uint c_lastPatchY = min(numPatchesY, c_patchY + preactivationGradientsPerChunkWidth);
const uint c_lastPatchX = min(numPatchesX, c_patchX + preactivationGradientsPerChunkWidth);
const uint c_filterPixelY = (c_pixelOffset + c_threadIndex) / filterWidth;
const uint c_filterPixelX = (c_pixelOffset + c_threadIndex) % filterWidth;
for (uint patchY = c_firstPatchY; patchY < c_lastPatchY; ++patchY)
{
const int c_inputPixelY = (int)(c_filterPixelY + patchY * stride) - paddingY;
for (uint patchX = c_firstPatchX; patchX < c_lastPatchX; ++patchX)
{
const int c_inputPixelX = (int)(c_filterPixelX + patchX * stride) - paddingX;
const uint c_patch = patchY * numPatchesX + patchX;
// Loading the input pixel offsets cache.
__syncthreads();
if (c_threadIndex < c_pixelsPerBlock)
{
const int c_inputPixelOffset = (c_inputPixelY * (int)dataWidth + c_inputPixelX) * (int)dataCount;
inputPixelOffsetsCache[c_threadIndex] = (c_inputPixelY >= 0 && c_inputPixelY < dataHeight &&
c_inputPixelX >= 0 && c_inputPixelX < dataWidth) ? c_inputPixelOffset : -1;
}
__syncthreads();
// Load input pixels and gradient pixels for c_dataPerLoad images, and calculate filter gradients from them.
for (uint dataIndex = 0; dataIndex < dataCount; dataIndex += c_dataPerLoad)
{
const uint cacheLoadSlide = (c_blockWidth * c_blockHeight) / c_dataPerLoad;
// Load gradients cache.
if (!c_lastBatch || dataIndex + c_dataLoadIndex < dataCount)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
const uint c_filterToLoad = ((c_cacheLoadIndex + filterIndex) % c_filtersPerThread) * c_blockWidth + (c_cacheLoadIndex + filterIndex) / c_filtersPerThread;
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCache[c_cacheLoadIndex + filterIndex][c_dataLoadIndex] = preactivationGradients[c_filterToLoad * c_numPatches * dataCount +
c_patch * dataCount + dataIndex];
}
}
}
else
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCache[c_cacheLoadIndex + filterIndex][c_dataLoadIndex] = 0.f;
}
}
}
// Load inputs, one cache window at a time, and calculate gradients.
#pragma unroll
for (uint pixelIndex = 0; pixelIndex < c_pixelsPerThread; pixelIndex += c_pixelsPerLoad)
{
// Load inputs cache.
#pragma unroll
for (uint loadPixelIndex = 0; loadPixelIndex < c_pixelsPerCache; loadPixelIndex += cacheLoadSlide)
{
if (c_pixelsPerCache % cacheLoadSlide == 0 || c_cacheLoadIndex + loadPixelIndex < c_pixelsPerCache)
{
const uint c_filterPixel = pixelIndex * c_blockHeight + c_cacheLoadIndex + loadPixelIndex;
if (c_pixelOffset + c_filterPixel < filterSize && (!c_lastBatch || dataIndex + c_dataLoadIndex < dataCount))
{
const int c_inputPixelOffset = inputPixelOffsetsCache[c_filterPixel];
if (c_inputPixelOffset >= 0)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputCache[channelIndex * c_pixelsPerCache + c_cacheLoadIndex + loadPixelIndex][c_dataLoadIndex] =
inputBuffer[channelIndex * dataSize * dataCount + c_inputPixelOffset + dataIndex];
}
}
else
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputCache[channelIndex * c_pixelsPerCache + c_cacheLoadIndex + loadPixelIndex][c_dataLoadIndex] = 0.f;
}
}
}
else
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputCache[channelIndex * c_pixelsPerCache + c_cacheLoadIndex + loadPixelIndex][c_dataLoadIndex] = 0.f;
}
}
}
}
__syncthreads();
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint loadedDataIndex = 0; loadedDataIndex < c_dataPerLoad; ++loadedDataIndex)
{
#pragma unroll
for (uint loadedPixelIndex = 0; loadedPixelIndex < c_pixelsPerLoad; ++loadedPixelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadGradients[channelIndex][pixelIndex + loadedPixelIndex][filterIndex] +=
inputCache[channelIndex * c_pixelsPerCache + loadedPixelIndex * c_blockHeight + threadIdx.y][loadedDataIndex] *
gradientsCache[threadIdx.x * c_filtersPerThread + filterIndex][loadedDataIndex];
}
}
}
}
__syncthreads();
}
}
}
}
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint pixelIndex = 0; pixelIndex < c_pixelsPerThread; ++pixelIndex)
{
if (c_pixelOffset + pixelIndex * c_blockHeight + threadIdx.y < filterSize)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
filtersGradientsPerChunkBuffer[(channelIndex * filterSize + pixelIndex * c_blockHeight) * numFilters + filterIndex * c_blockWidth] =
threadGradients[channelIndex][pixelIndex][filterIndex];
}
}
}
}
}
/*
Calculates weights gradients on filtered data (produced by previously applied filters).
Each thread calculates the gradient of one weight, for a specified number of filters and a specified number of filter channels,
within one chunk of preactivations.
Needs to be a function template since loops must have compile-time constant bounds to be unrolled.
*/
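// For example, the <32, 4, 8, 8, 16, *> instantiation launched below covers 32 * 4 = 128 filters and
// 8 * 8 = 64 filter channels per block, for a single filter pixel (blockIdx.z), loading 16 images per pass.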
template <uint c_blockWidth, uint c_filtersPerThread, uint c_blockHeight, uint c_channelsPerThread, uint c_dataPerLoad, bool c_lastBatch>
__global__ void CalculateFilteredDataWeightsGradients(float* inputBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* preactivationGradients, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint numFilterChannels, const uint stride, const uint numPatchesX, const uint numPatchesY, const uint preactivationGradientsPerChunkWidth,
float* filtersGradientsPerChunkBuffer)
{
const uint c_filtersPerBlock = c_blockWidth * c_filtersPerThread;
const uint c_channelsPerBlock = c_blockHeight * c_channelsPerThread;
// Since the same filters are used by threads in the same block row, and the same gradients by threads in the same block column,
// we benefit from caching inputs and gradients in shared memory.
__shared__ float inputCache[c_channelsPerBlock][c_dataPerLoad];
__shared__ float gradientsCache[c_filtersPerBlock][c_dataPerLoad + 1];
// Positioning inputs buffer.
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_cacheLoadIndex = c_threadIndex / c_dataPerLoad;
const uint c_dataLoadIndex = c_threadIndex % c_dataPerLoad;
const uint c_channelOffset = blockIdx.y * c_channelsPerBlock;
inputBuffer += (c_channelOffset + c_cacheLoadIndex) * dataSize * dataCount + c_dataLoadIndex;
// Positioning preactivation gradients buffer.
const uint c_blocksPerChunk = numFilters / c_filtersPerBlock;
const uint c_filterOffset = (blockIdx.x % c_blocksPerChunk) * c_filtersPerBlock;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += (c_filterOffset + c_cacheLoadIndex) * c_numPatches * dataCount + c_dataLoadIndex;
// Positioning gradients buffer.
const uint c_chunkIndex = blockIdx.x / c_blocksPerChunk;
const uint c_filterPixel = blockIdx.z;
filtersGradientsPerChunkBuffer += (c_chunkIndex * numFilterChannels + c_channelOffset + threadIdx.y) * numFilters * filterSize +
c_filterPixel * numFilters + c_filterOffset + threadIdx.x;
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_channelsPerThread][c_filtersPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadGradients[channelIndex][filterIndex] = 0.f;
}
}
// Calculating this thread's gradients.
const uint c_filterPixelY = c_filterPixel / filterWidth;
const uint c_filterPixelX = c_filterPixel % filterWidth;
const uint c_numChunksX = (numPatchesX + preactivationGradientsPerChunkWidth - 1) / preactivationGradientsPerChunkWidth;
const uint c_chunkY = c_chunkIndex / c_numChunksX;
const uint c_chunkX = c_chunkIndex % c_numChunksX;
const uint c_patchY = c_chunkY * preactivationGradientsPerChunkWidth;
const uint c_patchX = c_chunkX * preactivationGradientsPerChunkWidth;
const uint c_firstPatchY = (uint)max((int)c_patchY, (-(int)c_filterPixelY + paddingY + (int)stride - 1) / (int)stride);
const uint c_firstPatchX = (uint)max((int)c_patchX, (-(int)c_filterPixelX + paddingX + (int)stride - 1) / (int)stride);
const uint c_lastPatchY = min(numPatchesY, min(c_patchY + preactivationGradientsPerChunkWidth, (dataHeight - c_filterPixelY + (uint)paddingY + stride - 1) / stride));
const uint c_lastPatchX = min(numPatchesX, min(c_patchX + preactivationGradientsPerChunkWidth, (dataWidth - c_filterPixelX + (uint)paddingX + stride - 1) / stride));
float* inputCacheLoad = &inputCache[c_cacheLoadIndex][c_dataLoadIndex];
float* gradientsCacheLoad = &gradientsCache[c_cacheLoadIndex][c_dataLoadIndex];
for (uint patchY = c_firstPatchY; patchY < c_lastPatchY; ++patchY)
{
const uint c_inputPixelY = c_filterPixelY + patchY * stride - (uint)paddingY;
for (uint patchX = c_firstPatchX; patchX < c_lastPatchX; ++patchX)
{
const uint c_patch = patchY * numPatchesX + patchX;
const uint c_inputPixelX = c_filterPixelX + patchX * stride - (uint)paddingX;
const uint c_inputPixel = (c_inputPixelY * dataWidth + c_inputPixelX) * dataCount;
// Load input pixels and gradient pixels for c_dataPerLoad images, and calculate filter gradients from them.
for (uint dataIndex = 0; dataIndex < dataCount; dataIndex += c_dataPerLoad)
{
const uint cacheLoadSlide = (c_blockWidth * c_blockHeight) / c_dataPerLoad;
if (!c_lastBatch || dataIndex + c_dataLoadIndex < dataCount)
{
// Load inputs cache.
if (c_cacheLoadIndex < c_channelsPerBlock)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerBlock; channelIndex += cacheLoadSlide)
{
if (c_channelsPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + channelIndex < c_channelsPerBlock)
{
inputCacheLoad[channelIndex * c_dataPerLoad] = inputBuffer[channelIndex * dataSize * dataCount + c_inputPixel + dataIndex];
}
}
}
// Load gradients cache.
if (c_cacheLoadIndex < c_filtersPerBlock)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCacheLoad[filterIndex * (c_dataPerLoad + 1)] = preactivationGradients[filterIndex * c_numPatches * dataCount + c_patch * dataCount + dataIndex];
}
}
}
}
else
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerBlock; channelIndex += cacheLoadSlide)
{
if (c_channelsPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + channelIndex < c_channelsPerBlock)
{
inputCacheLoad[channelIndex * c_dataPerLoad] = 0.f;
}
}
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCacheLoad[filterIndex * (c_dataPerLoad + 1)] = 0.f;
}
}
}
__syncthreads();
#pragma unroll
for (uint loadedDataIndex = 0; loadedDataIndex < c_dataPerLoad; ++loadedDataIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
threadGradients[channelIndex][filterIndex] += inputCache[channelIndex * c_blockHeight + threadIdx.y][loadedDataIndex] *
gradientsCache[filterIndex * c_blockWidth + threadIdx.x][loadedDataIndex];
}
}
}
__syncthreads();
}
}
}
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
filtersGradientsPerChunkBuffer[channelIndex * c_blockHeight * numFilters * filterSize + filterIndex * c_blockWidth] = threadGradients[channelIndex][filterIndex];
}
}
}
/*
Aggregates calculated weights gradients from chunks.
*/
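// Each thread sums one weight's gradient across all chunks (chunk-major layout: chunk i occupies offsets
// [i * filtersBufferLength, (i + 1) * filtersBufferLength)) and averages it over the batch size.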
__global__ void AggregateWeightsGradientsFromChunks(float* filtersGradientsPerChunkBuffer, uint numChunks, uint filtersBufferLength, const uint batchSize,
float* filtersGradientsBuffer)
{
const uint c_filterIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (c_filterIndex < filtersBufferLength)
{
float filterGradient = 0.f;
for (uint chunkIndex = 0; chunkIndex < numChunks; ++chunkIndex)
{
filterGradient += filtersGradientsPerChunkBuffer[chunkIndex * filtersBufferLength + c_filterIndex];
}
filtersGradientsBuffer[c_filterIndex] = filterGradient / (float)batchSize;
}
}
void ConvolutionalLayer::CalculateWeightsGradients()
{
// Calculating weights gradients for chunks of preactivations.
uint numChunksX = DivideUp(m_numPatchesX, m_preactivationGradientsPerChunkWidth);
uint numChunksY = DivideUp(m_numPatchesY, m_preactivationGradientsPerChunkWidth);
uint numChunks = numChunksX * numChunksY;
dim3 gridDimensions;
uint dataPerLoad, blockWidth, blockHeight;
if (m_inputNumChannels > 3)
{
uint filtersPerThread = m_numFilters % 64 == 0 ? 4 : (m_numFilters % 32 == 0 ? 2 : 1);
blockWidth = m_numFilters % 128 == 0 ? 32 : 16;
uint channelsPerThread = m_inputNumChannels % 64 == 0 ? 8 : (m_inputNumChannels % 48 == 0 ? 6 : (m_inputNumChannels % 32 == 0 ? 8 : 4));
blockHeight = (m_inputNumChannels / channelsPerThread) % 8 == 0 ? 8 : 4;
dataPerLoad = (filtersPerThread * channelsPerThread) < 32 ? 32 : 16;
gridDimensions = dim3(numChunks * (m_numFilters / (blockWidth * filtersPerThread)), m_inputNumChannels / (blockHeight * channelsPerThread), m_filterSize);
}
else
{
uint filtersPerThread = 1;
uint pixelsPerThread = 16;
blockHeight = 16;
blockWidth = 16;
dataPerLoad = 32;
if (m_numFilters % 64 == 0)
{
filtersPerThread = 4;
pixelsPerThread = 2;
blockHeight = 16;
blockWidth = 16;
dataPerLoad = 32;
}
else if (m_numFilters % 48 == 0)
{
filtersPerThread = 3;
pixelsPerThread = 4;
blockHeight = 16;
blockWidth = 16;
dataPerLoad = 32;
}
else if (m_numFilters % 32 == 0)
{
filtersPerThread = 2;
pixelsPerThread = 2;
blockHeight = 8;
blockWidth = 16;
dataPerLoad = 16;
}
gridDimensions = dim3(numChunks * (m_numFilters / (blockWidth * filtersPerThread)), DivideUp(m_filterSize, blockHeight * pixelsPerThread));
}
dim3 blockDimensions(blockWidth, blockHeight);
bool lastBatch = m_inputDataCount % dataPerLoad != 0;
if (lastBatch)
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 4, 16, 2, 32, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 3, 16, 4, 32, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 2, 8, 2, 16, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 1, 16, 16, 32, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 48 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
}
}
else
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 4, 16, 2, 32, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 3, 16, 4, 32, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 2, 8, 2, 16, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 1, 16, 16, 32, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 48 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
}
}
// Aggregating calculated weights gradients from chunks.
const uint c_numThreadsPerBlock = 128;
const uint c_filtersBufferLength = (uint)(m_filtersBufferSize / sizeof(float));
const uint c_numBlocks = DivideUp(c_filtersBufferLength, c_numThreadsPerBlock);
const uint c_batchSize = m_parallelismMode == ParallelismMode::Model ? m_inputDataCount : m_tierSize * m_inputDataCount;
LAUNCH_KERNEL_ASYNC(AggregateWeightsGradientsFromChunks, dim3(c_numBlocks), dim3(c_numThreadsPerBlock), m_deviceCalculationStream)(m_filtersGradientsPerChunkBuffer,
numChunks, c_filtersBufferLength, c_batchSize, m_filtersGradientsBuffer);
CudaAssert(hipGetLastError());
}
/*
Calculates input gradients on input data.
Each thread calculates the gradient of one input pixel, for a specified number of data per thread.
Needs to be a function template since loops must have compile-time constant bounds to be unrolled.
*/
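// For example, the <16, 2, 16, 32, 3, 4, *> instantiation launched below covers 16 * 2 = 32 images per block
// over a 4 x 4 patch of input pixels (one pixel per threadIdx.y) and all 3 input channels.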
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_dataPerLoad, uint c_numChannels, uint c_blockImagePatchSize, bool c_lastBatch>
__global__ void CalculateDataInputGradients(float* preactivationGradients, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint stride, const uint numPatchesX, const uint numPatchesY, float* inputGradients)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
// Since the same filters are used by threads in the same block row, and the same gradients by threads in the same block column,
// we benefit from caching gradients and filters in shared memory.
__shared__ float filtersCache[c_blockHeight * c_numChannels][c_blockWidth + 1]; // Adding 1 to avoid shared memory bank conflicts.
__shared__ float gradientsCache[c_blockWidth][c_dataPerBlock];
// Positioning preactivation gradients buffer.
const uint c_dataOffset = blockIdx.x * c_dataPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_gradientsCacheFilterIndex = c_threadIndex / c_dataPerLoad;
const uint c_gradientsCacheDataIndex = c_threadIndex % c_dataPerLoad;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += c_gradientsCacheFilterIndex * c_numPatches * dataCount + c_dataOffset + c_gradientsCacheDataIndex;
// Positioning filters buffer.
filtersBuffer += threadIdx.x;
// Positioning input gradients buffer.
const uint c_numBlockImagePatchesX = (dataWidth + c_blockImagePatchSize - 1) / c_blockImagePatchSize;
const uint c_blockImagePatchY = blockIdx.y / c_numBlockImagePatchesX;
const uint c_blockImagePatchX = blockIdx.y % c_numBlockImagePatchesX;
const uint c_pixelOffsetY = c_blockImagePatchY * c_blockImagePatchSize;
const uint c_pixelOffsetX = c_blockImagePatchX * c_blockImagePatchSize;
const uint c_pixelY = c_pixelOffsetY + threadIdx.y / c_blockImagePatchSize;
const uint c_pixelX = c_pixelOffsetX + threadIdx.y % c_blockImagePatchSize;
const bool c_validPixel = c_pixelX < dataWidth && c_pixelY < dataHeight;
inputGradients += (c_pixelY * dataWidth + c_pixelX) * dataCount + c_dataOffset + threadIdx.x;
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_numChannels][c_dataPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] = 0.f;
}
}
// Calculating this thread's gradients.
const uint c_firstPatchX = c_pixelOffsetX + paddingX < filterWidth ? 0 : (c_pixelOffsetX + paddingX - filterWidth) / stride + 1;
const uint c_firstPatchY = c_pixelOffsetY + paddingY < filterHeight ? 0 : (c_pixelOffsetY + paddingY - filterHeight) / stride + 1;
const uint c_lastPatchX = min(numPatchesX, (c_pixelOffsetX + c_blockImagePatchSize - 1 + paddingX) / stride + 1);
const uint c_lastPatchY = min(numPatchesY, (c_pixelOffsetY + c_blockImagePatchSize - 1 + paddingY) / stride + 1);
float* filtersCacheLoad = &filtersCache[threadIdx.y][threadIdx.x];
float* gradientsCacheLoad = &gradientsCache[c_gradientsCacheFilterIndex][c_gradientsCacheDataIndex];
for (uint currPatchY = c_firstPatchY; currPatchY < c_lastPatchY; ++currPatchY)
{
const int c_filterPixelY = (int)c_pixelY + paddingY - (int)(currPatchY * stride);
for (uint currPatchX = c_firstPatchX; currPatchX < c_lastPatchX; ++currPatchX)
{
const int c_filterPixelX = (int)c_pixelX + paddingX - (int)(currPatchX * stride);
const uint c_filterPixel = c_filterPixelY * filterWidth + c_filterPixelX;
const uint c_currPatch = currPatchY * numPatchesX + currPatchX;
const bool c_validFilterPixel = c_filterPixelX >= 0 && c_filterPixelX < filterWidth && c_filterPixelY >= 0 && c_filterPixelY < filterHeight;
for (uint currFilter = 0; currFilter < numFilters; currFilter += c_blockWidth)
{
// Load gradients cache
const float* preactivationGradientsBufferLoad = preactivationGradients + (currFilter * c_numPatches + c_currPatch) * dataCount;
const uint c_dataLoadSlide = c_blockWidth * c_blockHeight / c_dataPerLoad;
#pragma unroll
for (uint dataToLoadIndex = 0; dataToLoadIndex < c_dataPerBlock; dataToLoadIndex += c_dataPerLoad)
{
if (!c_lastBatch || c_dataOffset + dataToLoadIndex + c_gradientsCacheDataIndex < dataCount)
{
#pragma unroll
for (uint filterToLoad = 0; filterToLoad < c_blockWidth; filterToLoad += c_dataLoadSlide)
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataToLoadIndex] = preactivationGradientsBufferLoad[filterToLoad * c_numPatches * dataCount];
}
}
else
{
#pragma unroll
for (uint filterToLoad = 0; filterToLoad < c_blockWidth; filterToLoad += c_dataLoadSlide)
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataToLoadIndex] = 0.f;
}
}
}
if (c_validPixel && c_validFilterPixel)
{
// Load filters cache.
const float* filtersBufferLoad = filtersBuffer + c_filterPixel * numFilters + currFilter;
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
filtersCacheLoad[channelIndex * c_blockHeight * (c_blockWidth + 1)] = filtersBufferLoad[channelIndex * filterSize * numFilters];
}
}
__syncthreads();
if (c_validPixel && c_validFilterPixel)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_blockWidth; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] += filtersCache[channelIndex * c_blockHeight + threadIdx.y][filterIndex] *
gradientsCache[filterIndex][dataIndex * c_blockWidth + threadIdx.x];
}
}
}
}
__syncthreads();
}
}
}
if (c_validPixel)
{
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + threadIdx.x + dataIndex * c_blockWidth < dataCount)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputGradients[channelIndex * dataSize * dataCount + dataIndex * c_blockWidth] = threadGradients[channelIndex][dataIndex];
}
}
}
}
}
/*
Calculates input gradients of filtered data (produced by previously applied filters).
Each thread calculates the gradient of one input pixel, for a specified number of data per thread and a specified number of channels per thread.
The grid is organized so that each row of blocks works on one pixel of data, and each column of blocks works on different data,
or on the same data but a different channel.
Columns are sorted first by channel, then by data.
Needs to be a function template since loops must have compile-time constant bounds to be unrolled.
*/
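// For example, the <32, 1, 8, 8, 32, 16, *> instantiation launched below covers 32 images and 8 * 8 = 64 channels
// per block for a single input pixel (blockIdx.y), caching filters in windows of 32 and gradients in windows of 16.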
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_channelsPerThread, uint c_filtersCacheLength, uint c_gradientsCacheLength, bool c_lastBatch>
__global__ void CalculateFilteredInputGradients(float* preactivationGradients, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint numFilterChannels, const uint stride, const uint numPatchesX, const uint numPatchesY, float* inputGradients)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
const uint c_channelsPerBlock = c_blockHeight * c_channelsPerThread;
// Since the same filters are used by threads in the same block row, and the same gradients by threads in the same block column,
// we benefit from caching gradients and filters in shared memory.
__shared__ float filtersCache[c_channelsPerBlock][c_filtersCacheLength];
__shared__ float gradientsCache[c_gradientsCacheLength][c_dataPerBlock];
// Positioning preactivation gradients buffer.
const uint c_blocksPerChannel = gridDim.x / (numFilterChannels / c_channelsPerBlock);
const uint c_dataOffset = (blockIdx.x % c_blocksPerChannel) * c_dataPerBlock;
const uint c_gradientsCacheFilterIndex = threadIdx.y;
const uint c_gradientsCacheDataIndex = threadIdx.x;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += c_gradientsCacheFilterIndex * c_numPatches * dataCount + c_dataOffset + c_gradientsCacheDataIndex;
// Positioning filters buffer; it will be loaded into the cache window by window, where each window has dimensions ChannelsPerBlock x CacheLength.
const uint c_dataChannelIndex = (blockIdx.x / c_blocksPerChannel) * c_channelsPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_filtersCacheChannelIndex = c_threadIndex / c_filtersCacheLength;
const uint c_filtersCachePosition = c_threadIndex % c_filtersCacheLength;
filtersBuffer += (c_dataChannelIndex + c_filtersCacheChannelIndex) * numFilters * filterSize + c_filtersCachePosition;
// Positioning input gradients buffer.
inputGradients += ((c_dataChannelIndex + threadIdx.y) * dataSize + blockIdx.y) * dataCount + c_dataOffset + threadIdx.x;
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_channelsPerThread][c_dataPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] = 0.f;
}
}
// Calculating this thread's gradients.
const uint c_pixelY = blockIdx.y / dataWidth;
const uint c_pixelX = blockIdx.y % dataWidth;
const uint c_firstPatchX = c_pixelX + paddingX < filterWidth ? 0 : (c_pixelX + paddingX - filterWidth) / stride + 1;
const uint c_firstPatchY = c_pixelY + paddingY < filterHeight ? 0 : (c_pixelY + paddingY - filterHeight) / stride + 1;
const uint c_lastPatchX = min(numPatchesX, (c_pixelX + paddingX) / stride + 1);
const uint c_lastPatchY = min(numPatchesY, (c_pixelY + paddingY) / stride + 1);
float* filtersCacheLoad = &filtersCache[c_filtersCacheChannelIndex][c_filtersCachePosition];
float* gradientsCacheLoad = &gradientsCache[c_gradientsCacheFilterIndex][c_gradientsCacheDataIndex];
for (uint currPatchY = c_firstPatchY; currPatchY < c_lastPatchY; ++currPatchY)
{
const uint c_filterPixelY = c_pixelY + paddingY - currPatchY * stride;
for (uint currPatchX = c_firstPatchX; currPatchX < c_lastPatchX; ++currPatchX)
{
const uint c_filterPixelX = c_pixelX + paddingX - currPatchX * stride;
const uint c_filterPixel = c_filterPixelY * filterWidth + c_filterPixelX;
const uint c_currPatch = currPatchY * numPatchesX + currPatchX;
for (uint currFilter = 0; currFilter < numFilters; currFilter += c_filtersCacheLength)
{
const float* filtersBufferLoad = filtersBuffer + c_filterPixel * numFilters + currFilter;
// Load filters cache window.
const uint channelToLoadSlide = c_blockWidth * c_blockHeight / c_filtersCacheLength;
#pragma unroll
for (uint channelToLoad = 0; channelToLoad < c_channelsPerBlock; channelToLoad += channelToLoadSlide)
{
if (c_channelsPerBlock % channelToLoadSlide == 0 || channelToLoad + c_filtersCacheChannelIndex < c_channelsPerBlock)
{
filtersCacheLoad[channelToLoad * c_filtersCacheLength] = filtersBufferLoad[channelToLoad * filterSize * numFilters];
}
}
for (uint currGradientFilter = currFilter; currGradientFilter < currFilter + c_filtersCacheLength; currGradientFilter += c_gradientsCacheLength)
{
// Load gradients cache window.
const float* preactivationGradientsBufferLoad = preactivationGradients + (currGradientFilter * c_numPatches + c_currPatch) * dataCount;
#pragma unroll
for (uint filterToLoad = 0; filterToLoad < c_gradientsCacheLength; filterToLoad += c_blockHeight)
{
if (c_gradientsCacheLength % c_blockHeight == 0 || c_gradientsCacheFilterIndex + filterToLoad < c_gradientsCacheLength)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerBlock; dataIndex += c_blockWidth)
{
if (!c_lastBatch || c_dataOffset + c_gradientsCacheDataIndex + dataIndex < dataCount)
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataIndex] = preactivationGradientsBufferLoad[filterToLoad * c_numPatches * dataCount + dataIndex];
}
else
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataIndex] = 0.f;
}
}
}
}
__syncthreads();
// Calculate gradients from cache.
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_gradientsCacheLength; ++filterIndex)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] += filtersCache[channelIndex * c_blockHeight + threadIdx.y][currGradientFilter - currFilter + filterIndex] *
gradientsCache[filterIndex][dataIndex * c_blockWidth + threadIdx.x];
}
}
}
__syncthreads();
}
}
}
}
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + threadIdx.x + dataIndex * c_blockWidth < dataCount)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
inputGradients[channelIndex * c_blockHeight * dataSize * dataCount + dataIndex * c_blockWidth] = threadGradients[channelIndex][dataIndex];
}
}
}
}
void ConvolutionalLayer::CalculateInputGradients()
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
uint dataPerThread = m_inputDataCount % 128 == 0 ? 8 : (m_inputDataCount % 64 == 0 ? 4 : 2);
uint blockWidth = 16;
uint blockHeight = 16;
// Block image patch size needs to be the square root of block height!
// Noted here as a constant to avoid sqrt computation, since blockHeight is already hardcoded.
uint blockImagePatchSize = 4;
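// With blockHeight = 16, each block covers a 4 x 4 patch of input image pixels, one pixel per threadIdx.y.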
dim3 blockDimensions(blockWidth, blockHeight);
dim3 gridDimensions(DivideUp(m_inputDataCount, blockWidth * dataPerThread), DivideUp(m_inputDataWidth, blockImagePatchSize) * DivideUp(m_inputDataHeight, blockImagePatchSize));
bool lastBatch = m_inputDataCount % (blockWidth * dataPerThread) != 0;
if (lastBatch)
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 2, 16, 32, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 8, 16, 32, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 4, 16, 32, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 2, 16, 32, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
else if (m_inputNumChannels % 8 == 0)
{
uint dataPerThread = m_inputDataCount % 128 == 0 ? 4 : (m_inputDataCount % 64 == 0 ? 2 : 1);
uint channelsPerThread = m_inputNumChannels % 64 == 0 ? 8 :
(m_inputNumChannels % 48 == 0 ? 12 :
(m_inputNumChannels % 32 == 0 ? 8 :
(m_inputNumChannels % 16 == 0 ? 4 : 2)));
uint blockWidth = 32;
uint blockHeight = m_inputNumChannels % 64 == 0 ? 8 : 4;
dim3 blockDimensions(blockWidth, blockHeight);
dim3 gridDimensions(DivideUp(m_inputDataCount, blockWidth * dataPerThread) * (m_inputNumChannels / (blockHeight * channelsPerThread)),
m_inputDataSize);
bool lastBatch = m_inputDataCount % (blockWidth * dataPerThread) != 0;
if (lastBatch)
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 32, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else if (m_inputNumChannels % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 12, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 32, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else if (m_inputNumChannels % 16 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 4, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 2, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 32 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 8, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 8, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 8, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 8, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
else if (m_inputNumChannels % 48 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 12, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 12, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 12, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 32 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
else if (m_inputNumChannels % 16 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 4, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 4, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 4, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 2, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 2, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 2, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
}
else if (m_inputNumChannels % 4 == 0)
{
ShipAssert(false, "Currently not supported!");
}
CudaAssert(hipGetLastError());
}
void ConvolutionalLayer::CalculatePreactivationsGradients()
{
CalculatePreactivationGradients(m_activationType, m_activationGradientsBuffer, m_activationDataBuffer, (uint)(m_activationBufferSize / sizeof(float)),
m_preactivationGradientsBuffer, m_deviceCalculationStream);
}
void ConvolutionalLayer::DoBackwardProp()
{
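// Preactivation gradients must be computed first: the input and bias gradient computations read
// m_preactivationGradientsBuffer, and the weights gradients are presumably derived from it as well.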
CalculatePreactivationsGradients();
CalculateInputGradients();
CalculateWeightsGradients();
CalculateBiasesGradients();
}
void ConvolutionalLayer::UpdateLayerParameters(float learningProgress)
{
CommonUpdateLayerParameters(learningProgress, m_filtersBuffer, m_filtersGradientsBuffer, m_filtersUpdateBuffer, (uint)(m_filtersBufferSize / sizeof(float)),
m_filtersUpdateMomentum, m_filtersUpdateLearningRateProgressStep, m_filtersUpdateStartingLearningRate, m_filtersUpdateLearningRateUpdateFactor,
m_filtersUpdateDecay, m_biasesBuffer, m_biasesGradientsBuffer, m_biasesUpdateBuffer, (uint)(m_biasesBufferSize / sizeof(float)), m_biasesUpdateMomentum,
m_biasesUpdateLearningRateProgressStep, m_biasesUpdateStartingLearningRate, m_biasesUpdateLearningRateUpdateFactor, m_biasesUpdateDecay);
} | 91039749c024233bc07967823b1d19e99024dd55.cu | // ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Neural network convolutional layer.
// Created: 01/03/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/convolutionallayer.cuh"
const uint ConvolutionalLayer::c_biasesGradientsSumsPerThread = 128;
const uint ConvolutionalLayer::c_biasesGradientsPartialSumThreadsPerBlock = 128;
ConvolutionalLayer::ConvolutionalLayer(ParallelismMode parallelismMode, cudaStream_t deviceCalculationStream, cudaStream_t deviceMemoryStream, uint indexInTier,
uint tierSize, uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, bool holdsInputData, uint numFilters, uint filterWidth,
uint filterHeight, uint numFilterChannels, bool initializeWeights, float weightsDeviation, bool initializeBiases, float biasesInitialValue,
float filtersUpdateMomentum, float filtersUpdateDecay, float filtersUpdateLearningRateProgressStep, float filtersUpdateStartingLearningRate,
float filtersUpdateLearningRateUpdateFactor, float biasesUpdateMomentum, float biasesUpdateDecay, float biasesUpdateLearningRateProgressStep,
float biasesUpdateStartingLearningRate, float biasesUpdateLearningRateUpdateFactor, int paddingX, int paddingY, uint stride,
ActivationType activationType, bool holdsActivationGradients)
{
m_layerType = LayerType::Convolutional;
m_parallelismMode = parallelismMode;
m_deviceCalculationStream = deviceCalculationStream;
m_deviceMemoryStream = deviceMemoryStream;
m_indexInTier = indexInTier;
m_tierSize = tierSize;
m_activationType = activationType;
m_inputNumChannels = inputNumChannels;
m_inputDataWidth = inputDataWidth;
m_inputDataHeight = inputDataHeight;
m_inputDataSize = m_inputDataWidth * m_inputDataHeight;
m_inputDataCount = inputDataCount;
m_holdsInputData = holdsInputData;
m_numFilters = numFilters;
m_filterWidth = filterWidth;
m_filterHeight = filterHeight;
m_filterSize = m_filterWidth * m_filterHeight;
m_numFilterChannels = numFilterChannels;
m_filtersUpdateMomentum = filtersUpdateMomentum;
m_filtersUpdateDecay = filtersUpdateDecay;
m_filtersUpdateLearningRateProgressStep = filtersUpdateLearningRateProgressStep;
m_filtersUpdateStartingLearningRate = filtersUpdateStartingLearningRate;
m_filtersUpdateLearningRateUpdateFactor = filtersUpdateLearningRateUpdateFactor;
m_biasesUpdateMomentum = biasesUpdateMomentum;
m_biasesUpdateDecay = biasesUpdateDecay;
m_biasesUpdateLearningRateProgressStep = biasesUpdateLearningRateProgressStep;
m_biasesUpdateStartingLearningRate = biasesUpdateStartingLearningRate;
m_biasesUpdateLearningRateUpdateFactor = biasesUpdateLearningRateUpdateFactor;
m_paddingX = paddingX;
m_paddingY = paddingY;
m_stride = stride;
m_numPatchesX = 1 + (uint)ceil((double)(2 * paddingX + m_inputDataWidth - m_filterWidth) / m_stride);
m_numPatchesY = 1 + (uint)ceil((double)(2 * paddingY + m_inputDataHeight - m_filterHeight) / m_stride);
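// Illustrative example (values assumed): for m_inputDataWidth == 224, m_filterWidth == 11, paddingX == 0 and
// m_stride == 4, m_numPatchesX == 1 + ceil((224 - 11) / 4) == 1 + 54 == 55 output positions per row.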
m_activationNumChannels = m_numFilters;
m_activationDataWidth = m_numPatchesX;
m_activationDataHeight = m_numPatchesY;
m_activationDataSize = m_activationDataWidth * m_activationDataHeight;
// Allocating input data buffer.
m_inputBufferSize = m_inputNumChannels * m_inputDataSize * m_inputDataCount * sizeof(float);
if (m_holdsInputData)
{
CudaAssert(cudaMalloc<float>(&m_inputDataBuffer, m_inputBufferSize));
}
// Allocating input gradients buffer.
CudaAssert(cudaMalloc<float>(&m_inputGradientsBuffer, m_inputBufferSize));
// Allocating filters buffers.
m_filtersBufferSize = m_numFilters * m_filterSize * m_numFilterChannels * sizeof(float);
CudaAssert(cudaMalloc<float>(&m_filtersBuffer, m_filtersBufferSize));
CudaAssert(cudaMalloc<float>(&m_filtersGradientsBuffer, m_filtersBufferSize));
m_preactivationGradientsPerChunkWidth = m_inputNumChannels > 3 ? 3 : 4;
m_filtersGradientsPerChunkBufferSize = DivideUp(m_numPatchesX, m_preactivationGradientsPerChunkWidth) *
DivideUp(m_numPatchesY, m_preactivationGradientsPerChunkWidth) * m_filtersBufferSize;
CudaAssert(cudaMalloc<float>(&m_filtersGradientsPerChunkBuffer, m_filtersGradientsPerChunkBufferSize));
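// One filters-gradients-sized slab is kept per chunk of m_preactivationGradientsPerChunkWidth x
// m_preactivationGradientsPerChunkWidth output positions; these per-chunk partial gradients are presumably
// reduced into m_filtersGradientsBuffer during the weights gradients pass.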
CudaAssert(cudaMalloc<float>(&m_filtersUpdateBuffer, m_filtersBufferSize));
// Initializing filter weights.
if (initializeWeights)
{
InitializeParamsFromDistribution(m_filtersBuffer, m_filtersBufferSize, weightsDeviation);
InitializeParamsToValue(m_filtersUpdateBuffer, m_filtersBufferSize, 0.f);
}
// Allocating biases buffers.
m_biasesBufferSize = m_numFilters * sizeof(float);
CudaAssert(cudaMalloc<float>(&m_biasesBuffer, m_biasesBufferSize));
CudaAssert(cudaMalloc<float>(&m_biasesGradientsBuffer, m_biasesBufferSize));
CudaAssert(cudaMalloc<float>(&m_biasesUpdateBuffer, m_biasesBufferSize));
// Allocating buffer for holding partial sums for calculating biases gradients.
m_biasesGradientsPartialSumBlocks = DivideUp(DivideUp(m_inputDataCount * m_numPatchesY * m_numPatchesX, c_biasesGradientsSumsPerThread),
c_biasesGradientsPartialSumThreadsPerBlock);
CudaAssert(cudaMalloc<float>(&m_biasesGradientsPartialSumsBuffer,
m_biasesGradientsPartialSumBlocks * c_biasesGradientsPartialSumThreadsPerBlock * m_biasesBufferSize));
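// Illustrative sizing example (values assumed): with m_inputDataCount == 128 and 13 x 13 patches there are
// 128 * 13 * 13 == 21632 gradient elements per filter, i.e. DivideUp(21632, 128) == 169 partial sums, which the
// partial-sum kernel rounds up to m_biasesGradientsPartialSumBlocks == 2 blocks of 128 threads; every thread
// writes its slot (threads with nothing to sum write 0), hence 2 * 128 == 256 slots are allocated per filter.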
// Initializing biases.
if (initializeBiases)
{
InitializeParamsToValue(m_biasesBuffer, m_biasesBufferSize, biasesInitialValue);
InitializeParamsToValue(m_biasesUpdateBuffer, m_biasesBufferSize, 0.f);
}
// Allocating preactivation and activation data buffers.
m_activationBufferSize = m_numFilters * m_activationDataSize * m_inputDataCount * sizeof(float);
CudaAssert(cudaMalloc<float>(&m_preactivationDataBuffer, m_activationBufferSize));
CudaAssert(cudaMalloc<float>(&m_activationDataBuffer, m_activationBufferSize));
// Allocating preactivation gradients buffer.
CudaAssert(cudaMalloc<float>(&m_preactivationGradientsBuffer, m_activationBufferSize));
// Allocating activation gradients buffer.
m_holdsActivationGradients = holdsActivationGradients;
if (m_holdsActivationGradients)
{
CudaAssert(cudaMalloc<float>(&m_activationGradientsBuffer, m_activationBufferSize));
}
}
void ConvolutionalLayer::Reinitialize(uint newInputDataCount)
{
Layer::Reinitialize(newInputDataCount);
m_biasesGradientsPartialSumBlocks = DivideUp(DivideUp(m_inputDataCount * m_numPatchesY * m_numPatchesX, c_biasesGradientsSumsPerThread),
c_biasesGradientsPartialSumThreadsPerBlock);
m_activationBufferSize = m_numFilters * m_activationDataSize * m_inputDataCount * sizeof(float);
}
void ConvolutionalLayer::CopyFiltersFromHost(float* hostFiltersBuffer)
{
CudaAssert(cudaMemcpyAsync(m_filtersBuffer, hostFiltersBuffer, m_filtersBufferSize, cudaMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
void ConvolutionalLayer::CopyFiltersUpdateFromHost(float* hostFiltersUpdateBuffer)
{
CudaAssert(cudaMemcpyAsync(m_filtersUpdateBuffer, hostFiltersUpdateBuffer, m_filtersBufferSize, cudaMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
void ConvolutionalLayer::CopyBiasesFromHost(float* hostBiasesBuffer)
{
CudaAssert(cudaMemcpyAsync(m_biasesBuffer, hostBiasesBuffer, m_biasesBufferSize, cudaMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
void ConvolutionalLayer::CopyBiasesUpdateFromHost(float* hostBiasesUpdateBuffer)
{
CudaAssert(cudaMemcpyAsync(m_biasesUpdateBuffer, hostBiasesUpdateBuffer, m_biasesBufferSize, cudaMemcpyHostToDevice, m_deviceMemoryStream));
SynchronizeMemoryOperations();
}
ConvolutionalLayer::~ConvolutionalLayer()
{
CudaAssert(cudaFree(m_filtersBuffer));
CudaAssert(cudaFree(m_filtersGradientsBuffer));
CudaAssert(cudaFree(m_filtersGradientsPerChunkBuffer));
CudaAssert(cudaFree(m_filtersUpdateBuffer));
CudaAssert(cudaFree(m_biasesBuffer));
CudaAssert(cudaFree(m_biasesGradientsBuffer));
CudaAssert(cudaFree(m_biasesGradientsPartialSumsBuffer));
CudaAssert(cudaFree(m_biasesUpdateBuffer));
CudaAssert(cudaFree(m_preactivationDataBuffer));
CudaAssert(cudaFree(m_preactivationGradientsBuffer));
}
void ConvolutionalLayer::LoadInputs()
{
CommonLoadInputs();
}
/*
Applies filters to the input data (which has at most 3 channels).
Each thread applies a specified number of filters to a specified number of input data items.
The grid is organized so that different columns work on different data, and different rows work on different filters, or on the same filters but a different patch.
Rows are sorted first by patch, then by filters.
This needs to be a templated function, since loops must have compile-time constant bounds to be unrolled.
*/
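// Illustrative mapping (using the <32, 4, 4, 16, 3, 4, false> instantiation launched below): each 32x4 block
// computes 32 * 4 == 128 data items and 4 * 16 == 64 filters for one output patch, and each caching pass
// stages 4 filter/data pixels per channel, i.e. 12 shared-memory rows for the 3 input channels.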
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_filtersPerThread, uint c_numChannels, uint c_cacheLength, bool c_lastBatch>
__global__ void ApplyFiltersOnInputData(float* dataBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount, const int paddingX,
const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters, const uint stride,
const uint numPatchesX, const uint numPatchesY, float* preactivationsBuffer)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
const uint c_filtersPerBlock = c_blockHeight * c_filtersPerThread;
// Since the same filters are used across threads in the same block row, and the same data across threads in the same block column,
// we benefit from caching the data and filters in shared memory.
// In each pass we cache cache-length pixels from each channel of the filters/data.
const uint c_cacheSize = c_cacheLength * c_numChannels;
__shared__ float dataCache[c_cacheSize][c_dataPerBlock];
__shared__ float filtersCache[c_cacheSize][c_filtersPerBlock];
// Positioning the filters buffer; it will be loaded into the cache window by window, where each window has dimensions FiltersPerBlock x CacheLength.
const uint c_blocksPerPatch = numFilters / c_filtersPerBlock;
const uint c_filtersOffset = (blockIdx.y % c_blocksPerPatch) * c_filtersPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
// The filter cache index is the column in the filters cache window, i.e. which filter we are caching.
const uint c_filtersCacheIndex = c_threadIndex % c_filtersPerBlock;
// The filter cache position is the row in the filters cache window, i.e. which filter pixel we are caching.
const uint c_filtersCachePosition = c_threadIndex / c_filtersPerBlock;
filtersBuffer += c_filtersOffset + c_filtersCachePosition * numFilters + c_filtersCacheIndex;
// Positioning data buffer.
const uint c_dataOffset = blockIdx.x * c_dataPerBlock + threadIdx.x;
dataBuffer += c_dataOffset;
// Positioning preactivations buffer.
const uint c_numPatches = numPatchesX * numPatchesY;
const uint c_patchIndex = blockIdx.y / c_blocksPerPatch;
preactivationsBuffer += (c_filtersOffset + threadIdx.y * c_filtersPerThread) * dataCount * c_numPatches + c_patchIndex * dataCount + c_dataOffset;
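// Preactivations are laid out filter-major as [filter][patch][data item], so consecutive threads of a warp
// (consecutive data items) write to consecutive addresses.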
// Initializing the buffer for the preactivations calculated by this thread.
float threadPreactivations[c_filtersPerThread][c_dataPerThread];
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadPreactivations[filterIndex][dataIndex] = 0.f;
}
}
// Calculating this thread's preactivations.
const uint c_filtersCacheMaxPosition = c_blockWidth / c_filtersPerThread;
const bool c_blockFitsInFilters = c_blockWidth % c_filtersPerThread == 0;
const bool c_blockCoversFilters = c_cacheLength % c_filtersCacheMaxPosition == 0;
const bool c_blockCoversCache = c_cacheLength % c_blockHeight == 0;
const int c_dataPositionX = -paddingX + (c_patchIndex % numPatchesX) * stride;
const int c_dataPositionY = -paddingY + (c_patchIndex / numPatchesX) * stride;
for (uint filterPosition = 0; filterPosition < filterSize; filterPosition += c_cacheLength)
{
// Loading filters cache from filter position.
// Each thread in the block loads some of the filters cache window pixels (the window has FiltersPerBlock x CacheLength dimensions).
// The best case is when the block completely covers the filters cache window,
// a worse case is when it only just fits the window, so we have to cover it in a couple of passes,
// and the worst case is when it doesn't evenly fit the window, so some of the block's threads stay idle.
if (c_blockFitsInFilters || c_filtersCachePosition < c_filtersCacheMaxPosition)
{
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_filtersCacheMaxPosition)
{
const uint c_currCachePosition = passedCachePosition + c_filtersCachePosition;
if (c_blockCoversFilters || c_currCachePosition < c_cacheLength)
{
if (filterPosition + c_currCachePosition < filterSize)
{
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
filtersCache[channel * c_cacheLength + c_currCachePosition][c_filtersCacheIndex] =
filtersBuffer[(channel * filterSize + filterPosition + passedCachePosition) * numFilters];
}
}
else
{
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
filtersCache[channel * c_cacheLength + c_currCachePosition][c_filtersCacheIndex] = 0.f;
}
}
}
}
}
// Loading data cache from filter position in data patch.
// Each thread in the block loads a data pixel into the data cache; threads in the same column load
// different pixels from the same data, while threads in the same row load pixels at the same position
// but from different data.
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_blockHeight)
{
const uint c_currCachePosition = passedCachePosition + threadIdx.y;
const uint c_currFilterPosition = filterPosition + c_currCachePosition;
if (c_currFilterPosition < filterSize && (c_blockCoversCache || c_currCachePosition < c_cacheLength))
{
const int c_currDataPositionX = c_dataPositionX + c_currFilterPosition % filterWidth;
const int c_currDataPositionY = c_dataPositionY + c_currFilterPosition / filterWidth;
if (c_currDataPositionX >= 0 && c_currDataPositionX < dataWidth && c_currDataPositionY >= 0 && c_currDataPositionY < dataHeight)
{
float* currDataBufferPosition = dataBuffer + (c_currDataPositionY * dataWidth + c_currDataPositionX) * dataCount;
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
dataCache[c_currCachePosition + channel * c_cacheLength][threadIdx.x * c_dataPerThread + dataIndex] =
currDataBufferPosition[channel * dataCount * dataSize + dataIndex * c_blockWidth];
}
else
{
dataCache[c_currCachePosition + channel * c_cacheLength][threadIdx.x * c_dataPerThread + dataIndex] = 0.f;
}
}
}
}
else
{
// Fill padded positions with zeros.
#pragma unroll
for (uint channel = 0; channel < c_numChannels; ++channel)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
dataCache[c_currCachePosition + channel * c_cacheLength][threadIdx.x * c_dataPerThread + dataIndex] = 0.f;
}
}
}
}
}
__syncthreads();
// Applying loaded filter cache to loaded data cache.
#pragma unroll
for (uint cacheIndex = 0; cacheIndex < c_cacheSize; ++cacheIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadPreactivations[filterIndex][dataIndex] += dataCache[cacheIndex][threadIdx.x * c_dataPerThread + dataIndex] *
filtersCache[cacheIndex][threadIdx.y * c_filtersPerThread + filterIndex];
}
}
}
__syncthreads();
}
// Writing the preactivations calculated by this thread into the preactivations buffer.
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
preactivationsBuffer[dataIndex * c_blockWidth + filterIndex * dataCount * c_numPatches] = threadPreactivations[filterIndex][dataIndex];
}
}
}
}
/*
Applies filters to already filtered data (the result of previously applied filters).
Each thread applies a specified number of filters to a specified number of input data items.
The grid is organized so that different columns work on different data, and different rows work on different filters, or on the same filters but a different patch.
Rows are sorted first by patch, then by filters.
This needs to be a templated function, since loops must have compile-time constant bounds to be unrolled.
*/
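// Unlike ApplyFiltersOnInputData, which caches a few pixels from every channel at once, this kernel walks the
// filter window pixel by pixel and caches c_cacheLength channels at a time, since here the channel count is a
// multiple of the cache length rather than a small fixed number.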
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_filtersPerThread, uint c_cacheLength, bool c_lastBatch>
__global__ void ApplyFiltersOnFilteredData(float* dataBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint numFilterChannels, const uint stride, const uint numPatchesX, const uint numPatchesY, float* preactivationsBuffer)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
const uint c_filtersPerBlock = c_blockHeight * c_filtersPerThread;
// Since the same filters are used across threads in the same block row, and the same data across threads in the same block column,
// we benefit from caching the data and filters in shared memory.
// In each pass we cache one pixel from cache-length channels of the filters/data.
__shared__ float dataCache[c_cacheLength][c_dataPerBlock];
__shared__ float filtersCache[c_cacheLength][c_filtersPerBlock];
// Positioning the filters buffer; it will be loaded into the cache window by window, where each window has dimensions FiltersPerBlock x CacheLength.
const uint c_blocksPerPatch = numFilters / c_filtersPerBlock;
const uint c_filtersOffset = (blockIdx.y % c_blocksPerPatch) * c_filtersPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
// The filter cache index is the column in the filters cache window, i.e. which filter we are caching.
const uint c_filtersCacheIndex = c_threadIndex % c_filtersPerBlock;
// The filter cache position is the row in the filters cache window, i.e. which filter channel we are caching.
const uint c_filtersCachePosition = c_threadIndex / c_filtersPerBlock;
filtersBuffer += c_filtersOffset + c_filtersCachePosition * numFilters * filterSize + c_filtersCacheIndex;
// Positioning data buffer.
const uint c_dataOffset = blockIdx.x * c_dataPerBlock + threadIdx.x;
dataBuffer += threadIdx.y * dataCount * dataSize + c_dataOffset;
// Positioning preactivations buffer.
const uint c_numPatches = numPatchesX * numPatchesY;
const uint c_patchIndex = blockIdx.y / c_blocksPerPatch;
preactivationsBuffer += (c_filtersOffset + threadIdx.y) * dataCount * c_numPatches + c_patchIndex * dataCount + c_dataOffset;
// Initializing the buffer for the preactivations calculated by this thread.
float threadPreactivations[c_filtersPerThread][c_dataPerThread];
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadPreactivations[filterIndex][dataIndex] = 0.f;
}
}
// Calculating this thread's preactivations.
const uint c_filtersCacheMaxPosition = c_blockWidth / c_filtersPerThread;
const bool c_blockCoversFilters = c_cacheLength % c_filtersCacheMaxPosition == 0;
const bool c_blockCoversCache = c_cacheLength % c_blockHeight == 0;
const int c_dataPositionX = -paddingX + (c_patchIndex % numPatchesX) * stride;
const int c_dataPositionY = -paddingY + (c_patchIndex / numPatchesX) * stride;
const uint c_dataStartPositionX = max(0, c_dataPositionX);
const uint c_dataEndPositionX = min(c_dataPositionX + filterWidth, dataWidth);
const uint c_dataStartPositionY = max(0, c_dataPositionY);
const uint c_dataEndPositionY = min(c_dataPositionY + filterHeight, dataHeight);
for (uint currDataPositionY = c_dataStartPositionY; currDataPositionY < c_dataEndPositionY; ++currDataPositionY)
{
const uint c_currFilterPositionY = currDataPositionY - c_dataPositionY;
for (uint currDataPositionX = c_dataStartPositionX; currDataPositionX < c_dataEndPositionX; ++currDataPositionX)
{
const uint c_currFilterPositionX = currDataPositionX - c_dataPositionX;
const uint c_currFilterPosition = c_currFilterPositionY * filterWidth + c_currFilterPositionX;
const uint c_currDataPosition = currDataPositionY * dataWidth + currDataPositionX;
for (uint currChannelPosition = 0; currChannelPosition < numFilterChannels; currChannelPosition += c_cacheLength)
{
// Loading filters cache from filter position.
if (c_filtersCachePosition < c_filtersCacheMaxPosition)
{
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_filtersCacheMaxPosition)
{
const uint c_currCachePosition = passedCachePosition + c_filtersCachePosition;
if (c_blockCoversFilters || c_currCachePosition < c_cacheLength)
{
filtersCache[c_currCachePosition][c_filtersCacheIndex] =
filtersBuffer[((currChannelPosition + passedCachePosition)* filterSize + c_currFilterPosition) * numFilters];
}
}
}
// Loading data cache from filter position in data patch.
float* currDataBufferPosition = dataBuffer + (currChannelPosition * dataSize + c_currDataPosition) * dataCount;
#pragma unroll
for (uint passedCachePosition = 0; passedCachePosition < c_cacheLength; passedCachePosition += c_blockHeight)
{
const uint c_currCachePosition = passedCachePosition + threadIdx.y;
if (c_blockCoversCache || c_currCachePosition < c_cacheLength)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
dataCache[c_currCachePosition][threadIdx.x + dataIndex * c_blockWidth] =
currDataBufferPosition[passedCachePosition * dataCount * dataSize + dataIndex * c_blockWidth];
}
else
{
dataCache[c_currCachePosition][threadIdx.x + dataIndex * c_blockWidth] = 0.f;
}
}
}
}
__syncthreads();
// Applying loaded filter cache to loaded data cache.
#pragma unroll
for (uint cacheIndex = 0; cacheIndex < c_cacheLength; ++cacheIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadPreactivations[filterIndex][dataIndex] += dataCache[cacheIndex][dataIndex * c_blockWidth + threadIdx.x] *
filtersCache[cacheIndex][filterIndex * c_blockHeight + threadIdx.y];
}
}
}
__syncthreads();
}
}
}
// Writing the preactivations calculated by this thread into the preactivations buffer.
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + dataIndex * c_blockWidth < dataCount)
{
preactivationsBuffer[dataIndex * c_blockWidth + filterIndex * c_blockHeight * dataCount * c_numPatches] = threadPreactivations[filterIndex][dataIndex];
}
}
}
}
void ConvolutionalLayer::CalculatePreactivations()
{
uint dataPerThread = m_inputDataCount % 128 == 0 ? 4 : (m_inputDataCount % 64 == 0 ? 2 : 1);
uint filtersPerThread = m_numFilters % 64 == 0 ? 16 : ((m_inputNumChannels <= 3 && m_numFilters % 48 == 0) ? 12 : (m_numFilters % 32 == 0 ? 8 : 4));
uint blockWidth = 32;
uint blockHeight = (m_numFilters % 128 == 0 && m_numFilterChannels % 8 == 0 && dataPerThread < 4) ? 8 : 4;
dim3 blockDimensions(blockWidth, blockHeight);
dim3 gridDimensions(DivideUp(m_inputDataCount, blockWidth * dataPerThread), (m_activationDataSize * m_numFilters) / (blockHeight * filtersPerThread));
bool lastBatch = m_inputDataCount % (blockWidth * dataPerThread) != 0;
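// Illustrative sizing example (values assumed): with m_inputDataCount == 128, m_numFilters == 64 and 55 x 55
// patches, dataPerThread == 4 and filtersPerThread == 16, so gridDimensions ==
// (DivideUp(128, 128), (55 * 55 * 64) / (4 * 16)) == (1, 3025) and lastBatch == false.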
if (lastBatch)
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 16, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 12, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 8, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 4, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputNumChannels % 8 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 8, 16, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 8, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputNumChannels % 4 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
else
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_inputDataCount % 128 == 0)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 16, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 12, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 8, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 4, 4, 4, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 64 == 0)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 16, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 12, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 8, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 2, 4, 4, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 32 == 0)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 16, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 12, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 8, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnInputData<32, 1, 4, 4, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
else if (m_inputNumChannels % 8 == 0)
{
if (m_inputDataCount % 128 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 8, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 4, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 8, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 8, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 4, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 8, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 8, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
else if (m_inputNumChannels % 4 == 0)
{
if (m_inputDataCount % 128 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 8, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 4, 4, 4, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 8, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 2, 4, 4, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
else if (m_inputDataCount % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 16, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 8, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((ApplyFiltersOnFilteredData<32, 1, 4, 4, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer, m_inputDataWidth,
m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize, m_numFilters,
m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationDataBuffer);
}
}
}
}
CudaAssert(cudaGetLastError());
}
/*
Uses grid-stride loops to add filter biases to preactivations.
*/
__global__ void AddFilterBiases(float* preactivations, float* biases, const uint width, const uint height)
{
for (uint y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += gridDim.y * blockDim.y)
{
int laneId = threadIdx.x % warpSize;
// Only the first lane in each warp reads the bias from global memory; it is then broadcast to the rest of the warp.
float biasValue;
if (laneId == 0)
{
biasValue = biases[y];
}
biasValue = __shfl(biasValue, 0);
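// __shfl without the _sync suffix is deprecated since CUDA 9; on newer toolkits the broadcast above would
// typically be written as __shfl_sync(0xFFFFFFFF, biasValue, 0), which is equivalent here since the whole
// warp is active at this point.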
for (uint x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += gridDim.x * blockDim.x)
{
preactivations[y * width + x] += biasValue;
}
}
}
void ConvolutionalLayer::AddBiases()
{
dim3 blockDimensions(Config::MAX_NUM_THREADS, 1);
const uint c_width = m_inputDataCount * m_numPatchesX * m_numPatchesY;
const uint c_blocksPerWidth = max((uint)1, c_width / (uint)Config::MAX_NUM_THREADS);
uint gridX = c_blocksPerWidth;
if (c_blocksPerWidth >= 128)
{
gridX = 128;
}
else if (c_blocksPerWidth >= 64)
{
gridX = 64;
}
else if (c_blocksPerWidth >= 32)
{
gridX = 32;
}
dim3 gridDimensions(gridX, 64);
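// The preactivation buffer is treated here as a matrix with one row per filter (height = m_numFilters) and one
// column per (data item, patch) pair (width = m_inputDataCount * m_numPatchesX * m_numPatchesY), so each row has
// a single bias added to all of its elements.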
LAUNCH_KERNEL_ASYNC(AddFilterBiases, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationDataBuffer, m_biasesBuffer, c_width, m_numFilters);
CudaAssert(cudaGetLastError());
}
void ConvolutionalLayer::CalculateActivations()
{
ApplyActivation(m_activationType, m_preactivationDataBuffer, (uint)(m_activationBufferSize / sizeof(float)), m_activationDataBuffer, m_deviceCalculationStream);
}
void ConvolutionalLayer::DoForwardProp(PropagationMode propagationMode)
{
CalculatePreactivations();
AddBiases();
CalculateActivations();
}
/*
Calculates per-filter partial sums of preactivation gradients, used to compute bias gradients.
*/
__global__ void __CalculateBiasesGradientsPartialSums(float* preactivationGradients, const uint numElementsToSum, float* partialSumsBuffer)
{
float partialSum = 0.f;
const uint c_preactivationsGradientsOffset = blockIdx.y * numElementsToSum;
for (uint partialSumIndex = blockIdx.x * blockDim.x + threadIdx.x; partialSumIndex < numElementsToSum; partialSumIndex += gridDim.x * blockDim.x)
{
partialSum += preactivationGradients[c_preactivationsGradientsOffset + partialSumIndex];
}
partialSumsBuffer[blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x] = partialSum;
}
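/*
Layout of partialSumsBuffer implied by the indexing above: one row per filter (blockIdx.y), with
gridDim.x * blockDim.x partial sums per row; the kernel below reduces each row to a single bias gradient.
*/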
/*
Calculates bias gradients; each thread calculates the gradient for one bias.
*/
__global__ void __CalculateConvolutionalBiasesGradients(float* partialSumsBuffer, const uint numFilters, const uint numPartialSums, const uint batchSize,
float* biasesGradients)
{
const uint c_filterIndex = blockIdx.x * blockDim.x + threadIdx.x;
const uint c_filterPartialSumsOffset = c_filterIndex * numPartialSums;
if (c_filterIndex < numFilters)
{
float biasGradient = 0.f;
for (uint partialSumIndex = 0; partialSumIndex < numPartialSums; ++partialSumIndex)
{
biasGradient += partialSumsBuffer[c_filterPartialSumsOffset + partialSumIndex];
}
biasesGradients[c_filterIndex] = biasGradient / (float)batchSize;
}
}
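/*
Taken together, the two kernels above compute, for each filter f (a sketch of the math, not additional code):
  biasesGradients[f] = (1 / batchSize) * sum over all patches and data items of preactivationGradients[f][patch][dataItem]
*/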
void ConvolutionalLayer::CalculateBiasesGradients()
{
// Summing preactivation gradients into a temporary partial-sums buffer.
const uint c_width = m_inputDataCount * m_numPatchesY * m_numPatchesX;
dim3 blockDimensions(c_biasesGradientsPartialSumThreadsPerBlock);
dim3 gridDimensions(m_biasesGradientsPartialSumBlocks, m_numFilters);
LAUNCH_KERNEL_ASYNC(__CalculateBiasesGradientsPartialSums, gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
c_width, m_biasesGradientsPartialSumsBuffer);
CudaAssert(cudaGetLastError());
// Summing from partial sums buffer to biases gradients buffer.
const uint c_numThreadsPerBlock = 128;
const uint c_numBlocks = DivideUp(m_numFilters, c_numThreadsPerBlock);
const uint c_batchSize = m_parallelismMode == ParallelismMode::Model ? m_inputDataCount : m_tierSize * m_inputDataCount;
LAUNCH_KERNEL_ASYNC(__CalculateConvolutionalBiasesGradients, dim3(c_numBlocks), dim3(c_numThreadsPerBlock), m_deviceCalculationStream)(m_biasesGradientsPartialSumsBuffer,
m_numFilters, m_biasesGradientsPartialSumBlocks * c_biasesGradientsPartialSumThreadsPerBlock, c_batchSize, m_biasesGradientsBuffer);
CudaAssert(cudaGetLastError());
}
/*
Calculates weights gradients on input data.
Each thread calculates gradients for a specified number of weights per thread and a specified number of filters per thread,
within one chunk of preactivation gradients.
Needs to be a templated function since loops must have compile-time constant bounds to be unrolled.
*/
template <uint c_blockWidth, uint c_filtersPerThread, uint c_blockHeight, uint c_pixelsPerThread, uint c_dataPerLoad, uint c_pixelsPerLoad, uint c_numChannels, bool c_lastBatch>
__global__ void CalculateInputDataWeightsGradients(float* inputBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* preactivationGradients, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint stride, const uint numPatchesX, const uint numPatchesY, const uint preactivationGradientsPerChunkWidth, float* filtersGradientsPerChunkBuffer)
{
const uint c_filtersPerBlock = c_blockWidth * c_filtersPerThread;
const uint c_pixelsPerBlock = c_blockHeight * c_pixelsPerThread;
const uint c_pixelsPerCache = c_blockHeight * c_pixelsPerLoad;
// Since threads in the same block row reuse the same input values and threads in the same block column reuse the same gradient values,
// we benefit from caching inputs and gradients in shared memory.
__shared__ float inputCache[c_pixelsPerCache * c_numChannels][c_dataPerLoad];
__shared__ int inputPixelOffsetsCache[c_pixelsPerBlock];
__shared__ float gradientsCache[c_filtersPerBlock][c_dataPerLoad + 1];
// Positioning inputs buffer.
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_cacheLoadIndex = c_threadIndex / c_dataPerLoad;
const uint c_dataLoadIndex = c_threadIndex % c_dataPerLoad;
inputBuffer += c_dataLoadIndex;
// Positioning preactivation gradients buffer.
const uint c_blocksPerChunk = numFilters / c_filtersPerBlock;
const uint c_filterOffset = (blockIdx.x % c_blocksPerChunk) * c_filtersPerBlock;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += c_filterOffset * c_numPatches * dataCount + c_dataLoadIndex;
// Positioning gradients buffer.
const uint c_chunkIndex = blockIdx.x / c_blocksPerChunk;
const uint c_pixelOffset = blockIdx.y * c_pixelsPerBlock;
filtersGradientsPerChunkBuffer += c_chunkIndex * c_numChannels * numFilters * filterSize + (c_pixelOffset + threadIdx.y) * numFilters + c_filterOffset + threadIdx.x;
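// Layout of filtersGradientsPerChunkBuffer implied by the offsets above and the writes at the end of the kernel:
// [chunk][channel][filterPixel][filter]; each chunk holds a full filters-gradients tensor, which is later reduced
// by AggregateWeightsGradientsFromChunks.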
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_numChannels][c_pixelsPerThread][c_filtersPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint pixelIndex = 0; pixelIndex < c_pixelsPerThread; ++pixelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadGradients[channelIndex][pixelIndex][filterIndex] = 0.f;
}
}
}
// Calculating this thread's gradients.
const uint c_numChunksX = (numPatchesX + preactivationGradientsPerChunkWidth - 1) / preactivationGradientsPerChunkWidth;
const uint c_chunkY = c_chunkIndex / c_numChunksX;
const uint c_chunkX = c_chunkIndex % c_numChunksX;
const uint c_patchY = c_chunkY * preactivationGradientsPerChunkWidth;
const uint c_patchX = c_chunkX * preactivationGradientsPerChunkWidth;
const uint c_firstPatchY = c_patchY;
const uint c_firstPatchX = c_patchX;
const uint c_lastPatchY = min(numPatchesY, c_patchY + preactivationGradientsPerChunkWidth);
const uint c_lastPatchX = min(numPatchesX, c_patchX + preactivationGradientsPerChunkWidth);
const uint c_filterPixelY = (c_pixelOffset + c_threadIndex) / filterWidth;
const uint c_filterPixelX = (c_pixelOffset + c_threadIndex) % filterWidth;
for (uint patchY = c_firstPatchY; patchY < c_lastPatchY; ++patchY)
{
const int c_inputPixelY = (int)(c_filterPixelY + patchY * stride) - paddingY;
for (uint patchX = c_firstPatchX; patchX < c_lastPatchX; ++patchX)
{
const int c_inputPixelX = (int)(c_filterPixelX + patchX * stride) - paddingX;
const uint c_patch = patchY * numPatchesX + patchX;
// Loading input pixels offsets cache.
__syncthreads();
if (c_threadIndex < c_pixelsPerBlock)
{
const int c_inputPixelOffset = (c_inputPixelY * (int)dataWidth + c_inputPixelX) * (int)dataCount;
inputPixelOffsetsCache[c_threadIndex] = (c_inputPixelY >= 0 && c_inputPixelY < dataHeight &&
c_inputPixelX >= 0 && c_inputPixelX < dataWidth) ? c_inputPixelOffset : -1;
}
__syncthreads();
// Load input pixels and gradient pixels for c_dataPerLoad images at a time, and calculate filter gradients on them.
for (uint dataIndex = 0; dataIndex < dataCount; dataIndex += c_dataPerLoad)
{
const uint cacheLoadSlide = (c_blockWidth * c_blockHeight) / c_dataPerLoad;
// Load gradients cache.
if (!c_lastBatch || dataIndex + c_dataLoadIndex < dataCount)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
const uint c_filterToLoad = ((c_cacheLoadIndex + filterIndex) % c_filtersPerThread) * c_blockWidth + (c_cacheLoadIndex + filterIndex) / c_filtersPerThread;
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCache[c_cacheLoadIndex + filterIndex][c_dataLoadIndex] = preactivationGradients[c_filterToLoad * c_numPatches * dataCount +
c_patch * dataCount + dataIndex];
}
}
}
else
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCache[c_cacheLoadIndex + filterIndex][c_dataLoadIndex] = 0.f;
}
}
}
// Load inputs, one cache window at a time, and calculate gradients.
#pragma unroll
for (uint pixelIndex = 0; pixelIndex < c_pixelsPerThread; pixelIndex += c_pixelsPerLoad)
{
// Load inputs cache.
#pragma unroll
for (uint loadPixelIndex = 0; loadPixelIndex < c_pixelsPerCache; loadPixelIndex += cacheLoadSlide)
{
if (c_pixelsPerCache % cacheLoadSlide == 0 || c_cacheLoadIndex + loadPixelIndex < c_pixelsPerCache)
{
const uint c_filterPixel = pixelIndex * c_blockHeight + c_cacheLoadIndex + loadPixelIndex;
if (c_pixelOffset + c_filterPixel < filterSize && (!c_lastBatch || dataIndex + c_dataLoadIndex < dataCount))
{
const int c_inputPixelOffset = inputPixelOffsetsCache[c_filterPixel];
if (c_inputPixelOffset >= 0)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputCache[channelIndex * c_pixelsPerCache + c_cacheLoadIndex + loadPixelIndex][c_dataLoadIndex] =
inputBuffer[channelIndex * dataSize * dataCount + c_inputPixelOffset + dataIndex];
}
}
else
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputCache[channelIndex * c_pixelsPerCache + c_cacheLoadIndex + loadPixelIndex][c_dataLoadIndex] = 0.f;
}
}
}
else
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputCache[channelIndex * c_pixelsPerCache + c_cacheLoadIndex + loadPixelIndex][c_dataLoadIndex] = 0.f;
}
}
}
}
__syncthreads();
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint loadedDataIndex = 0; loadedDataIndex < c_dataPerLoad; ++loadedDataIndex)
{
#pragma unroll
for (uint loadedPixelIndex = 0; loadedPixelIndex < c_pixelsPerLoad; ++loadedPixelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadGradients[channelIndex][pixelIndex + loadedPixelIndex][filterIndex] +=
inputCache[channelIndex * c_pixelsPerCache + loadedPixelIndex * c_blockHeight + threadIdx.y][loadedDataIndex] *
gradientsCache[threadIdx.x * c_filtersPerThread + filterIndex][loadedDataIndex];
}
}
}
}
__syncthreads();
}
}
}
}
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint pixelIndex = 0; pixelIndex < c_pixelsPerThread; ++pixelIndex)
{
if (c_pixelOffset + pixelIndex * c_blockHeight + threadIdx.y < filterSize)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
filtersGradientsPerChunkBuffer[(channelIndex * filterSize + pixelIndex * c_blockHeight) * numFilters + filterIndex * c_blockWidth] =
threadGradients[channelIndex][pixelIndex][filterIndex];
}
}
}
}
}
/*
Calculates weights gradients on filtered data (resulting from previously applied filters).
Each thread calculates the gradient for one weight, for a specified number of filters per thread and a specified number of filter channels per thread,
within one chunk of preactivation gradients.
Needs to be a templated function since loops must have compile-time constant bounds to be unrolled.
*/
template <uint c_blockWidth, uint c_filtersPerThread, uint c_blockHeight, uint c_channelsPerThread, uint c_dataPerLoad, bool c_lastBatch>
__global__ void CalculateFilteredDataWeightsGradients(float* inputBuffer, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* preactivationGradients, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint numFilterChannels, const uint stride, const uint numPatchesX, const uint numPatchesY, const uint preactivationGradientsPerChunkWidth,
float* filtersGradientsPerChunkBuffer)
{
const uint c_filtersPerBlock = c_blockWidth * c_filtersPerThread;
const uint c_channelsPerBlock = c_blockHeight * c_channelsPerThread;
// Since threads in the same block row reuse the same input values and threads in the same block column reuse the same gradient values,
// we benefit from caching inputs and gradients in shared memory.
__shared__ float inputCache[c_channelsPerBlock][c_dataPerLoad];
__shared__ float gradientsCache[c_filtersPerBlock][c_dataPerLoad + 1];
// Positioning inputs buffer.
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_cacheLoadIndex = c_threadIndex / c_dataPerLoad;
const uint c_dataLoadIndex = c_threadIndex % c_dataPerLoad;
const uint c_channelOffset = blockIdx.y * c_channelsPerBlock;
inputBuffer += (c_channelOffset + c_cacheLoadIndex) * dataSize * dataCount + c_dataLoadIndex;
// Positioning preactivation gradients buffer.
const uint c_blocksPerChunk = numFilters / c_filtersPerBlock;
const uint c_filterOffset = (blockIdx.x % c_blocksPerChunk) * c_filtersPerBlock;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += (c_filterOffset + c_cacheLoadIndex) * c_numPatches * dataCount + c_dataLoadIndex;
// Positioning gradients buffer.
const uint c_chunkIndex = blockIdx.x / c_blocksPerChunk;
const uint c_filterPixel = blockIdx.z;
filtersGradientsPerChunkBuffer += (c_chunkIndex * numFilterChannels + c_channelOffset + threadIdx.y) * numFilters * filterSize +
c_filterPixel * numFilters + c_filterOffset + threadIdx.x;
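// As in CalculateInputDataWeightsGradients, the per-chunk gradients layout is [chunk][channel][filterPixel][filter].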
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_channelsPerThread][c_filtersPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
threadGradients[channelIndex][filterIndex] = 0.f;
}
}
// Calculating this thread's gradients.
const uint c_filterPixelY = c_filterPixel / filterWidth;
const uint c_filterPixelX = c_filterPixel % filterWidth;
const uint c_numChunksX = (numPatchesX + preactivationGradientsPerChunkWidth - 1) / preactivationGradientsPerChunkWidth;
const uint c_chunkY = c_chunkIndex / c_numChunksX;
const uint c_chunkX = c_chunkIndex % c_numChunksX;
const uint c_patchY = c_chunkY * preactivationGradientsPerChunkWidth;
const uint c_patchX = c_chunkX * preactivationGradientsPerChunkWidth;
const uint c_firstPatchY = (uint)max((int)c_patchY, (-(int)c_filterPixelY + paddingY + (int)stride - 1) / (int)stride);
const uint c_firstPatchX = (uint)max((int)c_patchX, (-(int)c_filterPixelX + paddingX + (int)stride - 1) / (int)stride);
const uint c_lastPatchY = min(numPatchesY, min(c_patchY + preactivationGradientsPerChunkWidth, (dataHeight - c_filterPixelY + (uint)paddingY + stride - 1) / stride));
const uint c_lastPatchX = min(numPatchesX, min(c_patchX + preactivationGradientsPerChunkWidth, (dataWidth - c_filterPixelX + (uint)paddingX + stride - 1) / stride));
float* inputCacheLoad = &inputCache[c_cacheLoadIndex][c_dataLoadIndex];
float* gradientsCacheLoad = &gradientsCache[c_cacheLoadIndex][c_dataLoadIndex];
for (uint patchY = c_firstPatchY; patchY < c_lastPatchY; ++patchY)
{
const uint c_inputPixelY = c_filterPixelY + patchY * stride - (uint)paddingY;
for (uint patchX = c_firstPatchX; patchX < c_lastPatchX; ++patchX)
{
const uint c_patch = patchY * numPatchesX + patchX;
const uint c_inputPixelX = c_filterPixelX + patchX * stride - (uint)paddingX;
const uint c_inputPixel = (c_inputPixelY * dataWidth + c_inputPixelX) * dataCount;
// Load input pixels and gradient pixels for c_dataPerLoad images at a time, and calculate filter gradients on them.
for (uint dataIndex = 0; dataIndex < dataCount; dataIndex += c_dataPerLoad)
{
const uint cacheLoadSlide = (c_blockWidth * c_blockHeight) / c_dataPerLoad;
if (!c_lastBatch || dataIndex + c_dataLoadIndex < dataCount)
{
// Load inputs cache.
if (c_cacheLoadIndex < c_channelsPerBlock)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerBlock; channelIndex += cacheLoadSlide)
{
if (c_channelsPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + channelIndex < c_channelsPerBlock)
{
inputCacheLoad[channelIndex * c_dataPerLoad] = inputBuffer[channelIndex * dataSize * dataCount + c_inputPixel + dataIndex];
}
}
}
// Load gradients cache.
if (c_cacheLoadIndex < c_filtersPerBlock)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCacheLoad[filterIndex * (c_dataPerLoad + 1)] = preactivationGradients[filterIndex * c_numPatches * dataCount + c_patch * dataCount + dataIndex];
}
}
}
}
else
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerBlock; channelIndex += cacheLoadSlide)
{
if (c_channelsPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + channelIndex < c_channelsPerBlock)
{
inputCacheLoad[channelIndex * c_dataPerLoad] = 0.f;
}
}
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerBlock; filterIndex += cacheLoadSlide)
{
if (c_filtersPerBlock % cacheLoadSlide == 0 || c_cacheLoadIndex + filterIndex < c_filtersPerBlock)
{
gradientsCacheLoad[filterIndex * (c_dataPerLoad + 1)] = 0.f;
}
}
}
__syncthreads();
#pragma unroll
for (uint loadedDataIndex = 0; loadedDataIndex < c_dataPerLoad; ++loadedDataIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
threadGradients[channelIndex][filterIndex] += inputCache[channelIndex * c_blockHeight + threadIdx.y][loadedDataIndex] *
gradientsCache[filterIndex * c_blockWidth + threadIdx.x][loadedDataIndex];
}
}
}
__syncthreads();
}
}
}
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_filtersPerThread; ++filterIndex)
{
filtersGradientsPerChunkBuffer[channelIndex * c_blockHeight * numFilters * filterSize + filterIndex * c_blockWidth] = threadGradients[channelIndex][filterIndex];
}
}
}
/*
Aggregates calculated weights gradients from chunks.
*/
__global__ void AggregateWeightsGradientsFromChunks(float* filtersGradientsPerChunkBuffer, uint numChunks, uint filtersBufferLength, const uint batchSize,
float* filtersGradientsBuffer)
{
const uint c_filterIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (c_filterIndex < filtersBufferLength)
{
float filterGradient = 0.f;
for (uint chunkIndex = 0; chunkIndex < numChunks; ++chunkIndex)
{
filterGradient += filtersGradientsPerChunkBuffer[chunkIndex * filtersBufferLength + c_filterIndex];
}
filtersGradientsBuffer[c_filterIndex] = filterGradient / (float)batchSize;
}
}
void ConvolutionalLayer::CalculateWeightsGradients()
{
// Calculating weights gradients for chunks of preactivations.
uint numChunksX = DivideUp(m_numPatchesX, m_preactivationGradientsPerChunkWidth);
uint numChunksY = DivideUp(m_numPatchesY, m_preactivationGradientsPerChunkWidth);
uint numChunks = numChunksX * numChunksY;
dim3 gridDimensions;
uint dataPerLoad, blockWidth, blockHeight;
if (m_inputNumChannels > 3)
{
uint filtersPerThread = m_numFilters % 64 == 0 ? 4 : (m_numFilters % 32 == 0 ? 2 : 1);
blockWidth = m_numFilters % 128 == 0 ? 32 : 16;
uint channelsPerThread = m_inputNumChannels % 64 == 0 ? 8 : (m_inputNumChannels % 48 == 0 ? 6 : (m_inputNumChannels % 32 == 0 ? 8 : 4));
blockHeight = (m_inputNumChannels / channelsPerThread) % 8 == 0 ? 8 : 4;
dataPerLoad = (filtersPerThread * channelsPerThread) < 32 ? 32 : 16;
gridDimensions = dim3(numChunks * (m_numFilters / (blockWidth * filtersPerThread)), m_inputNumChannels / (blockHeight * channelsPerThread), m_filterSize);
}
else
{
uint filtersPerThread = 1;
uint pixelsPerThread = 16;
blockHeight = 16;
blockWidth = 16;
dataPerLoad = 32;
if (m_numFilters % 64 == 0)
{
filtersPerThread = 4;
pixelsPerThread = 2;
blockHeight = 16;
blockWidth = 16;
dataPerLoad = 32;
}
else if (m_numFilters % 48 == 0)
{
filtersPerThread = 3;
pixelsPerThread = 4;
blockHeight = 16;
blockWidth = 16;
dataPerLoad = 32;
}
else if (m_numFilters % 32 == 0)
{
filtersPerThread = 2;
pixelsPerThread = 2;
blockHeight = 8;
blockWidth = 16;
dataPerLoad = 16;
}
gridDimensions = dim3(numChunks * (m_numFilters / (blockWidth * filtersPerThread)), DivideUp(m_filterSize, blockHeight * pixelsPerThread));
}
dim3 blockDimensions(blockWidth, blockHeight);
bool lastBatch = m_inputDataCount % dataPerLoad != 0;
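// When the data count is not a multiple of dataPerLoad, the kernels must guard their loads against reading past
// the last data item, so the c_lastBatch == true template instantiations are used below.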
if (lastBatch)
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 4, 16, 2, 32, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 3, 16, 4, 32, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 2, 8, 2, 16, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 1, 16, 16, 32, 2, 3, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 48 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 6, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 8, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 8, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 4, 32, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
}
}
else
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 4, 16, 2, 32, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 3, 16, 4, 32, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 2, 8, 2, 16, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateInputDataWeightsGradients<16, 1, 16, 16, 32, 2, 3, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 48 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 8, 6, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 8, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 8, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
else
{
if (m_numFilters % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<32, 4, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 4, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 2, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredDataWeightsGradients<16, 1, 4, 4, 32, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_inputDataBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_preactivationGradientsBuffer, m_filterWidth, m_filterHeight,
m_filterSize, m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_preactivationGradientsPerChunkWidth, m_filtersGradientsPerChunkBuffer);
}
}
}
}
// Aggregating calculated weights gradients from chunks.
const uint c_numThreadsPerBlock = 128;
const uint c_filtersBufferLength = (uint)(m_filtersBufferSize / sizeof(float));
const uint c_numBlocks = DivideUp(c_filtersBufferLength, c_numThreadsPerBlock);
const uint c_batchSize = m_parallelismMode == ParallelismMode::Model ? m_inputDataCount : m_tierSize * m_inputDataCount;
LAUNCH_KERNEL_ASYNC(AggregateWeightsGradientsFromChunks, dim3(c_numBlocks), dim3(c_numThreadsPerBlock), m_deviceCalculationStream)(m_filtersGradientsPerChunkBuffer,
numChunks, c_filtersBufferLength, c_batchSize, m_filtersGradientsBuffer);
CudaAssert(cudaGetLastError());
}
/*
Calculates input gradients on input data.
Each thread calculates gradients for one input pixel, for a specified number of data items per thread.
Needs to be a templated function since loops must have compile-time constant bounds to be unrolled.
*/
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_dataPerLoad, uint c_numChannels, uint c_blockImagePatchSize, bool c_lastBatch>
__global__ void CalculateDataInputGradients(float* preactivationGradients, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint stride, const uint numPatchesX, const uint numPatchesY, float* inputGradients)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
// Since the same filters are used by threads in the same block row and the same gradients by threads in the same block column,
// we benefit from caching filters and gradients in shared memory.
__shared__ float filtersCache[c_blockHeight * c_numChannels][c_blockWidth + 1]; // Adding 1 to avoid shared memory bank conflicts.
__shared__ float gradientsCache[c_blockWidth][c_dataPerBlock];
// Positioning preactivation gradients buffer.
const uint c_dataOffset = blockIdx.x * c_dataPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_gradientsCacheFilterIndex = c_threadIndex / c_dataPerLoad;
const uint c_gradientsCacheDataIndex = c_threadIndex % c_dataPerLoad;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += c_gradientsCacheFilterIndex * c_numPatches * dataCount + c_dataOffset + c_gradientsCacheDataIndex;
// Positioning filters buffer.
filtersBuffer += threadIdx.x;
// Positioning input gradients buffer.
const uint c_numBlockImagePatchesX = (dataWidth + c_blockImagePatchSize - 1) / c_blockImagePatchSize;
const uint c_blockImagePatchY = blockIdx.y / c_numBlockImagePatchesX;
const uint c_blockImagePatchX = blockIdx.y % c_numBlockImagePatchesX;
const uint c_pixelOffsetY = c_blockImagePatchY * c_blockImagePatchSize;
const uint c_pixelOffsetX = c_blockImagePatchX * c_blockImagePatchSize;
const uint c_pixelY = c_pixelOffsetY + threadIdx.y / c_blockImagePatchSize;
const uint c_pixelX = c_pixelOffsetX + threadIdx.y % c_blockImagePatchSize;
const bool c_validPixel = c_pixelX < dataWidth && c_pixelY < dataHeight;
inputGradients += (c_pixelY * dataWidth + c_pixelX) * dataCount + c_dataOffset + threadIdx.x;
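// Grid organization implied by the block indices above: blockIdx.x selects a group of c_dataPerBlock data items,
// while blockIdx.y selects a c_blockImagePatchSize x c_blockImagePatchSize patch of image pixels; each threadIdx.y
// within the block maps to one pixel of that patch.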
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_numChannels][c_dataPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] = 0.f;
}
}
// Calculating this thread's gradients.
const uint c_firstPatchX = c_pixelOffsetX + paddingX < filterWidth ? 0 : (c_pixelOffsetX + paddingX - filterWidth) / stride + 1;
const uint c_firstPatchY = c_pixelOffsetY + paddingY < filterHeight ? 0 : (c_pixelOffsetY + paddingY - filterHeight) / stride + 1;
const uint c_lastPatchX = min(numPatchesX, (c_pixelOffsetX + c_blockImagePatchSize - 1 + paddingX) / stride + 1);
const uint c_lastPatchY = min(numPatchesY, (c_pixelOffsetY + c_blockImagePatchSize - 1 + paddingY) / stride + 1);
float* filtersCacheLoad = &filtersCache[threadIdx.y][threadIdx.x];
float* gradientsCacheLoad = &gradientsCache[c_gradientsCacheFilterIndex][c_gradientsCacheDataIndex];
for (uint currPatchY = c_firstPatchY; currPatchY < c_lastPatchY; ++currPatchY)
{
const int c_filterPixelY = (int)c_pixelY + paddingY - (int)(currPatchY * stride);
for (uint currPatchX = c_firstPatchX; currPatchX < c_lastPatchX; ++currPatchX)
{
const int c_filterPixelX = (int)c_pixelX + paddingX - (int)(currPatchX * stride);
const uint c_filterPixel = c_filterPixelY * filterWidth + c_filterPixelX;
const uint c_currPatch = currPatchY * numPatchesX + currPatchX;
const bool c_validFilterPixel = c_filterPixelX >= 0 && c_filterPixelX < filterWidth && c_filterPixelY >= 0 && c_filterPixelY < filterHeight;
for (uint currFilter = 0; currFilter < numFilters; currFilter += c_blockWidth)
{
// Load gradients cache
const float* preactivationGradientsBufferLoad = preactivationGradients + (currFilter * c_numPatches + c_currPatch) * dataCount;
const uint c_dataLoadSlide = c_blockWidth * c_blockHeight / c_dataPerLoad;
#pragma unroll
for (uint dataToLoadIndex = 0; dataToLoadIndex < c_dataPerBlock; dataToLoadIndex += c_dataPerLoad)
{
if (!c_lastBatch || c_dataOffset + dataToLoadIndex + c_gradientsCacheDataIndex < dataCount)
{
#pragma unroll
for (uint filterToLoad = 0; filterToLoad < c_blockWidth; filterToLoad += c_dataLoadSlide)
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataToLoadIndex] = preactivationGradientsBufferLoad[filterToLoad * c_numPatches * dataCount];
}
}
else
{
#pragma unroll
for (uint filterToLoad = 0; filterToLoad < c_blockWidth; filterToLoad += c_dataLoadSlide)
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataToLoadIndex] = 0.f;
}
}
}
if (c_validPixel && c_validFilterPixel)
{
// Load filters cache.
const float* filtersBufferLoad = filtersBuffer + c_filterPixel * numFilters + currFilter;
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
filtersCacheLoad[channelIndex * c_blockHeight * (c_blockWidth + 1)] = filtersBufferLoad[channelIndex * filterSize * numFilters];
}
}
__syncthreads();
if (c_validPixel && c_validFilterPixel)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_blockWidth; ++filterIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] += filtersCache[channelIndex * c_blockHeight + threadIdx.y][filterIndex] *
gradientsCache[filterIndex][dataIndex * c_blockWidth + threadIdx.x];
}
}
}
}
__syncthreads();
}
}
}
if (c_validPixel)
{
// Writing gradients calculated by this thread into the gradients buffer.
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + threadIdx.x + dataIndex * c_blockWidth < dataCount)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_numChannels; ++channelIndex)
{
inputGradients[channelIndex * dataSize * dataCount + dataIndex * c_blockWidth] = threadGradients[channelIndex][dataIndex];
}
}
}
}
}
/*
Calculates input gradients of filtered data (resulting from previously applied filters).
Each thread calculates gradients for one input pixel, for a specified number of data items per thread and a specified number of channels per thread.
The grid is organized so that each row of blocks works on one pixel of data, and each column of blocks works on different data,
or on the same data but on a different channel.
Columns are ordered first by channel, then by data.
Needs to be a templated function since loops must have compile-time constant bounds to be unrolled.
*/
template <uint c_blockWidth, uint c_dataPerThread, uint c_blockHeight, uint c_channelsPerThread, uint c_filtersCacheLength, uint c_gradientsCacheLength, bool c_lastBatch>
__global__ void CalculateFilteredInputGradients(float* preactivationGradients, const uint dataWidth, const uint dataHeight, const uint dataSize, const uint dataCount,
const int paddingX, const int paddingY, float* filtersBuffer, const uint filterWidth, const uint filterHeight, const uint filterSize, const uint numFilters,
const uint numFilterChannels, const uint stride, const uint numPatchesX, const uint numPatchesY, float* inputGradients)
{
const uint c_dataPerBlock = c_blockWidth * c_dataPerThread;
const uint c_channelsPerBlock = c_blockHeight * c_channelsPerThread;
// Since the same filters are used by threads in the same block row and the same gradients by threads in the same block column,
// we benefit from caching filters and gradients in shared memory.
__shared__ float filtersCache[c_channelsPerBlock][c_filtersCacheLength];
__shared__ float gradientsCache[c_gradientsCacheLength][c_dataPerBlock];
// Positioning preactivation gradients buffer.
const uint c_blocksPerChannel = gridDim.x / (numFilterChannels / c_channelsPerBlock);
const uint c_dataOffset = (blockIdx.x % c_blocksPerChannel) * c_dataPerBlock;
const uint c_gradientsCacheFilterIndex = threadIdx.y;
const uint c_gradientsCacheDataIndex = threadIdx.x;
const uint c_numPatches = numPatchesX * numPatchesY;
preactivationGradients += c_gradientsCacheFilterIndex * c_numPatches * dataCount + c_dataOffset + c_gradientsCacheDataIndex;
// Positioning filters buffer; it will be loaded into the cache window by window, where a window has dimensions ChannelsPerBlock x FiltersCacheLength.
const uint c_dataChannelIndex = (blockIdx.x / c_blocksPerChannel) * c_channelsPerBlock;
const uint c_threadIndex = threadIdx.y * c_blockWidth + threadIdx.x;
const uint c_filtersCacheChannelIndex = c_threadIndex / c_filtersCacheLength;
const uint c_filtersCachePosition = c_threadIndex % c_filtersCacheLength;
filtersBuffer += (c_dataChannelIndex + c_filtersCacheChannelIndex) * numFilters * filterSize + c_filtersCachePosition;
// Positioning input gradients buffer.
inputGradients += ((c_dataChannelIndex + threadIdx.y) * dataSize + blockIdx.y) * dataCount + c_dataOffset + threadIdx.x;
// Initializing buffer for gradients calculated by this thread.
float threadGradients[c_channelsPerThread][c_dataPerThread];
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] = 0.f;
}
}
// Calculating this thread's gradients.
const uint c_pixelY = blockIdx.y / dataWidth;
const uint c_pixelX = blockIdx.y % dataWidth;
const uint c_firstPatchX = c_pixelX + paddingX < filterWidth ? 0 : (c_pixelX + paddingX - filterWidth) / stride + 1;
const uint c_firstPatchY = c_pixelY + paddingY < filterHeight ? 0 : (c_pixelY + paddingY - filterHeight) / stride + 1;
const uint c_lastPatchX = min(numPatchesX, (c_pixelX + paddingX) / stride + 1);
const uint c_lastPatchY = min(numPatchesY, (c_pixelY + paddingY) / stride + 1);
float* filtersCacheLoad = &filtersCache[c_filtersCacheChannelIndex][c_filtersCachePosition];
float* gradientsCacheLoad = &gradientsCache[c_gradientsCacheFilterIndex][c_gradientsCacheDataIndex];
for (uint currPatchY = c_firstPatchY; currPatchY < c_lastPatchY; ++currPatchY)
{
const uint c_filterPixelY = c_pixelY + paddingY - currPatchY * stride;
for (uint currPatchX = c_firstPatchX; currPatchX < c_lastPatchX; ++currPatchX)
{
const uint c_filterPixelX = c_pixelX + paddingX - currPatchX * stride;
const uint c_filterPixel = c_filterPixelY * filterWidth + c_filterPixelX;
const uint c_currPatch = currPatchY * numPatchesX + currPatchX;
for (uint currFilter = 0; currFilter < numFilters; currFilter += c_filtersCacheLength)
{
const float* filtersBufferLoad = filtersBuffer + c_filterPixel * numFilters + currFilter;
// Load filters cache window.
const uint channelToLoadSlide = c_blockWidth * c_blockHeight / c_filtersCacheLength;
#pragma unroll
for (uint channelToLoad = 0; channelToLoad < c_channelsPerBlock; channelToLoad += channelToLoadSlide)
{
if (c_channelsPerBlock % channelToLoadSlide == 0 || channelToLoad + c_filtersCacheChannelIndex < c_channelsPerBlock)
{
filtersCacheLoad[channelToLoad * c_filtersCacheLength] = filtersBufferLoad[channelToLoad * filterSize * numFilters];
}
}
for (uint currGradientFilter = currFilter; currGradientFilter < currFilter + c_filtersCacheLength; currGradientFilter += c_gradientsCacheLength)
{
// Load gradients cache window.
const float* preactivationGradientsBufferLoad = preactivationGradients + (currGradientFilter * c_numPatches + c_currPatch) * dataCount;
#pragma unroll
for (uint filterToLoad = 0; filterToLoad < c_gradientsCacheLength; filterToLoad += c_blockHeight)
{
if (c_gradientsCacheLength % c_blockHeight == 0 || c_gradientsCacheFilterIndex + filterToLoad < c_gradientsCacheLength)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerBlock; dataIndex += c_blockWidth)
{
if (!c_lastBatch || c_dataOffset + c_gradientsCacheDataIndex + dataIndex < dataCount)
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataIndex] = preactivationGradientsBufferLoad[filterToLoad * c_numPatches * dataCount + dataIndex];
}
else
{
gradientsCacheLoad[filterToLoad * c_dataPerBlock + dataIndex] = 0.f;
}
}
}
}
__syncthreads();
// Calculate gradients from cache.
#pragma unroll
for (uint filterIndex = 0; filterIndex < c_gradientsCacheLength; ++filterIndex)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
threadGradients[channelIndex][dataIndex] += filtersCache[channelIndex * c_blockHeight + threadIdx.y][currGradientFilter - currFilter + filterIndex] *
gradientsCache[filterIndex][dataIndex * c_blockWidth + threadIdx.x];
}
}
}
__syncthreads();
}
}
}
}
// Writing this thread's calculated gradients into the gradients buffer.
#pragma unroll
for (uint dataIndex = 0; dataIndex < c_dataPerThread; ++dataIndex)
{
if (!c_lastBatch || c_dataOffset + threadIdx.x + dataIndex * c_blockWidth < dataCount)
{
#pragma unroll
for (uint channelIndex = 0; channelIndex < c_channelsPerThread; ++channelIndex)
{
inputGradients[channelIndex * c_blockHeight * dataSize * dataCount + dataIndex * c_blockWidth] = threadGradients[channelIndex][dataIndex];
}
}
}
}
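// Added commentary (not part of the original source): the kernel above back-propagates
// through the convolution. For each input pixel it visits every output patch that covers
// the pixel and every filter, accumulating filterWeight * preactivationGradient through
// the shared-memory filter and gradient caches, i.e. a transposed convolution. The host
// code below then picks a template specialization based on how the batch size, channel
// count and filter count divide; the template arguments hard-code the tiling (block
// shape, data/channels per thread, cache lengths, last-batch flag). The exact argument
// order is inferred from the launch configurations and should be treated as an assumption.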
void ConvolutionalLayer::CalculateInputGradients()
{
if (m_inputNumChannels < 3)
{
ShipAssert(false, "Currently not supported!");
}
else if (m_inputNumChannels == 3)
{
uint dataPerThread = m_inputDataCount % 128 == 0 ? 8 : (m_inputDataCount % 64 == 0 ? 4 : 2);
uint blockWidth = 16;
uint blockHeight = 16;
// Block image patch size needs to be the square root of the block height!
// Noted here as a constant to avoid the sqrt computation, since blockHeight is already hardcoded.
uint blockImagePatchSize = 4;
dim3 blockDimensions(blockWidth, blockHeight);
dim3 gridDimensions(DivideUp(m_inputDataCount, blockWidth * dataPerThread), DivideUp(m_inputDataWidth, blockImagePatchSize) * DivideUp(m_inputDataHeight, blockImagePatchSize));
bool lastBatch = m_inputDataCount % (blockWidth * dataPerThread) != 0;
if (lastBatch)
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 2, 16, 32, 3, 4, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 8, 16, 32, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 4, 16, 32, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateDataInputGradients<16, 2, 16, 32, 3, 4, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
else if (m_inputNumChannels % 8 == 0)
{
uint dataPerThread = m_inputDataCount % 128 == 0 ? 4 : (m_inputDataCount % 64 == 0 ? 2 : 1);
uint channelsPerThread = m_inputNumChannels % 64 == 0 ? 8 :
(m_inputNumChannels % 48 == 0 ? 12 :
(m_inputNumChannels % 32 == 0 ? 8 :
(m_inputNumChannels % 16 == 0 ? 4 : 2)));
uint blockWidth = 32;
uint blockHeight = m_inputNumChannels % 64 == 0 ? 8 : 4;
dim3 blockDimensions(blockWidth, blockHeight);
dim3 gridDimensions(DivideUp(m_inputDataCount, blockWidth * dataPerThread) * (m_inputNumChannels / (blockHeight * channelsPerThread)),
m_inputDataSize);
bool lastBatch = m_inputDataCount % (blockWidth * dataPerThread) != 0;
if (lastBatch)
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 32, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else if (m_inputNumChannels % 48 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 12, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 32 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 32, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else if (m_inputNumChannels % 16 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 4, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 2, 16, 16, true>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputNumChannels % 64 == 0)
{
if (m_numFilters % 32 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 8, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 8, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 8, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 8, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 8, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
else if (m_inputNumChannels % 48 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 12, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 12, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 12, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else if (m_inputNumChannels % 32 == 0)
{
if (m_numFilters % 32 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 32, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 8, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
else if (m_inputNumChannels % 16 == 0)
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 4, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 4, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 4, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
else
{
if (m_inputDataCount % 128 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 4, 4, 2, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else if (m_inputDataCount % 64 == 0)
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 2, 4, 2, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
else
{
LAUNCH_KERNEL_ASYNC((CalculateFilteredInputGradients<32, 1, 4, 2, 16, 16, false>), gridDimensions, blockDimensions, m_deviceCalculationStream)(m_preactivationGradientsBuffer,
m_inputDataWidth, m_inputDataHeight, m_inputDataSize, m_inputDataCount, m_paddingX, m_paddingY, m_filtersBuffer, m_filterWidth, m_filterHeight, m_filterSize,
m_numFilters, m_numFilterChannels, m_stride, m_numPatchesX, m_numPatchesY, m_inputGradientsBuffer);
}
}
}
}
else if (m_inputNumChannels % 4 == 0)
{
ShipAssert(false, "Currently not supported!");
}
CudaAssert(cudaGetLastError());
}
void ConvolutionalLayer::CalculatePreactivationsGradients()
{
CalculatePreactivationGradients(m_activationType, m_activationGradientsBuffer, m_activationDataBuffer, (uint)(m_activationBufferSize / sizeof(float)),
m_preactivationGradientsBuffer, m_deviceCalculationStream);
}
void ConvolutionalLayer::DoBackwardProp()
{
CalculatePreactivationsGradients();
CalculateInputGradients();
CalculateWeightsGradients();
CalculateBiasesGradients();
}
void ConvolutionalLayer::UpdateLayerParameters(float learningProgress)
{
CommonUpdateLayerParameters(learningProgress, m_filtersBuffer, m_filtersGradientsBuffer, m_filtersUpdateBuffer, (uint)(m_filtersBufferSize / sizeof(float)),
m_filtersUpdateMomentum, m_filtersUpdateLearningRateProgressStep, m_filtersUpdateStartingLearningRate, m_filtersUpdateLearningRateUpdateFactor,
m_filtersUpdateDecay, m_biasesBuffer, m_biasesGradientsBuffer, m_biasesUpdateBuffer, (uint)(m_biasesBufferSize / sizeof(float)), m_biasesUpdateMomentum,
m_biasesUpdateLearningRateProgressStep, m_biasesUpdateStartingLearningRate, m_biasesUpdateLearningRateUpdateFactor, m_biasesUpdateDecay);
} |
782dd94c984e0a839956715fc5d9dcda984c7c3d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <sys/time.h>
#include <time.h>
#include <hip/hip_runtime.h>
//#include <rocblas.h>
// CUDA and CUBLAS functions
#include <helper_functions.h>
#include <helper_cuda.h>
#define BLOCK_DIM 16
/**
* Computes the squared Euclidean distance matrix between the query points and the reference points.
*
* @param ref reference points stored in the global memory
* @param ref_width number of reference points
* @param ref_pitch pitch of the reference points array in number of columns
* @param query query points stored in the global memory
* @param query_width number of query points
* @param query_pitch pitch of the query points array in number of columns
* @param height dimension of points = height of texture `ref` and of the array `query`
* @param dist array containing the query_width x ref_width computed distances
*/
__global__ void compute_distances(float * ref,
int ref_width,
int ref_pitch,
float * query,
int query_width,
int query_pitch,
int height,
float * dist) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Initialization of the SSD for the current thread
float ssd = 0.f;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Conditions
int cond0 = (begin_A + tx < ref_width); // used to write in shared memory
int cond1 = (begin_B + tx < query_width); // used to write in shared memory, in computations, and to write in the output array
int cond2 = (begin_A + ty < ref_width); // used in computations and to write in the output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrices; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
}
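// Added note (not part of the original source): each BLOCK_DIM x BLOCK_DIM thread block
// computes one BLOCK_DIM x BLOCK_DIM tile of the ref_width x query_width distance matrix.
// The loop above walks the `height` (point dimension) axis in BLOCK_DIM-sized chunks,
// staging a tile of the reference points and a tile of the query points in shared memory
// before accumulating the squared differences.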
/**
* Computes the squared Euclidean distance matrix between the query points and the reference points.
*
* @param ref reference points stored in the texture memory
* @param ref_width number of reference points
* @param query query points stored in the global memory
* @param query_width number of query points
* @param query_pitch pitch of the query points array in number of columns
* @param height dimension of points = height of texture `ref` and of the array `query`
* @param dist array containing the query_width x ref_width computed distances
*/
__global__ void compute_distance_texture(hipTextureObject_t ref,
int ref_width,
float * query,
int query_width,
int query_pitch,
int height,
float* dist) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<query_width && yIndex<ref_width) {
float ssd = 0.f;
for (int i=0; i<height; i++) {
float tmp = tex2D<float>(ref, (float)yIndex, (float)i) - query[i * query_pitch + xIndex];
ssd += tmp * tmp;
}
dist[yIndex * query_pitch + xIndex] = ssd;
}
}
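// Added note (not part of the original source): unlike compute_distances above, this
// variant does not tile through shared memory. Each thread handles one (query, reference)
// pair and reads the reference coordinates through the texture object, which
// knn_cuda_texture below configures with point filtering and clamped addressing.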
/**
* For each query point (i.e. each column) finds the k smallest distances
* of the distance matrix and their respective indexes and gathers them at the top
* of the 2 arrays.
*
* Since we only need to locate the k smallest distances, sorting the entire array
* would not be very efficient if k is relatively small. Instead, we perform a
* simple insertion sort by eventually inserting a given distance in the first
* k values.
*
* @param dist distance matrix
* @param dist_pitch pitch of the distance matrix given in number of columns
* @param index index matrix
* @param index_pitch pitch of the index matrix given in number of columns
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix
* @param k number of values to find
*/
__global__ void modified_insertion_sort(float * dist,
int dist_pitch,
int * index,
int index_pitch,
int width,
int height,
int k){
// Column position
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
// Do nothing if we are out of bounds
if (xIndex < width) {
// Pointer shift
float * p_dist = dist + xIndex;
int * p_index = index + xIndex;
// Initialise the first index
p_index[0] = 0;
// Go through all points
for (int i=1; i<height; ++i) {
// Store current distance and associated index
float curr_dist = p_dist[i*dist_pitch];
int curr_index = i;
// Skip the current value if its index is >= k and if it is higher than the k-th already-sorted smallest value
if (i >= k && curr_dist >= p_dist[(k-1)*dist_pitch]) {
continue;
}
// Shift values (and indexes) higher than the current distance to the right
int j = min(i, k-1);
while (j > 0 && p_dist[(j-1)*dist_pitch] > curr_dist) {
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_index[j*index_pitch] = p_index[(j-1)*index_pitch];
--j;
}
// Write the current distance and index at their position
p_dist[j*dist_pitch] = curr_dist;
p_index[j*index_pitch] = curr_index;
}
}
}
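// Added worked example (not part of the original source), assuming k = 3 and one column
// holding the distances [7, 2, 5, 9, 1]:
//   i=1 (2): inserted before 7          -> dist [2, 7, ...], index [1, 0, ...]
//   i=2 (5): inserted between 2 and 7   -> dist [2, 5, 7],   index [1, 2, 0]
//   i=3 (9): 9 >= current k-th best (7) -> skipped by the early `continue`
//   i=4 (1): inserted at the front      -> dist [1, 2, 5],   index [4, 1, 2]
// Each column therefore ends with its k smallest distances in ascending order, paired
// with the row indexes of the corresponding reference points.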
/**
* Computes the square root of the first k lines of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param k number of values to consider
*/
__global__ void compute_sqrt(float * dist, int width, int pitch, int k){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
/**
* Computes the squared norm of each column of the input array.
*
* @param array input array
* @param width number of columns of `array` = number of points
* @param pitch pitch of `array` in number of columns
* @param height number of rows of `array` = dimension of the points
* @param norm output array containing the squared norm values
*/
__global__ void compute_squared_norm(float * array, int width, int pitch, int height, float * norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
float sum = 0.f;
for (int i=0; i<height; i++){
float val = array[i*pitch+xIndex];
sum += val*val;
}
norm[xIndex] = sum;
}
}
/**
* Adds the reference points norm (column vector) to each column of the input array.
*
* @param array input array
* @param width number of columns of `array` = number of points
* @param pitch pitch of `array` in number of columns
* @param height number of rows of `array` = dimension of the points
* @param norm reference points norm stored as a column vector
*/
__global__ void add_reference_points_norm(float * array, int width, int pitch, int height, float * norm){
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int xIndex = blockIdx.x * blockDim.x + tx;
unsigned int yIndex = blockIdx.y * blockDim.y + ty;
__shared__ float shared_vec[16];
if (tx==0 && yIndex<height)
shared_vec[ty] = norm[yIndex];
__syncthreads();
if (xIndex<width && yIndex<height)
array[yIndex*pitch+xIndex] += shared_vec[ty];
}
/**
* Adds the query points norm (row vector) to the k first lines of the input
* array and computes the square root of the resulting values.
*
* @param array input array
* @param width number of columns of `array` = number of points
* @param pitch pitch of `array` in number of columns
* @param k number of neighbors to consider
* @param norm query points norm stored as a row vector
*/
__global__ void add_query_points_norm_and_sqrt(float * array, int width, int pitch, int k, float * norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
array[yIndex*pitch + xIndex] = sqrt(array[yIndex*pitch + xIndex] + norm[xIndex]);
}
bool knn_cuda_global(const float * ref,
int ref_nb,
const float * query,
int query_nb,
int dim,
int k,
float * knn_dist,
int * knn_index,int nb_iterations, double &elapsed_time) {
// Constants
const unsigned int size_of_float = sizeof(float);
const unsigned int size_of_int = sizeof(int);
// Return variables
hipError_t err0, err1, err2, err3;
#ifndef METRIC_RUN_MAIN
// Check that we have at least one CUDA device
int nb_devices;
err0 = hipGetDeviceCount(&nb_devices);
if (err0 != hipSuccess || nb_devices == 0) {
printf("ERROR: No CUDA device found\n");
return false;
}
// Select the first CUDA device as default
err0 = hipSetDevice(0);
if (err0 != hipSuccess) {
printf("ERROR: Cannot set the chosen CUDA device\n");
return false;
}
#endif
// Allocate global memory
float * ref_dev = NULL;
float * query_dev = NULL;
float * dist_dev = NULL;
int * index_dev = NULL;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t index_pitch_in_bytes;
err0 = hipMallocPitch((void**)&ref_dev, &ref_pitch_in_bytes, ref_nb * size_of_float, dim);
err1 = hipMallocPitch((void**)&query_dev, &query_pitch_in_bytes, query_nb * size_of_float, dim);
err2 = hipMallocPitch((void**)&dist_dev, &dist_pitch_in_bytes, query_nb * size_of_float, ref_nb);
err3 = hipMallocPitch((void**)&index_dev, &index_pitch_in_bytes, query_nb * size_of_int, k);
if (err0 != hipSuccess || err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess) {
printf("ERROR: Memory allocation error\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Deduce pitch values
size_t ref_pitch = ref_pitch_in_bytes / size_of_float;
size_t query_pitch = query_pitch_in_bytes / size_of_float;
size_t dist_pitch = dist_pitch_in_bytes / size_of_float;
size_t index_pitch = index_pitch_in_bytes / size_of_int;
// Check pitch values
if (query_pitch != dist_pitch || query_pitch != index_pitch) {
printf("ERROR: Invalid pitch value\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Copy reference and query data from the host to the device
err0 = hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref, ref_nb * size_of_float, ref_nb * size_of_float, dim, hipMemcpyHostToDevice);
err1 = hipMemcpy2D(query_dev, query_pitch_in_bytes, query, query_nb * size_of_float, query_nb * size_of_float, dim, hipMemcpyHostToDevice);
if (err0 != hipSuccess || err1 != hipSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Compute the squared Euclidean distances
dim3 block0(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid0(query_nb / BLOCK_DIM, ref_nb / BLOCK_DIM, 1);
if (query_nb % BLOCK_DIM != 0) grid0.x += 1;
if (ref_nb % BLOCK_DIM != 0) grid0.y += 1;
#ifdef ONLY_TIME_KERNELS
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
#endif
//struct timeval tic;
//gettimeofday(&tic, NULL);
for(int i=0; i<nb_iterations;i++)
{
#endif
hipLaunchKernelGGL(( compute_distances), dim3(grid0), dim3(block0), 0, 0, ref_dev, ref_nb, ref_pitch, query_dev, query_nb, query_pitch, dim, dist_dev);
/*
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
*/
// Sort the distances with their respective indexes
dim3 block1(256, 1, 1);
dim3 grid1(query_nb / 256, 1, 1);
if (query_nb % 256 != 0) grid1.x += 1;
hipLaunchKernelGGL(( modified_insertion_sort), dim3(grid1), dim3(block1), 0, 0, dist_dev, dist_pitch, index_dev, index_pitch, query_nb, ref_nb, k);
/*
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
*/
// Compute the square root of the k smallest distances
dim3 block2(16, 16, 1);
dim3 grid2(query_nb / 16, k / 16, 1);
if (query_nb % 16 != 0) grid2.x += 1;
if (k % 16 != 0) grid2.y += 1;
hipLaunchKernelGGL(( compute_sqrt), dim3(grid2), dim3(block2), 0, 0, dist_dev, query_nb, query_pitch, k);
/*
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
*/
#ifdef ONLY_TIME_KERNELS
}
float msecTotal = 0.0f;
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
#endif
elapsed_time=((msecTotal/nb_iterations)/1000);
#endif
// Copy k smallest distances / indexes from the device to the host
err0 = hipMemcpy2D(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch_in_bytes, query_nb * size_of_float, k, hipMemcpyDeviceToHost);
err1 = hipMemcpy2D(knn_index, query_nb * size_of_int, index_dev, index_pitch_in_bytes, query_nb * size_of_int, k, hipMemcpyDeviceToHost);
if (err0 != hipSuccess || err1 != hipSuccess) {
printf("ERROR: Unable to copy data from device to host\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Memory clean-up
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return true;
}
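// ---------------------------------------------------------------------------
// Added usage sketch (not part of the original source). It illustrates how
// knn_cuda_global could be called from the host, assuming the layouts implied
// by the hipMemcpy2D calls above: ref and query store one dimension per row
// (ref[d * ref_nb + i] is coordinate d of reference point i), and the outputs
// store one neighbor rank per row (knn_dist[j * query_nb + q] is the distance
// to the j-th nearest neighbor of query q). The guard macro
// KNN_CUDA_USAGE_EXAMPLE and everything inside it are hypothetical additions.
#ifdef KNN_CUDA_USAGE_EXAMPLE
#include <vector>
static void knn_cuda_global_usage_example() {
    const int ref_nb = 4096;
    const int query_nb = 1024;
    const int dim = 64;
    const int k = 16;
    // Fill the point sets with arbitrary deterministic values.
    std::vector<float> ref(static_cast<size_t>(dim) * ref_nb);
    std::vector<float> query(static_cast<size_t>(dim) * query_nb);
    for (size_t i = 0; i < ref.size(); ++i) ref[i] = static_cast<float>(i % 97) * 0.01f;
    for (size_t i = 0; i < query.size(); ++i) query[i] = static_cast<float>(i % 89) * 0.01f;
    // Output buffers: k rows of query_nb entries each.
    std::vector<float> knn_dist(static_cast<size_t>(k) * query_nb);
    std::vector<int> knn_index(static_cast<size_t>(k) * query_nb);
    double elapsed_time = 0.0;
    if (knn_cuda_global(ref.data(), ref_nb, query.data(), query_nb, dim, k,
                        knn_dist.data(), knn_index.data(), /*nb_iterations=*/1, elapsed_time)) {
        // knn_index[q] is the nearest reference point of query q (rank 0 row).
        printf("Nearest neighbor of query 0: index %d, distance %f\n",
               knn_index[0], knn_dist[0]);
    }
}
#endif // KNN_CUDA_USAGE_EXAMPLE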
bool knn_cuda_texture(const float * ref,
int ref_nb,
const float * query,
int query_nb,
int dim,
int k,
float * knn_dist,
int * knn_index,int nb_iterations, double &elapsed_time) {
// Constants
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Return variables
hipError_t err0, err1, err2;
#ifndef METRIC_RUN_MAIN
// Check that we have at least one CUDA device
int nb_devices;
err0 = hipGetDeviceCount(&nb_devices);
if (err0 != hipSuccess || nb_devices == 0) {
printf("ERROR: No CUDA device found\n");
return false;
}
// Select the first CUDA device as default
err0 = hipSetDevice(0);
if (err0 != hipSuccess) {
printf("ERROR: Cannot set the chosen CUDA device\n");
return false;
}
#endif
// Allocate global memory
float * query_dev = NULL;
float * dist_dev = NULL;
int * index_dev = NULL;
size_t query_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t index_pitch_in_bytes;
err0 = hipMallocPitch((void**)&query_dev, &query_pitch_in_bytes, query_nb * size_of_float, dim);
err1 = hipMallocPitch((void**)&dist_dev, &dist_pitch_in_bytes, query_nb * size_of_float, ref_nb);
err2 = hipMallocPitch((void**)&index_dev, &index_pitch_in_bytes, query_nb * size_of_int, k);
if (err0 != hipSuccess || err1 != hipSuccess || err2 != hipSuccess) {
printf("ERROR: Memory allocation error (hipMallocPitch)\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Deduce pitch values
size_t query_pitch = query_pitch_in_bytes / size_of_float;
size_t dist_pitch = dist_pitch_in_bytes / size_of_float;
size_t index_pitch = index_pitch_in_bytes / size_of_int;
// Check pitch values
if (query_pitch != dist_pitch || query_pitch != index_pitch) {
printf("ERROR: Invalid pitch value\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Copy query data from the host to the device
err0 = hipMemcpy2D(query_dev, query_pitch_in_bytes, query, query_nb * size_of_float, query_nb * size_of_float, dim, hipMemcpyHostToDevice);
if (err0 != hipSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Allocate CUDA array for reference points
hipArray* ref_array_dev = NULL;
hipChannelFormatDesc channel_desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
err0 = hipMallocArray(&ref_array_dev, &channel_desc, ref_nb, dim);
if (err0 != hipSuccess) {
printf("ERROR: Memory allocation error (hipMallocArray)\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
return false;
}
// Copy reference points from host to device
err0 = hipMemcpyToArray(ref_array_dev, 0, 0, ref, ref_nb * size_of_float * dim, hipMemcpyHostToDevice);
if (err0 != hipSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
return false;
}
// Resource descriptor
struct hipResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = hipResourceTypeArray;
res_desc.res.array.array = ref_array_dev;
// Texture descriptor
struct hipTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = hipAddressModeClamp;
tex_desc.addressMode[1] = hipAddressModeClamp;
tex_desc.filterMode = hipFilterModePoint;
tex_desc.readMode = hipReadModeElementType;
tex_desc.normalizedCoords = 0;
// Create the texture
hipTextureObject_t ref_tex_dev = 0;
err0 = hipCreateTextureObject(&ref_tex_dev, &res_desc, &tex_desc, NULL);
if (err0 != hipSuccess) {
printf("ERROR: Unable to create the texture\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
return false;
}
// Compute the squared Euclidean distances
dim3 block0(16, 16, 1);
dim3 grid0(query_nb / 16, ref_nb / 16, 1);
if (query_nb % 16 != 0) grid0.x += 1;
if (ref_nb % 16 != 0) grid0.y += 1;
#ifdef ONLY_TIME_KERNELS
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
#endif
//struct timeval tic;
//gettimeofday(&tic, NULL);
for(int i=0; i<nb_iterations;i++)
{
#endif
hipLaunchKernelGGL(( compute_distance_texture), dim3(grid0), dim3(block0), 0, 0, ref_tex_dev, ref_nb, query_dev, query_nb, query_pitch, dim, dist_dev);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
hipDestroyTextureObject(ref_tex_dev);
return false;
}
// Sort the distances with their respective indexes
dim3 block1(256, 1, 1);
dim3 grid1(query_nb / 256, 1, 1);
if (query_nb % 256 != 0) grid1.x += 1;
hipLaunchKernelGGL(( modified_insertion_sort), dim3(grid1), dim3(block1), 0, 0, dist_dev, dist_pitch, index_dev, index_pitch, query_nb, ref_nb, k);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
hipDestroyTextureObject(ref_tex_dev);
return false;
}
// Compute the square root of the k smallest distances
dim3 block2(16, 16, 1);
dim3 grid2(query_nb / 16, k / 16, 1);
if (query_nb % 16 != 0) grid2.x += 1;
if (k % 16 != 0) grid2.y += 1;
hipLaunchKernelGGL(( compute_sqrt), dim3(grid2), dim3(block2), 0, 0, dist_dev, query_nb, query_pitch, k);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
hipDestroyTextureObject(ref_tex_dev);
return false;
}
#ifdef ONLY_TIME_KERNELS
}
float msecTotal = 0.0f;
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
#endif
elapsed_time=((msecTotal/nb_iterations)/1000);
#endif
// Copy k smallest distances / indexes from the device to the host
err0 = hipMemcpy2D(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch_in_bytes, query_nb * size_of_float, k, hipMemcpyDeviceToHost);
err1 = hipMemcpy2D(knn_index, query_nb * size_of_int, index_dev, index_pitch_in_bytes, query_nb * size_of_int, k, hipMemcpyDeviceToHost);
if (err0 != hipSuccess || err1 != hipSuccess) {
printf("ERROR: Unable to copy data from device to host\n");
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
hipDestroyTextureObject(ref_tex_dev);
return false;
}
// Memory clean-up
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFreeArray(ref_array_dev);
hipDestroyTextureObject(ref_tex_dev);
return true;
}
bool knn_cublas(const float * ref,
int ref_nb,
const float * query,
int query_nb,
int dim,
int k,
float * knn_dist,
int * knn_index,int nb_iterations, double & elapsed_time) {
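// Added note (not part of the original source): this variant relies on the expansion
//   ||q - r||^2 = ||r||^2 - 2 * (q . r) + ||q||^2.
// The steps below compute the squared norms of both point sets, let SGEMM fill the
// distance buffer with -2 * query * ref^T, add the reference norms row by row,
// partially sort each column, and finally add the query norms and take the square
// root of the k best entries.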
// Constants
const unsigned int size_of_float = sizeof(float);
const unsigned int size_of_int = sizeof(int);
// Return variables
hipError_t err0, err1, err2, err3, err4, err5;
#ifndef METRIC_RUN_MAIN
// Check that we have at least one CUDA device
int nb_devices;
err0 = hipGetDeviceCount(&nb_devices);
if (err0 != hipSuccess || nb_devices == 0) {
printf("ERROR: No CUDA device found\n");
return false;
}
// Select the first CUDA device as default
err0 = hipSetDevice(0);
if (err0 != hipSuccess) {
printf("ERROR: Cannot set the chosen CUDA device\n");
return false;
}
#endif
// Initialize CUBLAS
hipblasInit();
// Allocate global memory
float * ref_dev = NULL;
float * query_dev = NULL;
float * dist_dev = NULL;
int * index_dev = NULL;
float * ref_norm_dev = NULL;
float * query_norm_dev = NULL;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t index_pitch_in_bytes;
err0 = hipMallocPitch((void**)&ref_dev, &ref_pitch_in_bytes, ref_nb * size_of_float, dim);
err1 = hipMallocPitch((void**)&query_dev, &query_pitch_in_bytes, query_nb * size_of_float, dim);
err2 = hipMallocPitch((void**)&dist_dev, &dist_pitch_in_bytes, query_nb * size_of_float, ref_nb);
err3 = hipMallocPitch((void**)&index_dev, &index_pitch_in_bytes, query_nb * size_of_int, k);
err4 = hipMalloc((void**)&ref_norm_dev, ref_nb * size_of_float);
err5 = hipMalloc((void**)&query_norm_dev, query_nb * size_of_float);
if (err0 != hipSuccess || err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess || err4 != hipSuccess || err5 != hipSuccess) {
printf("ERROR: Memory allocation error\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Deduce pitch values
size_t ref_pitch = ref_pitch_in_bytes / size_of_float;
size_t query_pitch = query_pitch_in_bytes / size_of_float;
size_t dist_pitch = dist_pitch_in_bytes / size_of_float;
size_t index_pitch = index_pitch_in_bytes / size_of_int;
// Check pitch values
if (query_pitch != dist_pitch || query_pitch != index_pitch) {
printf("ERROR: Invalid pitch value\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Copy reference and query data from the host to the device
err0 = hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref, ref_nb * size_of_float, ref_nb * size_of_float, dim, hipMemcpyHostToDevice);
err1 = hipMemcpy2D(query_dev, query_pitch_in_bytes, query, query_nb * size_of_float, query_nb * size_of_float, dim, hipMemcpyHostToDevice);
if (err0 != hipSuccess || err1 != hipSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Compute the squared norm of the reference points
dim3 block0(256, 1, 1);
dim3 grid0(ref_nb / 256, 1, 1);
if (ref_nb % 256 != 0) grid0.x += 1;
#ifdef ONLY_TIME_KERNELS
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
#endif
//struct timeval tic;
//gettimeofday(&tic, NULL);
#endif
#ifdef ONLY_TIME_KERNELS
for(int i=0; i<nb_iterations;i++)
{
#endif
hipLaunchKernelGGL(( compute_squared_norm), dim3(grid0), dim3(block0), 0, 0, ref_dev, ref_nb, ref_pitch, dim, ref_norm_dev);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Compute the squared norm of the query points
dim3 block1(256, 1, 1);
dim3 grid1(query_nb / 256, 1, 1);
if (query_nb % 256 != 0) grid1.x += 1;
hipLaunchKernelGGL(( compute_squared_norm), dim3(grid1), dim3(block1), 0, 0, query_dev, query_nb, query_pitch, dim, query_norm_dev);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Computation of query*transpose(reference)
hipblasSgemm('n', 't', (int)query_pitch, (int)ref_pitch, dim, (float)-2.0, query_dev, query_pitch, ref_dev, ref_pitch, (float)0.0, dist_dev, query_pitch);
if (hipblasGetError() != HIPBLAS_STATUS_SUCCESS) {
printf("ERROR: Unable to execute hipblasSgemm\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Add reference points norm
dim3 block2(16, 16, 1);
dim3 grid2(query_nb / 16, ref_nb / 16, 1);
if (query_nb % 16 != 0) grid2.x += 1;
if (ref_nb % 16 != 0) grid2.y += 1;
hipLaunchKernelGGL(( add_reference_points_norm), dim3(grid2), dim3(block2), 0, 0, dist_dev, query_nb, dist_pitch, ref_nb, ref_norm_dev);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Sort each column
hipLaunchKernelGGL(( modified_insertion_sort), dim3(grid1), dim3(block1), 0, 0, dist_dev, dist_pitch, index_dev, index_pitch, query_nb, ref_nb, k);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Add query norm and compute the square root of the k first elements
dim3 block3(16, 16, 1);
dim3 grid3(query_nb / 16, k / 16, 1);
if (query_nb % 16 != 0) grid3.x += 1;
if (k % 16 != 0) grid3.y += 1;
hipLaunchKernelGGL(( add_query_points_norm_and_sqrt), dim3(grid3), dim3(block3), 0, 0, dist_dev, query_nb, dist_pitch, k, query_norm_dev);
if (hipGetLastError() != hipSuccess) {
printf("ERROR: Unable to execute kernel\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
#ifdef ONLY_TIME_KERNELS
}
float msecTotal = 0.0f;
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
#endif
elapsed_time=((msecTotal/nb_iterations)/1000);
#endif
// Copy k smallest distances / indexes from the device to the host
err0 = hipMemcpy2D(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch_in_bytes, query_nb * size_of_float, k, hipMemcpyDeviceToHost);
err1 = hipMemcpy2D(knn_index, query_nb * size_of_int, index_dev, index_pitch_in_bytes, query_nb * size_of_int, k, hipMemcpyDeviceToHost);
if (err0 != hipSuccess || err1 != hipSuccess) {
printf("ERROR: Unable to copy data from device to host\n");
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return false;
}
// Memory clean-up and CUBLAS shutdown
hipFree(ref_dev);
hipFree(query_dev);
hipFree(dist_dev);
hipFree(index_dev);
hipFree(ref_norm_dev);
hipFree(query_norm_dev);
hipblasShutdown();
return true;
}
| 782dd94c984e0a839956715fc5d9dcda984c7c3d.cu | #include <stdio.h>
#include <cuda.h>
#include <cublas.h>
#include <sys/time.h>
#include <time.h>
#include <cuda_runtime.h>
//#include <cublas_v2.h>
// CUDA and CUBLAS functions
#include <helper_functions.h>
#include <helper_cuda.h>
#define BLOCK_DIM 16
/**
* Computes the squared Euclidean distance matrix between the query points and the reference points.
*
* @param ref reference points stored in the global memory
* @param ref_width number of reference points
* @param ref_pitch pitch of the reference points array in number of columns
* @param query query points stored in the global memory
* @param query_width number of query points
* @param query_pitch pitch of the query points array in number of columns
* @param height dimension of points = height of texture `ref` and of the array `query`
* @param dist array containing the query_width x ref_width computed distances
*/
__global__ void compute_distances(float * ref,
int ref_width,
int ref_pitch,
float * query,
int query_width,
int query_pitch,
int height,
float * dist) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Initialization of the SSD for the current thread
float ssd = 0.f;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Conditions
int cond0 = (begin_A + tx < ref_width); // used to write in shared memory
int cond1 = (begin_B + tx < query_width); // used to write in shared memory, in computations, and to write in the output array
int cond2 = (begin_A + ty < ref_width); // used in computations and to write in the output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrices; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
}
/**
* Computes the squared Euclidean distance matrix between the query points and the reference points.
*
* @param ref reference points stored in the texture memory
* @param ref_width number of reference points
* @param query query points stored in the global memory
* @param query_width number of query points
* @param query_pitch pitch of the query points array in number of columns
* @param height dimension of points = height of texture `ref` and of the array `query`
* @param dist array containing the query_width x ref_width computed distances
*/
__global__ void compute_distance_texture(cudaTextureObject_t ref,
int ref_width,
float * query,
int query_width,
int query_pitch,
int height,
float* dist) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<query_width && yIndex<ref_width) {
float ssd = 0.f;
for (int i=0; i<height; i++) {
float tmp = tex2D<float>(ref, (float)yIndex, (float)i) - query[i * query_pitch + xIndex];
ssd += tmp * tmp;
}
dist[yIndex * query_pitch + xIndex] = ssd;
}
}
/**
* For each query point (i.e. each column) finds the k smallest distances
* of the distance matrix and their respective indexes and gathers them at the top
* of the 2 arrays.
*
* Since we only need to locate the k smallest distances, sorting the entire array
* would not be very efficient if k is relatively small. Instead, we perform a
* simple insertion sort that inserts a given distance into the first k values
* only when it qualifies.
*
* @param dist distance matrix
* @param dist_pitch pitch of the distance matrix given in number of columns
* @param index index matrix
* @param index_pitch pitch of the index matrix given in number of columns
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix
* @param k number of values to find
*/
__global__ void modified_insertion_sort(float * dist,
int dist_pitch,
int * index,
int index_pitch,
int width,
int height,
int k){
// Column position
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
// Do nothing if we are out of bounds
if (xIndex < width) {
// Pointer shift
float * p_dist = dist + xIndex;
int * p_index = index + xIndex;
// Initialise the first index
p_index[0] = 0;
// Go through all points
for (int i=1; i<height; ++i) {
// Store current distance and associated index
float curr_dist = p_dist[i*dist_pitch];
int curr_index = i;
// Skip the current value if its index is >= k and if it's higher the k-th slready sorted mallest value
if (i >= k && curr_dist >= p_dist[(k-1)*dist_pitch]) {
continue;
}
// Shift values (and indexes) higher than the current distance to the right
int j = min(i, k-1);
while (j > 0 && p_dist[(j-1)*dist_pitch] > curr_dist) {
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_index[j*index_pitch] = p_index[(j-1)*index_pitch];
--j;
}
// Write the current distance and index at their position
p_dist[j*dist_pitch] = curr_dist;
p_index[j*index_pitch] = curr_index;
}
}
}
/**
* Computes the square root of the first k lines of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param pitch pitch of the distance matrix given in number of columns
* @param k number of values to consider
*/
__global__ void compute_sqrt(float * dist, int width, int pitch, int k){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
/**
* Computes the squared norm of each column of the input array.
*
* @param array input array
* @param width number of columns of `array` = number of points
* @param pitch pitch of `array` in number of columns
* @param height number of rows of `array` = dimension of the points
* @param norm output array containing the squared norm values
*/
__global__ void compute_squared_norm(float * array, int width, int pitch, int height, float * norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
float sum = 0.f;
for (int i=0; i<height; i++){
float val = array[i*pitch+xIndex];
sum += val*val;
}
norm[xIndex] = sum;
}
}
/**
* Adds the reference points norm (column vector) to each column of the input array.
*
* @param array input array
* @param width number of columns of `array` = number of points
* @param pitch pitch of `array` in number of columns
* @param height number of rows of `array` = dimension of the points
* @param norm reference points norm stored as a column vector
*/
__global__ void add_reference_points_norm(float * array, int width, int pitch, int height, float * norm){
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int xIndex = blockIdx.x * blockDim.x + tx;
unsigned int yIndex = blockIdx.y * blockDim.y + ty;
__shared__ float shared_vec[16];
if (tx==0 && yIndex<height)
shared_vec[ty] = norm[yIndex];
__syncthreads();
if (xIndex<width && yIndex<height)
array[yIndex*pitch+xIndex] += shared_vec[ty];
}
/**
* Adds the query points norm (row vector) to the k first lines of the input
* array and computes the square root of the resulting values.
*
* @param array input array
* @param width number of columns of `array` = number of points
* @param pitch pitch of `array` in number of columns
* @param k number of neighbors to consider
* @param norm query points norm stored as a row vector
*/
__global__ void add_query_points_norm_and_sqrt(float * array, int width, int pitch, int k, float * norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
array[yIndex*pitch + xIndex] = sqrt(array[yIndex*pitch + xIndex] + norm[xIndex]);
}
bool knn_cuda_global(const float * ref,
int ref_nb,
const float * query,
int query_nb,
int dim,
int k,
float * knn_dist,
int * knn_index,int nb_iterations, double &elapsed_time) {
// Constants
const unsigned int size_of_float = sizeof(float);
const unsigned int size_of_int = sizeof(int);
// Return variables
cudaError_t err0, err1, err2, err3;
#ifndef METRIC_RUN_MAIN
// Check that we have at least one CUDA device
int nb_devices;
err0 = cudaGetDeviceCount(&nb_devices);
if (err0 != cudaSuccess || nb_devices == 0) {
printf("ERROR: No CUDA device found\n");
return false;
}
// Select the first CUDA device as default
err0 = cudaSetDevice(0);
if (err0 != cudaSuccess) {
printf("ERROR: Cannot set the chosen CUDA device\n");
return false;
}
#endif
// Allocate global memory
float * ref_dev = NULL;
float * query_dev = NULL;
float * dist_dev = NULL;
int * index_dev = NULL;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t index_pitch_in_bytes;
err0 = cudaMallocPitch((void**)&ref_dev, &ref_pitch_in_bytes, ref_nb * size_of_float, dim);
err1 = cudaMallocPitch((void**)&query_dev, &query_pitch_in_bytes, query_nb * size_of_float, dim);
err2 = cudaMallocPitch((void**)&dist_dev, &dist_pitch_in_bytes, query_nb * size_of_float, ref_nb);
err3 = cudaMallocPitch((void**)&index_dev, &index_pitch_in_bytes, query_nb * size_of_int, k);
if (err0 != cudaSuccess || err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess) {
printf("ERROR: Memory allocation error\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Deduce pitch values
size_t ref_pitch = ref_pitch_in_bytes / size_of_float;
size_t query_pitch = query_pitch_in_bytes / size_of_float;
size_t dist_pitch = dist_pitch_in_bytes / size_of_float;
size_t index_pitch = index_pitch_in_bytes / size_of_int;
// Check pitch values
if (query_pitch != dist_pitch || query_pitch != index_pitch) {
printf("ERROR: Invalid pitch value\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Copy reference and query data from the host to the device
err0 = cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref, ref_nb * size_of_float, ref_nb * size_of_float, dim, cudaMemcpyHostToDevice);
err1 = cudaMemcpy2D(query_dev, query_pitch_in_bytes, query, query_nb * size_of_float, query_nb * size_of_float, dim, cudaMemcpyHostToDevice);
if (err0 != cudaSuccess || err1 != cudaSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Compute the squared Euclidean distances
dim3 block0(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid0(query_nb / BLOCK_DIM, ref_nb / BLOCK_DIM, 1);
if (query_nb % BLOCK_DIM != 0) grid0.x += 1;
if (ref_nb % BLOCK_DIM != 0) grid0.y += 1;
#ifdef ONLY_TIME_KERNELS
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
#endif
//struct timeval tic;
//gettimeofday(&tic, NULL);
for(int i=0; i<nb_iterations;i++)
{
#endif
compute_distances<<<grid0, block0>>>(ref_dev, ref_nb, ref_pitch, query_dev, query_nb, query_pitch, dim, dist_dev);
/*
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
*/
// Sort the distances with their respective indexes
dim3 block1(256, 1, 1);
dim3 grid1(query_nb / 256, 1, 1);
if (query_nb % 256 != 0) grid1.x += 1;
modified_insertion_sort<<<grid1, block1>>>(dist_dev, dist_pitch, index_dev, index_pitch, query_nb, ref_nb, k);
/*
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
*/
// Compute the square root of the k smallest distances
dim3 block2(16, 16, 1);
dim3 grid2(query_nb / 16, k / 16, 1);
if (query_nb % 16 != 0) grid2.x += 1;
if (k % 16 != 0) grid2.y += 1;
compute_sqrt<<<grid2, block2>>>(dist_dev, query_nb, query_pitch, k);
/*
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
*/
#ifdef ONLY_TIME_KERNELS
}
float msecTotal = 0.0f;
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
#endif
elapsed_time=((msecTotal/nb_iterations)/1000);
#endif
// Copy k smallest distances / indexes from the device to the host
err0 = cudaMemcpy2D(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch_in_bytes, query_nb * size_of_float, k, cudaMemcpyDeviceToHost);
err1 = cudaMemcpy2D(knn_index, query_nb * size_of_int, index_dev, index_pitch_in_bytes, query_nb * size_of_int, k, cudaMemcpyDeviceToHost);
if (err0 != cudaSuccess || err1 != cudaSuccess) {
printf("ERROR: Unable to copy data from device to host\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Memory clean-up
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return true;
}
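/*
 * Illustrative usage sketch (not part of the original file): a minimal host-side
 * call of knn_cuda_global. The host layout matches the pitched copies above
 * (one row per dimension, one column per point; knn_dist and knn_index come back
 * as k rows of query_nb values). The sizes, iteration count and data fill below
 * are assumptions, not values taken from this benchmark.
 *
 * const int ref_nb = 4096, query_nb = 1024, dim = 64, k = 16;
 * float * ref = (float*) malloc(ref_nb * dim * sizeof(float));
 * float * query = (float*) malloc(query_nb * dim * sizeof(float));
 * float * knn_dist = (float*) malloc(query_nb * k * sizeof(float));
 * int * knn_index = (int*) malloc(query_nb * k * sizeof(int));
 * double elapsed_time = 0.0;
 * // ... fill ref and query with point coordinates ...
 * bool ok = knn_cuda_global(ref, ref_nb, query, query_nb, dim, k,
 *                           knn_dist, knn_index, 10, elapsed_time);
 */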
bool knn_cuda_texture(const float * ref,
int ref_nb,
const float * query,
int query_nb,
int dim,
int k,
float * knn_dist,
int * knn_index,int nb_iterations, double &elapsed_time) {
// Constants
unsigned int size_of_float = sizeof(float);
unsigned int size_of_int = sizeof(int);
// Return variables
cudaError_t err0, err1, err2;
#ifndef METRIC_RUN_MAIN
// Check that we have at least one CUDA device
int nb_devices;
err0 = cudaGetDeviceCount(&nb_devices);
if (err0 != cudaSuccess || nb_devices == 0) {
printf("ERROR: No CUDA device found\n");
return false;
}
// Select the first CUDA device as default
err0 = cudaSetDevice(0);
if (err0 != cudaSuccess) {
printf("ERROR: Cannot set the chosen CUDA device\n");
return false;
}
#endif
// Allocate global memory
float * query_dev = NULL;
float * dist_dev = NULL;
int * index_dev = NULL;
size_t query_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t index_pitch_in_bytes;
err0 = cudaMallocPitch((void**)&query_dev, &query_pitch_in_bytes, query_nb * size_of_float, dim);
err1 = cudaMallocPitch((void**)&dist_dev, &dist_pitch_in_bytes, query_nb * size_of_float, ref_nb);
err2 = cudaMallocPitch((void**)&index_dev, &index_pitch_in_bytes, query_nb * size_of_int, k);
if (err0 != cudaSuccess || err1 != cudaSuccess || err2 != cudaSuccess) {
printf("ERROR: Memory allocation error (cudaMallocPitch)\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Deduce pitch values
size_t query_pitch = query_pitch_in_bytes / size_of_float;
size_t dist_pitch = dist_pitch_in_bytes / size_of_float;
size_t index_pitch = index_pitch_in_bytes / size_of_int;
// Check pitch values
if (query_pitch != dist_pitch || query_pitch != index_pitch) {
printf("ERROR: Invalid pitch value\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Copy query data from the host to the device
err0 = cudaMemcpy2D(query_dev, query_pitch_in_bytes, query, query_nb * size_of_float, query_nb * size_of_float, dim, cudaMemcpyHostToDevice);
if (err0 != cudaSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Allocate CUDA array for reference points
cudaArray* ref_array_dev = NULL;
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
err0 = cudaMallocArray(&ref_array_dev, &channel_desc, ref_nb, dim);
if (err0 != cudaSuccess) {
printf("ERROR: Memory allocation error (cudaMallocArray)\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
return false;
}
// Copy reference points from host to device
err0 = cudaMemcpyToArray(ref_array_dev, 0, 0, ref, ref_nb * size_of_float * dim, cudaMemcpyHostToDevice);
if (err0 != cudaSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
return false;
}
// Resource descriptor
struct cudaResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = cudaResourceTypeArray;
res_desc.res.array.array = ref_array_dev;
// Texture descriptor
struct cudaTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = cudaAddressModeClamp;
tex_desc.addressMode[1] = cudaAddressModeClamp;
tex_desc.filterMode = cudaFilterModePoint;
tex_desc.readMode = cudaReadModeElementType;
tex_desc.normalizedCoords = 0;
// Create the texture
cudaTextureObject_t ref_tex_dev = 0;
err0 = cudaCreateTextureObject(&ref_tex_dev, &res_desc, &tex_desc, NULL);
if (err0 != cudaSuccess) {
printf("ERROR: Unable to create the texture\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
return false;
}
// Compute the squared Euclidean distances
dim3 block0(16, 16, 1);
dim3 grid0(query_nb / 16, ref_nb / 16, 1);
if (query_nb % 16 != 0) grid0.x += 1;
if (ref_nb % 16 != 0) grid0.y += 1;
#ifdef ONLY_TIME_KERNELS
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
#endif
//struct timeval tic;
//gettimeofday(&tic, NULL);
for(int i=0; i<nb_iterations;i++)
{
#endif
compute_distance_texture<<<grid0, block0>>>(ref_tex_dev, ref_nb, query_dev, query_nb, query_pitch, dim, dist_dev);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
cudaDestroyTextureObject(ref_tex_dev);
return false;
}
// Sort the distances with their respective indexes
dim3 block1(256, 1, 1);
dim3 grid1(query_nb / 256, 1, 1);
if (query_nb % 256 != 0) grid1.x += 1;
modified_insertion_sort<<<grid1, block1>>>(dist_dev, dist_pitch, index_dev, index_pitch, query_nb, ref_nb, k);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
cudaDestroyTextureObject(ref_tex_dev);
return false;
}
// Compute the square root of the k smallest distances
dim3 block2(16, 16, 1);
dim3 grid2(query_nb / 16, k / 16, 1);
if (query_nb % 16 != 0) grid2.x += 1;
if (k % 16 != 0) grid2.y += 1;
compute_sqrt<<<grid2, block2>>>(dist_dev, query_nb, query_pitch, k);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
cudaDestroyTextureObject(ref_tex_dev);
return false;
}
#ifdef ONLY_TIME_KERNELS
}
float msecTotal = 0.0f;
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
#endif
elapsed_time=((msecTotal/nb_iterations)/1000);
#endif
// Copy k smallest distances / indexes from the device to the host
err0 = cudaMemcpy2D(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch_in_bytes, query_nb * size_of_float, k, cudaMemcpyDeviceToHost);
err1 = cudaMemcpy2D(knn_index, query_nb * size_of_int, index_dev, index_pitch_in_bytes, query_nb * size_of_int, k, cudaMemcpyDeviceToHost);
if (err0 != cudaSuccess || err1 != cudaSuccess) {
printf("ERROR: Unable to copy data from device to host\n");
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
cudaDestroyTextureObject(ref_tex_dev);
return false;
}
// Memory clean-up
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFreeArray(ref_array_dev);
cudaDestroyTextureObject(ref_tex_dev);
return true;
}
bool knn_cublas(const float * ref,
int ref_nb,
const float * query,
int query_nb,
int dim,
int k,
float * knn_dist,
int * knn_index,int nb_iterations, double & elapsed_time) {
// Constants
const unsigned int size_of_float = sizeof(float);
const unsigned int size_of_int = sizeof(int);
// Return variables
cudaError_t err0, err1, err2, err3, err4, err5;
#ifndef METRIC_RUN_MAIN
// Check that we have at least one CUDA device
int nb_devices;
err0 = cudaGetDeviceCount(&nb_devices);
if (err0 != cudaSuccess || nb_devices == 0) {
printf("ERROR: No CUDA device found\n");
return false;
}
// Select the first CUDA device as default
err0 = cudaSetDevice(0);
if (err0 != cudaSuccess) {
printf("ERROR: Cannot set the chosen CUDA device\n");
return false;
}
#endif
// Initialize CUBLAS
cublasInit();
// Allocate global memory
float * ref_dev = NULL;
float * query_dev = NULL;
float * dist_dev = NULL;
int * index_dev = NULL;
float * ref_norm_dev = NULL;
float * query_norm_dev = NULL;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t index_pitch_in_bytes;
err0 = cudaMallocPitch((void**)&ref_dev, &ref_pitch_in_bytes, ref_nb * size_of_float, dim);
err1 = cudaMallocPitch((void**)&query_dev, &query_pitch_in_bytes, query_nb * size_of_float, dim);
err2 = cudaMallocPitch((void**)&dist_dev, &dist_pitch_in_bytes, query_nb * size_of_float, ref_nb);
err3 = cudaMallocPitch((void**)&index_dev, &index_pitch_in_bytes, query_nb * size_of_int, k);
err4 = cudaMalloc((void**)&ref_norm_dev, ref_nb * size_of_float);
err5 = cudaMalloc((void**)&query_norm_dev, query_nb * size_of_float);
if (err0 != cudaSuccess || err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess || err4 != cudaSuccess || err5 != cudaSuccess) {
printf("ERROR: Memory allocation error\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Deduce pitch values
size_t ref_pitch = ref_pitch_in_bytes / size_of_float;
size_t query_pitch = query_pitch_in_bytes / size_of_float;
size_t dist_pitch = dist_pitch_in_bytes / size_of_float;
size_t index_pitch = index_pitch_in_bytes / size_of_int;
// Check pitch values
if (query_pitch != dist_pitch || query_pitch != index_pitch) {
printf("ERROR: Invalid pitch value\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Copy reference and query data from the host to the device
err0 = cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref, ref_nb * size_of_float, ref_nb * size_of_float, dim, cudaMemcpyHostToDevice);
err1 = cudaMemcpy2D(query_dev, query_pitch_in_bytes, query, query_nb * size_of_float, query_nb * size_of_float, dim, cudaMemcpyHostToDevice);
if (err0 != cudaSuccess || err1 != cudaSuccess) {
printf("ERROR: Unable to copy data from host to device\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Compute the squared norm of the reference points
dim3 block0(256, 1, 1);
dim3 grid0(ref_nb / 256, 1, 1);
if (ref_nb % 256 != 0) grid0.x += 1;
#ifdef ONLY_TIME_KERNELS
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
#endif
//struct timeval tic;
//gettimeofday(&tic, NULL);
#endif
#ifdef ONLY_TIME_KERNELS
for(int i=0; i<nb_iterations;i++)
{
#endif
compute_squared_norm<<<grid0, block0>>>(ref_dev, ref_nb, ref_pitch, dim, ref_norm_dev);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Compute the squared norm of the query points
dim3 block1(256, 1, 1);
dim3 grid1(query_nb / 256, 1, 1);
if (query_nb % 256 != 0) grid1.x += 1;
compute_squared_norm<<<grid1, block1>>>(query_dev, query_nb, query_pitch, dim, query_norm_dev);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Computation of query*transpose(reference)
cublasSgemm('n', 't', (int)query_pitch, (int)ref_pitch, dim, (float)-2.0, query_dev, query_pitch, ref_dev, ref_pitch, (float)0.0, dist_dev, query_pitch);
if (cublasGetError() != CUBLAS_STATUS_SUCCESS) {
printf("ERROR: Unable to execute cublasSgemm\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Add reference points norm
dim3 block2(16, 16, 1);
dim3 grid2(query_nb / 16, ref_nb / 16, 1);
if (query_nb % 16 != 0) grid2.x += 1;
if (ref_nb % 16 != 0) grid2.y += 1;
add_reference_points_norm<<<grid2, block2>>>(dist_dev, query_nb, dist_pitch, ref_nb, ref_norm_dev);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Sort each column
modified_insertion_sort<<<grid1, block1>>>(dist_dev, dist_pitch, index_dev, index_pitch, query_nb, ref_nb, k);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Add the query norm and compute the square root of the k first elements
dim3 block3(16, 16, 1);
dim3 grid3(query_nb / 16, k / 16, 1);
if (query_nb % 16 != 0) grid3.x += 1;
if (k % 16 != 0) grid3.y += 1;
add_query_points_norm_and_sqrt<<<grid3, block3>>>(dist_dev, query_nb, dist_pitch, k, query_norm_dev);
if (cudaGetLastError() != cudaSuccess) {
printf("ERROR: Unable to execute kernel\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
#ifdef ONLY_TIME_KERNELS
}
float msecTotal = 0.0f;
#ifndef METRIC_NOT_RUN_OTHER_EVENTS
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
#endif
elapsed_time=((msecTotal/nb_iterations)/1000);
#endif
// Copy k smallest distances / indexes from the device to the host
err0 = cudaMemcpy2D(knn_dist, query_nb * size_of_float, dist_dev, dist_pitch_in_bytes, query_nb * size_of_float, k, cudaMemcpyDeviceToHost);
err1 = cudaMemcpy2D(knn_index, query_nb * size_of_int, index_dev, index_pitch_in_bytes, query_nb * size_of_int, k, cudaMemcpyDeviceToHost);
if (err0 != cudaSuccess || err1 != cudaSuccess) {
printf("ERROR: Unable to copy data from device to host\n");
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return false;
}
// Memory clean-up and CUBLAS shutdown
cudaFree(ref_dev);
cudaFree(query_dev);
cudaFree(dist_dev);
cudaFree(index_dev);
cudaFree(ref_norm_dev);
cudaFree(query_norm_dev);
cublasShutdown();
return true;
}
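/*
 * Summary of the three variants above: knn_cuda_global keeps the reference set
 * in pitched global memory, knn_cuda_texture reads it through a
 * cudaTextureObject bound to a cudaArray, and knn_cublas replaces the explicit
 * distance kernel with the squared-norm/GEMM decomposition before running the
 * same modified_insertion_sort selection of the k nearest neighbours.
 */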
|
9c732ad257f683448eb278c51e928ce6d9143614.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file optimizer_op.cu
* \brief Optimizer operators
* \author Junyuan Xie
*/
#include "./optimizer_op-inl.h"
#include <hipcub/hipcub.hpp>
namespace mxnet {
namespace op {
template <int req>
struct SGDMomStdDnsRspDnsKernel<req, gpu> {
template <typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i,
index_t row_length,
DType* out_data,
DType* mom_data,
const DType* weight_data,
const IType* grad_idx,
const DType* grad_data,
const RType* prefix_sum,
const DType clip_gradient,
const DType momentum,
const DType lr,
const DType wd,
const DType rescale_grad) {
using nnvm::dim_t;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const dim_t nnr = prefix_sum[row_id];
const bool non_zero = (row_id == 0) ? prefix_sum[0] > 0 : nnr > prefix_sum[row_id - 1];
const RType grad_i = (nnr - 1) * row_length + col_id;
const DType grad = non_zero ? grad_data[grad_i] : static_cast<DType>(0);
DType grad_rescaled = rescale_grad * grad;
if (clip_gradient >= 0.0f) {
grad_rescaled = mshadow_op::clip::Map(grad_rescaled, clip_gradient);
}
grad_rescaled += wd * weight_data[i];
mom_data[i] *= momentum;
mom_data[i] -= lr * grad_rescaled;
KERNEL_ASSIGN(out_data[i], req, weight_data[i] + mom_data[i]);
}
};
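// Indexing note (derived from the kernel above): prefix_sum is an inclusive scan
// of per-row occupancy flags for the row-sparse gradient, so prefix_sum[row_id]
// counts how many stored rows appear at or before row_id. non_zero says whether
// this dense row has a stored counterpart, and (nnr - 1) * row_length + col_id
// addresses its values inside the compacted grad_data. Example with 4 rows where
// only rows 1 and 3 are stored: flags = [0,1,0,1] -> prefix_sum = [0,1,1,2], so
// dense row 3 reads compacted row 1 of grad_data.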
template <>
void SGDMomStdUpdateDnsRspDnsImpl<gpu>(const SGDMomParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mom,
const OpReqType& req,
TBlob* out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse sgd_mom_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mom.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
DType* weight_data = weight.dptr<DType>();
IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
DType* grad_val = grad.data().dptr<DType>();
DType* mom_data = mom.dptr<DType>();
DType* out_data = out->dptr<DType>();
nnvm::dim_t num_rows = weight.shape_[0];
nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
Shape1(num_rows * sizeof(nnvm::dim_t) + temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows * sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0], prefix_sum, grad_idx);
// calculate inclusive prefix sum
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
}
size_t num_threads = num_rows * row_length;
Kernel<SGDMomStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(
s,
num_threads,
row_length,
out_data,
mom_data,
weight_data,
grad_idx,
grad_val,
prefix_sum,
static_cast<DType>(param.clip_gradient),
static_cast<DType>(param.momentum),
static_cast<DType>(param.lr),
static_cast<DType>(param.wd),
static_cast<DType>(param.rescale_grad));
});
});
});
}
template <int req>
struct AdamStdDnsRspDnsKernel<req, gpu> {
template <typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i,
const nnvm::dim_t row_length,
DType* out_data,
DType* mean_data,
DType* var_data,
const DType* weight_data,
const IType* grad_idx,
const DType* grad_data,
const RType* prefix_sum,
const DType clip_gradient,
const DType beta1,
const DType beta2,
const DType lr,
const DType wd,
const DType epsilon,
const DType rescale_grad) {
using namespace mshadow_op;
using nnvm::dim_t;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const bool non_zero =
(row_id == 0) ? prefix_sum[0] > 0 : prefix_sum[row_id] > prefix_sum[row_id - 1];
const RType grad_offset = (prefix_sum[row_id] - 1) * row_length + col_id;
DType grad_rescaled = non_zero ? static_cast<DType>(grad_data[grad_offset] * rescale_grad)
: static_cast<DType>(0);
if (clip_gradient >= 0.0f) {
grad_rescaled = clip::Map(grad_rescaled, clip_gradient);
}
grad_rescaled += weight_data[i] * wd;
mean_data[i] = beta1 * mean_data[i] + (1.f - beta1) * grad_rescaled;
var_data[i] = beta2 * var_data[i] + (1.f - beta2) * square::Map(grad_rescaled);
KERNEL_ASSIGN(out_data[i],
req,
weight_data[i] - lr * mean_data[i] / (square_root::Map(var_data[i]) + epsilon));
}
};
template <>
void AdamStdUpdateDnsRspDnsImpl<gpu>(const AdamParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mean,
const TBlob& var,
const OpReqType& req,
TBlob* out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse adam_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mean.shape_.Size(), 0);
CHECK_GT(var.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
const DType* weight_data = weight.dptr<DType>();
const IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
const DType* grad_val = grad.data().dptr<DType>();
DType* mean_data = mean.dptr<DType>();
DType* var_data = var.dptr<DType>();
DType* out_data = out->dptr<DType>();
const nnvm::dim_t num_rows = weight.shape_[0];
const nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
Shape1(num_rows * sizeof(nnvm::dim_t) + temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows * sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0], prefix_sum, grad_idx);
// calculate inclusive prefix sum
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
}
Kernel<AdamStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(
s,
weight.shape_.Size(),
row_length,
out_data,
mean_data,
var_data,
weight_data,
grad_idx,
grad_val,
prefix_sum,
static_cast<DType>(param.clip_gradient),
static_cast<DType>(param.beta1),
static_cast<DType>(param.beta2),
static_cast<DType>(param.lr),
static_cast<DType>(param.wd),
static_cast<DType>(param.epsilon),
static_cast<DType>(param.rescale_grad));
});
});
});
}
NNVM_REGISTER_OP(signsgd_update).set_attr<FCompute>("FCompute<gpu>", SignSGDUpdate<gpu>);
NNVM_REGISTER_OP(signum_update).set_attr<FCompute>("FCompute<gpu>", SignumUpdate<gpu>);
NNVM_REGISTER_OP(sgd_update)
.set_attr<FCompute>("FCompute<gpu>", SGDUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDUpdateEx<gpu>);
NNVM_REGISTER_OP(sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", SGDMomUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDMomUpdateEx<gpu>);
NNVM_REGISTER_OP(mp_sgd_update).set_attr<FCompute>("FCompute<gpu>", MP_SGDUpdate<gpu>);
NNVM_REGISTER_OP(mp_sgd_mom_update).set_attr<FCompute>("FCompute<gpu>", MP_SGDMomUpdate<gpu>);
NNVM_REGISTER_OP(multi_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, type_identity, 2>);
NNVM_REGISTER_OP(multi_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, type_identity, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, single_precision, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, single_precision, 4>);
NNVM_REGISTER_OP(nag_mom_update).set_attr<FCompute>("FCompute<gpu>", NAGMomUpdate<gpu>);
NNVM_REGISTER_OP(mp_nag_mom_update).set_attr<FCompute>("FCompute<gpu>", MP_NAGMomUpdate<gpu>);
NNVM_REGISTER_OP(ftml_update).set_attr<FCompute>("FCompute<gpu>", FTMLUpdate<gpu>);
NNVM_REGISTER_OP(adam_update)
.set_attr<FCompute>("FCompute<gpu>", AdamUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdamUpdateEx<gpu>);
NNVM_REGISTER_OP(rmsprop_update).set_attr<FCompute>("FCompute<gpu>", RMSPropUpdate<gpu>);
NNVM_REGISTER_OP(rmspropalex_update).set_attr<FCompute>("FCompute<gpu>", RMSPropAlexUpdate<gpu>);
NNVM_REGISTER_OP(ftrl_update)
.set_attr<FCompute>("FCompute<gpu>", FtrlUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", FtrlUpdateEx<gpu>);
NNVM_REGISTER_OP(_sparse_adagrad_update)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdagradUpdateEx<gpu>);
NNVM_REGISTER_OP(lamb_update_phase1).set_attr<FCompute>("FCompute<gpu>", LambUpdatePhaseOne<gpu>);
NNVM_REGISTER_OP(lamb_update_phase2).set_attr<FCompute>("FCompute<gpu>", LambUpdatePhaseTwo<gpu>);
NNVM_REGISTER_OP(mp_lamb_update_phase1)
.set_attr<FCompute>("FCompute<gpu>", MPLambUpdatePhaseOne<gpu>);
NNVM_REGISTER_OP(mp_lamb_update_phase2)
.set_attr<FCompute>("FCompute<gpu>", MPLambUpdatePhaseTwo<gpu>);
} // namespace op
} // namespace mxnet
| 9c732ad257f683448eb278c51e928ce6d9143614.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file optimizer_op.cu
* \brief Optimizer operators
* \author Junyuan Xie
*/
#include "./optimizer_op-inl.h"
#include <cub/cub.cuh>
namespace mxnet {
namespace op {
template <int req>
struct SGDMomStdDnsRspDnsKernel<req, gpu> {
template <typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i,
index_t row_length,
DType* out_data,
DType* mom_data,
const DType* weight_data,
const IType* grad_idx,
const DType* grad_data,
const RType* prefix_sum,
const DType clip_gradient,
const DType momentum,
const DType lr,
const DType wd,
const DType rescale_grad) {
using nnvm::dim_t;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const dim_t nnr = prefix_sum[row_id];
const bool non_zero = (row_id == 0) ? prefix_sum[0] > 0 : nnr > prefix_sum[row_id - 1];
const RType grad_i = (nnr - 1) * row_length + col_id;
const DType grad = non_zero ? grad_data[grad_i] : static_cast<DType>(0);
DType grad_rescaled = rescale_grad * grad;
if (clip_gradient >= 0.0f) {
grad_rescaled = mshadow_op::clip::Map(grad_rescaled, clip_gradient);
}
grad_rescaled += wd * weight_data[i];
mom_data[i] *= momentum;
mom_data[i] -= lr * grad_rescaled;
KERNEL_ASSIGN(out_data[i], req, weight_data[i] + mom_data[i]);
}
};
template <>
void SGDMomStdUpdateDnsRspDnsImpl<gpu>(const SGDMomParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mom,
const OpReqType& req,
TBlob* out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse sgd_mom_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mom.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
DType* weight_data = weight.dptr<DType>();
IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
DType* grad_val = grad.data().dptr<DType>();
DType* mom_data = mom.dptr<DType>();
DType* out_data = out->dptr<DType>();
nnvm::dim_t num_rows = weight.shape_[0];
nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
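// Note: the InclusiveSum call above runs in cub's size-query mode: with
// d_temp_storage == nullptr it only writes the required temp_storage_bytes.
// The workspace requested below packs the prefix_sum array and that scratch
// space into one allocation, and the real scan runs later once the row flags
// have been marked.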
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
Shape1(num_rows * sizeof(nnvm::dim_t) + temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows * sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0], prefix_sum, grad_idx);
// calculate inclusive prefix sum
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
}
size_t num_threads = num_rows * row_length;
Kernel<SGDMomStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(
s,
num_threads,
row_length,
out_data,
mom_data,
weight_data,
grad_idx,
grad_val,
prefix_sum,
static_cast<DType>(param.clip_gradient),
static_cast<DType>(param.momentum),
static_cast<DType>(param.lr),
static_cast<DType>(param.wd),
static_cast<DType>(param.rescale_grad));
});
});
});
}
template <int req>
struct AdamStdDnsRspDnsKernel<req, gpu> {
template <typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i,
const nnvm::dim_t row_length,
DType* out_data,
DType* mean_data,
DType* var_data,
const DType* weight_data,
const IType* grad_idx,
const DType* grad_data,
const RType* prefix_sum,
const DType clip_gradient,
const DType beta1,
const DType beta2,
const DType lr,
const DType wd,
const DType epsilon,
const DType rescale_grad) {
using namespace mshadow_op;
using nnvm::dim_t;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const bool non_zero =
(row_id == 0) ? prefix_sum[0] > 0 : prefix_sum[row_id] > prefix_sum[row_id - 1];
const RType grad_offset = (prefix_sum[row_id] - 1) * row_length + col_id;
DType grad_rescaled = non_zero ? static_cast<DType>(grad_data[grad_offset] * rescale_grad)
: static_cast<DType>(0);
if (clip_gradient >= 0.0f) {
grad_rescaled = clip::Map(grad_rescaled, clip_gradient);
}
grad_rescaled += weight_data[i] * wd;
mean_data[i] = beta1 * mean_data[i] + (1.f - beta1) * grad_rescaled;
var_data[i] = beta2 * var_data[i] + (1.f - beta2) * square::Map(grad_rescaled);
KERNEL_ASSIGN(out_data[i],
req,
weight_data[i] - lr * mean_data[i] / (square_root::Map(var_data[i]) + epsilon));
}
};
template <>
void AdamStdUpdateDnsRspDnsImpl<gpu>(const AdamParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mean,
const TBlob& var,
const OpReqType& req,
TBlob* out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp)
return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse adam_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mean.shape_.Size(), 0);
CHECK_GT(var.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
const DType* weight_data = weight.dptr<DType>();
const IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
const DType* grad_val = grad.data().dptr<DType>();
DType* mean_data = mean.dptr<DType>();
DType* var_data = var.dptr<DType>();
DType* out_data = out->dptr<DType>();
const nnvm::dim_t num_rows = weight.shape_[0];
const nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
Shape1(num_rows * sizeof(nnvm::dim_t) + temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows * sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0], prefix_sum, grad_idx);
// calculate inclusive prefix sum
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
}
Kernel<AdamStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(
s,
weight.shape_.Size(),
row_length,
out_data,
mean_data,
var_data,
weight_data,
grad_idx,
grad_val,
prefix_sum,
static_cast<DType>(param.clip_gradient),
static_cast<DType>(param.beta1),
static_cast<DType>(param.beta2),
static_cast<DType>(param.lr),
static_cast<DType>(param.wd),
static_cast<DType>(param.epsilon),
static_cast<DType>(param.rescale_grad));
});
});
});
}
NNVM_REGISTER_OP(signsgd_update).set_attr<FCompute>("FCompute<gpu>", SignSGDUpdate<gpu>);
NNVM_REGISTER_OP(signum_update).set_attr<FCompute>("FCompute<gpu>", SignumUpdate<gpu>);
NNVM_REGISTER_OP(sgd_update)
.set_attr<FCompute>("FCompute<gpu>", SGDUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDUpdateEx<gpu>);
NNVM_REGISTER_OP(sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", SGDMomUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDMomUpdateEx<gpu>);
NNVM_REGISTER_OP(mp_sgd_update).set_attr<FCompute>("FCompute<gpu>", MP_SGDUpdate<gpu>);
NNVM_REGISTER_OP(mp_sgd_mom_update).set_attr<FCompute>("FCompute<gpu>", MP_SGDMomUpdate<gpu>);
NNVM_REGISTER_OP(multi_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, type_identity, 2>);
NNVM_REGISTER_OP(multi_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, type_identity, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, single_precision, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, single_precision, 4>);
NNVM_REGISTER_OP(nag_mom_update).set_attr<FCompute>("FCompute<gpu>", NAGMomUpdate<gpu>);
NNVM_REGISTER_OP(mp_nag_mom_update).set_attr<FCompute>("FCompute<gpu>", MP_NAGMomUpdate<gpu>);
NNVM_REGISTER_OP(ftml_update).set_attr<FCompute>("FCompute<gpu>", FTMLUpdate<gpu>);
NNVM_REGISTER_OP(adam_update)
.set_attr<FCompute>("FCompute<gpu>", AdamUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdamUpdateEx<gpu>);
NNVM_REGISTER_OP(rmsprop_update).set_attr<FCompute>("FCompute<gpu>", RMSPropUpdate<gpu>);
NNVM_REGISTER_OP(rmspropalex_update).set_attr<FCompute>("FCompute<gpu>", RMSPropAlexUpdate<gpu>);
NNVM_REGISTER_OP(ftrl_update)
.set_attr<FCompute>("FCompute<gpu>", FtrlUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", FtrlUpdateEx<gpu>);
NNVM_REGISTER_OP(_sparse_adagrad_update)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdagradUpdateEx<gpu>);
NNVM_REGISTER_OP(lamb_update_phase1).set_attr<FCompute>("FCompute<gpu>", LambUpdatePhaseOne<gpu>);
NNVM_REGISTER_OP(lamb_update_phase2).set_attr<FCompute>("FCompute<gpu>", LambUpdatePhaseTwo<gpu>);
NNVM_REGISTER_OP(mp_lamb_update_phase1)
.set_attr<FCompute>("FCompute<gpu>", MPLambUpdatePhaseOne<gpu>);
NNVM_REGISTER_OP(mp_lamb_update_phase2)
.set_attr<FCompute>("FCompute<gpu>", MPLambUpdatePhaseTwo<gpu>);
} // namespace op
} // namespace mxnet
|
0006d1b6b20789e71196f9cf6f49349c61dbb81a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calculation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
char *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int constant = 1;
int vector_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((calculation), dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c,constant,vector_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((calculation), dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c,constant,vector_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((calculation), dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c,constant,vector_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0006d1b6b20789e71196f9cf6f49349c61dbb81a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calculation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
char *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int constant = 1;
int vector_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calculation<<<gridBlock,threadBlock>>>(a,b,c,constant,vector_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calculation<<<gridBlock,threadBlock>>>(a,b,c,constant,vector_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calculation<<<gridBlock,threadBlock>>>(a,b,c,constant,vector_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
db4c834de46e781bc64dac6605eec1864c85d0fa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.6
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include "eddl/hardware/gpu/nn/gpu_nn_kernels.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void gpu_traspose_batch_depth(float *ptrB, float *ptr, int b,int z,int r, int c)
{
long int ops=b*z*r*c;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops) {
int bo=thread_id_x/(z*r*c);
int zom=thread_id_x%(z*r*c);
int zo=zom/(r*c);
int rom=zom%(r*c);
int ro=rom/c;
int co=rom%c;
int pos=(zo*(b*r*c))+(bo*(r*c))+(ro*c)+co;
ptr[thread_id_x]=ptrB[pos];
}
}
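// Explanatory note: the index arithmetic above reads an input laid out as
// (depth, batch, rows, cols), which is why pos puts zo in the outermost
// position, and writes the element into an output laid out as
// (batch, depth, rows, cols); in other words it swaps the batch and depth axes
// while leaving the spatial dimensions untouched.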
__global__ void gpu_addbias_k(float *O, int batch, int r,int c,int nk,float *bias)
{
int size=nk*r*c;
int thread_id_x=threadIdx.x;
int p=blockIdx.x*size+thread_id_x*r*c;
for (int i = 0; i < r*c; i++)
O[p+i]+=bias[thread_id_x];
}
__global__ void gpu_deltabias_k(float *D, int batch, int r,int c,int nk,float *bias)
{
int size=nk*r*c;
int thread_id_x=threadIdx.x;
int p=blockIdx.x*size+thread_id_x*r*c;
for (int i = 0; i < r*c; i++)
atomicAdd(&(bias[thread_id_x]),D[p+i]);
}
__global__ void gpu_im2col_k(float* I, float *ptrI,int batch,int irows,int icols, int idepth, float* K, int nk, int kr,int kc, float* O,int orows,int ocols,int sr,int sc,int padrt,int padrb,int padcl,int padcr,int col2im)
{
long int ops=batch*orows*ocols*kr*kc*idepth;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops) {
int iz,ix,iy;
int ksize=kr*kc*idepth;
int im=thread_id_x/(ksize*orows*ocols);
int ioffset=im*irows*icols*idepth;
int tx=thread_id_x%(ksize*orows*ocols);
int r=tx/ksize;
int c=tx%ksize;
int oy=r/ocols;
int ox=r%ocols;
ix=(ox*sc)-padcl;
iy=(oy*sr)-padrt;
iz=c/(kr*kc);
c=c%(kr*kc);
iy+=c/kc;
ix+=c%kc;
if ((ix>=0)&&(ix<icols)&&(iy>=0)&&(iy<irows)) {
int p=iz*(irows*icols)+(iy*icols)+ix;
if (col2im)
atomicAdd(&(I[p+ioffset]),ptrI[thread_id_x]);
else
ptrI[thread_id_x]=I[p+ioffset];
}
else
if (!col2im)
ptrI[thread_id_x]=0;
}
}
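// Layout note for gpu_im2col_k: per image, ptrI is an (orows*ocols) x
// (idepth*kr*kc) patch matrix: row r holds the receptive field of output
// pixel r, column c selects one (channel, ky, kx) offset inside that patch, and
// out-of-bounds taps are written as zero (implicit padding). With col2im != 0
// the mapping is reversed and overlapping entries are accumulated back into I
// with atomicAdd. Worked size example (an assumption, not taken from this
// file): a 5x5 single-channel input with a 3x3 kernel, stride 1 and no padding
// gives orows = ocols = 3, so ptrI holds 9 x 9 = 81 floats per image.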
__global__ void gpu_im2col_k_low(float* I, int b, float *ptrI,int irows,int icols, int idepth, float* K, int nk, int kr,int kc, float* O,int orows,int ocols,int sr,int sc,int padrt,int padrb,int padcl,int padcr,int col2im)
{
long int ops=orows*ocols*kr*kc*idepth;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops) {
int iz,ix,iy;
int ksize=kr*kc*idepth;
int im=b;
int ioffset=im*irows*icols*idepth;
int tx=thread_id_x%(ksize*orows*ocols);
int r=tx/ksize;
int c=tx%ksize;
int oy=r/ocols;
int ox=r%ocols;
ix=(ox*sc)-padcl;
iy=(oy*sr)-padrt;
iz=c/(kr*kc);
c=c%(kr*kc);
iy+=c/kc;
ix+=c%kc;
if ((ix>=0)&&(ix<icols)&&(iy>=0)&&(iy<irows)) {
int p=iz*(irows*icols)+(iy*icols)+ix;
if (col2im)
atomicAdd(&(I[p+ioffset]),ptrI[thread_id_x]);
else
ptrI[thread_id_x]=I[p+ioffset];
}
else
if (!col2im)
ptrI[thread_id_x]=0;
}
}
| db4c834de46e781bc64dac6605eec1864c85d0fa.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.6
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include "eddl/hardware/gpu/nn/gpu_nn_kernels.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void gpu_traspose_batch_depth(float *ptrB, float *ptr, int b,int z,int r, int c)
{
long int ops=b*z*r*c;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops) {
int bo=thread_id_x/(z*r*c);
int zom=thread_id_x%(z*r*c);
int zo=zom/(r*c);
int rom=zom%(r*c);
int ro=rom/c;
int co=rom%c;
int pos=(zo*(b*r*c))+(bo*(r*c))+(ro*c)+co;
ptr[thread_id_x]=ptrB[pos];
}
}
__global__ void gpu_addbias_k(float *O, int batch, int r,int c,int nk,float *bias)
{
int size=nk*r*c;
int thread_id_x=threadIdx.x;
int p=blockIdx.x*size+thread_id_x*r*c;
for (int i = 0; i < r*c; i++)
O[p+i]+=bias[thread_id_x];
}
__global__ void gpu_deltabias_k(float *D, int batch, int r,int c,int nk,float *bias)
{
int size=nk*r*c;
int thread_id_x=threadIdx.x;
int p=blockIdx.x*size+thread_id_x*r*c;
for (int i = 0; i < r*c; i++)
atomicAdd(&(bias[thread_id_x]),D[p+i]);
}
__global__ void gpu_im2col_k(float* I, float *ptrI,int batch,int irows,int icols, int idepth, float* K, int nk, int kr,int kc, float* O,int orows,int ocols,int sr,int sc,int padrt,int padrb,int padcl,int padcr,int col2im)
{
long int ops=batch*orows*ocols*kr*kc*idepth;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops) {
int iz,ix,iy;
int ksize=kr*kc*idepth;
int im=thread_id_x/(ksize*orows*ocols);
int ioffset=im*irows*icols*idepth;
int tx=thread_id_x%(ksize*orows*ocols);
int r=tx/ksize;
int c=tx%ksize;
int oy=r/ocols;
int ox=r%ocols;
ix=(ox*sc)-padcl;
iy=(oy*sr)-padrt;
iz=c/(kr*kc);
c=c%(kr*kc);
iy+=c/kc;
ix+=c%kc;
if ((ix>=0)&&(ix<icols)&&(iy>=0)&&(iy<irows)) {
int p=iz*(irows*icols)+(iy*icols)+ix;
if (col2im)
atomicAdd(&(I[p+ioffset]),ptrI[thread_id_x]);
else
ptrI[thread_id_x]=I[p+ioffset];
}
else
if (!col2im)
ptrI[thread_id_x]=0;
}
}
__global__ void gpu_im2col_k_low(float* I, int b, float *ptrI,int irows,int icols, int idepth, float* K, int nk, int kr,int kc, float* O,int orows,int ocols,int sr,int sc,int padrt,int padrb,int padcl,int padcr,int col2im)
{
long int ops=orows*ocols*kr*kc*idepth;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops) {
int iz,ix,iy;
int ksize=kr*kc*idepth;
int im=b;
int ioffset=im*irows*icols*idepth;
int tx=thread_id_x%(ksize*orows*ocols);
int r=tx/ksize;
int c=tx%ksize;
int oy=r/ocols;
int ox=r%ocols;
ix=(ox*sc)-padcl;
iy=(oy*sr)-padrt;
iz=c/(kr*kc);
c=c%(kr*kc);
iy+=c/kc;
ix+=c%kc;
if ((ix>=0)&&(ix<icols)&&(iy>=0)&&(iy<irows)) {
int p=iz*(irows*icols)+(iy*icols)+ix;
if (col2im)
atomicAdd(&(I[p+ioffset]),ptrI[thread_id_x]);
else
ptrI[thread_id_x]=I[p+ioffset];
}
else
if (!col2im)
ptrI[thread_id_x]=0;
}
}
|
fd5263802301b040ccbed1e08162552e6cbe563e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// set flag for any includes that depend on it, like solver.h
#define SIE
#include <stdio.h>
#include <rocblas.h>
#include "solver.h"
#include "ode.h"
#include "utils.h"
#include "cuda_utils.h"
#include "vector_kernels.h"
#include "linear_algebra.h"
//#define DEBUG
#define useCUDA
//#include <cusolverDn.h>
//#include "magmablas.h"
void SIE_step(
float timestep, // current timestep, shared across all systems (passed by value)
float ** d_Jacobianss, // Nsystems x Neqn_p_sys*Neqn_p_sys 2d array with flattened jacobians
float * d_Jacobianss_flat,
float ** d_inversess, // inverse output, overwrite d_Jacobianss
float * d_inversess_flat,
float ** d_identity, // 1 x Neqn_p_sys*Neqn_p_sys array storing the identity (ideally in constant memory?)
float ** d_derivatives, // Nsystems x Neqn_p_sys 2d array to store derivatives
float * d_derivatives_flat, // Nsystems*Neqn_p_sys 1d array (flattened above)
float * d_equations_flat, // output state vector that iterative calls integrate in place
int Nsystems, // number of ODE systems
int Neqn_p_sys, // number of equations in each system
float * d_derivative_modification_flat){ // vector to subtract from hf before multiplying by A
/* -------------- initialize cublas -------------- */
// initialize cublas status tracking pointers
hipblasHandle_t handle;
int *P, *INFO;
// the handle carries the cuBLAS library context for calls issued on this stream;
// the v2 API it exposes also lets scalars like alpha/beta be passed by reference
// (host or device pointer) rather than by value.
// storage for pivot indices and per-system status codes used by batched cuBLAS
// factorizations (allocated below, freed at the end of this routine).
hipblasCreate(&handle);
hipMalloc(&P, Neqn_p_sys * Nsystems * sizeof(int));
hipMalloc(&INFO, Nsystems * sizeof(int));
//NOTE: uncomment this to use device pointers for constants
//hipblasSetPointerMode(handle,HIPBLAS_POINTER_MODE_DEVICE);
// scalars for adding/multiplying
float alpha = 1.0;
float beta = 0.0;
/* ----------------------------------------------- */
/* -------------- configure the grid ------------ */
int threads_per_block; // TODO replace this
dim3 matrix_gridDim;
dim3 vector_gridDim;
dim3 ode_gridDim;
configureGrid(
Nsystems,Neqn_p_sys,
&threads_per_block,
&matrix_gridDim,
&ode_gridDim,
&vector_gridDim);
/* ----------------------------------------------- */
/* -------------- invert the matrix -------------- */
hipblasStatus_t error;
hipblasStatus_t second_error;
if (d_Jacobianss != NULL){
// compute (I-hJ) with a custom kernel
#ifdef useCUDA
hipLaunchKernelGGL(( addArrayToBatchArrays), dim3(matrix_gridDim),dim3(threads_per_block), 0, 0,
d_identity,d_Jacobianss,1.0,-1.0,timestep,
Nsystems,Neqn_p_sys);
// flush any previous uncaught cuda errors
hipError_t cuda_error = hipGetLastError();
hipLaunchKernelGGL(( gjeInvertMatrixBatched), dim3(Nsystems),dim3(threads_per_block), 0, 0,
d_Jacobianss_flat,
d_inversess_flat,
Neqn_p_sys,
Nsystems);
//hipDeviceSynchronize();
#else
//TODO implement add to batch arrays in C
//TODO implement invert matrix batched in C
#endif
hipError_t gjeError = hipGetLastError();
if (gjeError != hipSuccess){
printf("Inversion failed: %s \n",hipGetErrorString(gjeError));
}
}
/* ----------------------------------------------- */
/* -------------- perform a matrix-vector mult --- */
if (d_derivative_modification_flat != NULL){
// (hf(n)-Delta(n-1)) into d_derivatives_flat
#ifdef useCUDA
hipLaunchKernelGGL(( addVectors), dim3(vector_gridDim),dim3(threads_per_block), 0, 0,
-1.0,d_derivative_modification_flat,
timestep, d_derivatives_flat,
d_derivatives_flat,Nsystems,Neqn_p_sys);
#else
// TODO implement addVectors in C
#endif
}
// multiply (I-h*Js)^-1 x fs
error = hipblasSgemmBatched(
handle,// cublas handle
HIPBLAS_OP_N,// no transformation
HIPBLAS_OP_N,// no transformation
Neqn_p_sys, //m- number of rows in A (and C)
1, //n- number of columns in B (and C)
Neqn_p_sys, //k-number of columns in A and rows in B
(const float *) &alpha, // alpha scalar
(const float **) d_inversess, // A matrix
Neqn_p_sys, // leading dimension of the 2d array storing A??
(const float **) d_derivatives, // B matrix (or n x 1 column vector)
Neqn_p_sys, // leading dimension of the 2d array storing B??
(const float *) &beta, // beta scalar
(float **) d_derivatives, // output "matrix," let's overwrite B
Neqn_p_sys, // leading dimension of the 2d array storing C??
Nsystems); // batch count
if (error != HIPBLAS_STATUS_SUCCESS){
_cudaGetErrorEnum(error);
printf("Sgemm broke\n");
}
/* ----------------------------------------------- */
/* ------------ update the current state --------- */
if (d_derivative_modification_flat == NULL){
// scale it explicitly in case calling context needs
// h x A(n) x f(n)
#ifdef useCUDA
hipLaunchKernelGGL(( scaleVector), dim3(vector_gridDim),dim3(threads_per_block), 0, 0,
d_derivatives_flat,
timestep,
Nsystems,
Neqn_p_sys);
// add ys + h x dys = ys + h x [(I-h*Js)^-1*fs]
hipLaunchKernelGGL(( addVectors), dim3(vector_gridDim),dim3(threads_per_block), 0, 0,
1.0, d_equations_flat,
1.0, d_derivatives_flat,
d_equations_flat,Nsystems,Neqn_p_sys);
#else
// TODO implement scale vector in C
// TODO implement add vectors in C
#endif
}
/* ----------------------------------------------- */
// shut down cublas
hipblasDestroy(handle);
hipFree(P); hipFree(INFO);
}
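/* ----------------------------------------------------------------------------
    Explanatory note: with d_derivative_modification_flat == NULL the routine
    above performs one semi-implicit (linearized backward) Euler update,

        y(n+1) = y(n) + h [I - h J(y(n))]^-1 f(y(n)),

    using the batched Gauss-Jordan kernel for the inverse and
    hipblasSgemmBatched for the matrix-vector product. When a modification
    vector Delta(n-1) is supplied it instead leaves
    A(n) (h f(n) - Delta(n-1)) in d_derivatives_flat for the caller and does
    not touch the state vector.
   ---------------------------------------------------------------------------- */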
int solveSystem(
float tnow,
float tend,
int n_integration_steps,
float ** d_Jacobianss, // matrix (jacobian) input
float * d_Jacobianss_flat,
float ** d_inversess,
float * d_inversess_flat,
float * jacobian_zeros,
float ** d_identity, // pointer to identity (ideally in constant memory?)
float ** d_derivatives, // vector (derivatives) input
float * d_derivatives_flat, // dy vector output
float * d_current_state_flat, // y vector output
float * d_constants,
int Nsystems, // number of systems
int Neqn_p_sys){
/* -------------- configure the grid ------------ */
int threads_per_block;
dim3 vector_gridDim;
configureGrid(
Nsystems,Neqn_p_sys,
&threads_per_block,
NULL,
NULL,
&vector_gridDim);
/* ----------------------------------------------- */
hipblasHandle_t handle;
hipblasStatus_t error;
hipblasCreate(&handle);
/* -------------- main integration loop ---------- */
int nsteps = 0;
float timestep = (tend-tnow)/n_integration_steps;
while (nsteps < n_integration_steps){
nsteps++;
// evaluate the derivative and jacobian at
// the current state
resetSystem(
d_derivatives,
d_derivatives_flat,
d_Jacobianss,
d_Jacobianss_flat,
d_constants,
d_current_state_flat,
jacobian_zeros,
Nsystems,
Neqn_p_sys,
tnow);
SIE_step(
timestep,
d_Jacobianss,
d_Jacobianss_flat,
d_inversess, // inverse output, overwrite d_Jacobianss
d_inversess_flat,
d_identity, // pointer to identity (ideally in constant memory?)
d_derivatives, // vector (derivatives) input
d_derivatives_flat, // dy vector output -- store A(n) x (hf(n) - Delta(n-1))
d_current_state_flat, // y vector output
Nsystems, // number of systems
Neqn_p_sys, // number of equations in each system
// flag to change d_equations_flat or just compute A(n) & hA(n)f(n)
NULL); // doubles as a flag to add A h f(n) + y(n)
tnow+=timestep;
}
hipblasDestroy(handle);
return nsteps;
}
int errorLoop(
float tnow,
float tend,
int n_integration_steps,
float ** d_Jacobianss, // matrix (jacobian) input
float * d_Jacobianss_flat,
float ** d_inversess,
float * d_inversess_flat,
float * jacobian_zeros,
float ** d_identity, // pointer to identity (ideally in constant memory?)
float ** d_derivatives, // vector (derivatives) input
float * d_derivatives_flat, // dy vector output
float * equations,
float * d_current_state_flat, // y vector output
float * d_half_current_state_flat,
float * d_constants,
int Nsystems, // number of systems
int Neqn_p_sys,
float ABSOLUTE,
float RELATIVE){
int * error_flag = (int *) malloc(sizeof(int));
int * d_error_flag;
hipMalloc(&d_error_flag,sizeof(int));
*error_flag = 0;
hipMemcpy(d_error_flag,error_flag,sizeof(int),hipMemcpyHostToDevice);
/* -------------- configure the grid ------------ */
int threads_per_block;
dim3 vector_gridDim;
configureGrid(
Nsystems,Neqn_p_sys,
&threads_per_block,
NULL,
NULL,
&vector_gridDim);
int nsteps=0;
/* ----------------------------------------------- */
// use a flag as a counter, why not
int unsolved = 0;
float timestep = (tend-tnow)/n_integration_steps;
while (tnow < tend && unsolved < 9){
// make sure we don't overintegrate
timestep = fmin(timestep,tend-tnow);
nsteps+=3;
solveSystem(
tnow,
tnow+timestep,
1,
d_Jacobianss,
d_Jacobianss_flat,
d_inversess,
d_inversess_flat,
jacobian_zeros,
d_identity,
d_derivatives,
d_derivatives_flat,
d_current_state_flat,
d_constants,
Nsystems,
Neqn_p_sys);
#ifdef ADAPTIVE_TIMESTEP
solveSystem(
tnow,
tnow+timestep,
2,
d_Jacobianss,
d_Jacobianss_flat,
d_inversess,
d_inversess_flat,
jacobian_zeros,
d_identity,
d_derivatives,
d_derivatives_flat,
d_half_current_state_flat,// the output state vector
d_constants,
Nsystems,
Neqn_p_sys);
#ifdef DEBUGBLOCK
// print the current state and how many steps
// we've taken
printf("%02d - y1\t",nsteps);
hipLaunchKernelGGL(( cudaRoutineFlat), dim3(1),dim3(Neqn_p_sys), 0, 0,
Neqn_p_sys*DEBUGBLOCK,d_current_state_flat);
hipDeviceSynchronize();
printf("%02d - y2\t",nsteps);
hipLaunchKernelGGL(( cudaRoutineFlat), dim3(1),dim3(Neqn_p_sys), 0, 0,
Neqn_p_sys*DEBUGBLOCK,d_half_current_state_flat);
hipDeviceSynchronize();
#endif
        // determine if ANY of the INDEPENDENT systems are above
// the tolerance and fail them all. NOTE: this makes them not
// independent.
#ifdef useCUDA
hipLaunchKernelGGL(( checkError), dim3(vector_gridDim),dim3(threads_per_block), 0, 0,
d_current_state_flat,d_half_current_state_flat,d_error_flag,
Nsystems,Neqn_p_sys,ABSOLUTE,RELATIVE);
// copy back the bool flag and determine if we done did it
hipMemcpy(error_flag,d_error_flag,sizeof(int),hipMemcpyDeviceToHost);
//*error_flag = 0;
#else
// TODO implement check error in C
#endif
if (*error_flag){
// increase the refinement level
unsolved++;
timestep/=2;
#ifdef LOUD
printf("refining...%d - %d\n",nsteps,unsolved);
#endif
*error_flag = 0;
// reset the error flag on the device
hipMemcpy(d_error_flag,error_flag,sizeof(int),hipMemcpyHostToDevice);
// reset the equation for the half-step
hipMemcpy(
d_half_current_state_flat,
equations,
Nsystems*Neqn_p_sys*sizeof(float),
hipMemcpyHostToDevice);
// copy this half-step to the previous full-step to save work
hipMemcpy(
d_current_state_flat,
d_half_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
hipMemcpyDeviceToDevice);
}// if unsolved
else{
unsolved=0;
// we did it, let's accept the value
// by accepting the half step
hipMemcpy(
d_current_state_flat,
d_half_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
hipMemcpyDeviceToDevice);
// and copying the value back to the host
// in case we need to refine later on
hipMemcpy(
equations,
d_half_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
hipMemcpyDeviceToHost);
tnow+=timestep;
// let's get more optimistic
timestep*=2;
}
}// while unsolved
#else
    // take only this one step and call it a day, the simplest way to proceed
    // when adaptive timestepping is disabled
tnow+=timestep;
hipMemcpy(
equations,
d_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
hipMemcpyDeviceToHost);
#endif
// free up memory
hipFree(d_error_flag);
free(error_flag);
// return computations performed
return nsteps*Nsystems;
}
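/* Editor's note (annotation, not original code): when ADAPTIVE_TIMESTEP is defined the
   loop above is a step-doubling error control -- each interval is integrated once with
   the full timestep into d_current_state_flat and again with two half steps into
   d_half_current_state_flat; if checkError flags the difference as exceeding the
   ABSOLUTE/RELATIVE tolerances the timestep is halved and the attempt repeated (the
   loop gives up after 9 consecutive refinements), otherwise the half-step result is
   accepted, copied back to the host, and the timestep is doubled for the next interval. */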
int cudaIntegrateSIE(
float tnow, // the current time
    float tend, // the time we are integrating the system to
    int n_integration_steps, // the number of integration steps to attempt, which sets the initial timestep
float * constants, // the constants for each system
float * equations, // a flattened array containing the y value for each equation in each system
int Nsystems, // the number of systems
    int Neqn_p_sys, // the number of equations in each system
    float ABSOLUTE, // absolute tolerance for the error check
    float RELATIVE){ // relative tolerance for the error check
#ifdef LOUD
printf("SIE Received %d systems, %d equations per system\n",Nsystems,Neqn_p_sys);
#endif
// define the identity matrix on the host
float *identity_flat = (float *)malloc(Neqn_p_sys*Neqn_p_sys*sizeof(float));
setIdentityMatrix(identity_flat,Neqn_p_sys);
// set a batchsize of one
float * d_identity_flat;
float ** d_identity = initializeDeviceMatrix(identity_flat,&d_identity_flat,Neqn_p_sys*Neqn_p_sys,1);
/* -------------- move data to device ------------ */
// zeros to initialize jacobians with
float * jacobian_zeros = (float *) malloc(Nsystems*Neqn_p_sys*Neqn_p_sys*sizeof(float));
for (int i=0; i<Neqn_p_sys*Neqn_p_sys*Nsystems; i++){
jacobian_zeros[i]=0;
}
// allocate memory for Jacobian matrices as a single "batch"
float *d_Jacobianss_flat;
float **d_Jacobianss = initializeDeviceMatrix(jacobian_zeros,&d_Jacobianss_flat,Neqn_p_sys*Neqn_p_sys,Nsystems);
    // allocate memory for the inverse matrices as a single "batch"
float *d_inversess_flat;
float **d_inversess = initializeDeviceMatrix(jacobian_zeros,&d_inversess_flat,Neqn_p_sys*Neqn_p_sys,Nsystems);
// initialize state-equation vectors
float * zeros = (float *) malloc(Nsystems*Neqn_p_sys*sizeof(float));
for (int i=0; i<Neqn_p_sys*Nsystems; i++){
zeros[i]=0;
}
// constants that define the ODEs
/* TODO put this in constant memory instead-- does the below work?
__constant__ float d_constants[NUM_CONST]; // NUM_CONST #define'd in ode.h
hipMemcpyToSymbol(constants,d_constants,sizeof(d_constants));
*/
float * d_constants;
hipMalloc(&d_constants,Nsystems*NUM_CONST*sizeof(float));
hipMemcpy(d_constants,constants,Nsystems*NUM_CONST*sizeof(float),hipMemcpyHostToDevice);
// state equations, where output will be stored
float *d_current_state_flat;
float **d_current_state = initializeDeviceMatrix(equations,&d_current_state_flat,Neqn_p_sys,Nsystems);
float *d_half_current_state_flat;
float **d_half_current_state = initializeDeviceMatrix(
equations,&d_half_current_state_flat,Neqn_p_sys,Nsystems);
// initialize derivative vectors
float *d_derivatives_flat;
float **d_derivatives = initializeDeviceMatrix(zeros,&d_derivatives_flat,Neqn_p_sys,Nsystems);
/* ----------------------------------------------- */
int nsteps = errorLoop(
tnow,
tend,
n_integration_steps,
d_Jacobianss, // matrix (jacobian) input
d_Jacobianss_flat,
d_inversess,
d_inversess_flat,
jacobian_zeros,
d_identity, // pointer to identity (ideally in constant memory?)
d_derivatives, // vector (derivatives) input
d_derivatives_flat, // dy vector output
equations,
d_current_state_flat, // y vector output
d_half_current_state_flat,
d_constants,
Nsystems, // number of systems
Neqn_p_sys,
ABSOLUTE,
RELATIVE);
#ifdef LOUD
    printf("nsteps taken: %d - tend: %.2f\n",nsteps,tend);
#endif
/* -------------- shutdown by freeing memory --- */
hipFree(d_identity); hipFree(d_identity_flat);
hipFree(d_Jacobianss); hipFree(d_Jacobianss_flat);
hipFree(d_inversess); hipFree(d_inversess_flat);
hipFree(d_current_state); hipFree(d_current_state_flat);
hipFree(d_half_current_state); hipFree(d_half_current_state_flat);
hipFree(d_derivatives); hipFree(d_derivatives_flat);
free(zeros); free(jacobian_zeros);
free(identity_flat);
/* ----------------------------------------------- */
//return how many steps were taken
return nsteps;
}
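/* Editor's usage sketch (annotation, not original code): a host driver might call the
   integrator roughly like this; the sizes and tolerance values are illustrative and
   NUM_CONST comes from ode.h as noted above.

       float * constants = (float *) malloc(Nsystems*NUM_CONST*sizeof(float));
       float * equations = (float *) malloc(Nsystems*Neqn_p_sys*sizeof(float));
       // ... fill constants and the initial conditions ...
       int nsteps = cudaIntegrateSIE(
           0.0, 1.0,  // integrate from t=0 to t=1
           4,         // initial number of steps, which sets the first timestep
           constants,
           equations, // overwritten in place with the final state
           Nsystems, Neqn_p_sys,
           1e-3, 1e-3); // ABSOLUTE and RELATIVE tolerances
*/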
| fd5263802301b040ccbed1e08162552e6cbe563e.cu | // set flag for any includes that depend on it, like solver.h
#define SIE
#include <stdio.h>
#include <cublas_v2.h>
#include "solver.h"
#include "ode.h"
#include "utils.h"
#include "cuda_utils.h"
#include "vector_kernels.h"
#include "linear_algebra.h"
//#define DEBUG
#define useCUDA
//#include <cusolverDn.h>
//#include "magmablas.h"
void SIE_step(
    float timestep, // the current timestep (shared across all systems, unfortunately)
float ** d_Jacobianss, // Nsystems x Neqn_p_sys*Neqn_p_sys 2d array with flattened jacobians
float * d_Jacobianss_flat,
float ** d_inversess, // inverse output, overwrite d_Jacobianss
float * d_inversess_flat,
float ** d_identity, // 1 x Neqn_p_sys*Neqn_p_sys array storing the identity (ideally in constant memory?)
float ** d_derivatives, // Nsystems x Neqn_p_sys 2d array to store derivatives
float * d_derivatives_flat, // Nsystems*Neqn_p_sys 1d array (flattened above)
float * d_equations_flat, // output state vector, iterative calls integrates
int Nsystems, // number of ODE systems
int Neqn_p_sys, // number of equations in each system
    float * d_derivative_modification_flat){ // vector to subtract from hf before multiplying by A
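    /* Editor's note (annotation, not original code): when d_derivative_modification_flat
       is NULL this routine performs the semi-implicit Euler update
           y(n+1) = y(n) + h * (I - h*J)^(-1) * f(y(n));
       when it is supplied, only the increment
           A(n) * (h*f(n) - Delta(n-1)),  with A(n) = (I - h*J)^(-1),
       is formed and left in d_derivatives_flat for the caller. */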
/* -------------- initialize cublas -------------- */
// initialize cublas status tracking pointers
cublasHandle_t handle;
int *P, *INFO;
    // the handle ties cuBLAS calls together on a stream; the v2 API also lets the
    // scalar arguments (alpha/beta) be passed by reference rather than by value
    // P and INFO are pivot/status arrays for cuBLAS batched factorization routines
cublasCreate_v2(&handle);
cudaMalloc(&P, Neqn_p_sys * Nsystems * sizeof(int));
cudaMalloc(&INFO, Nsystems * sizeof(int));
//NOTE: uncomment this to use device pointers for constants
//cublasSetPointerMode(handle,CUBLAS_POINTER_MODE_DEVICE);
// scalars for adding/multiplying
float alpha = 1.0;
float beta = 0.0;
/* ----------------------------------------------- */
/* -------------- configure the grid ------------ */
int threads_per_block; // TODO replace this
dim3 matrix_gridDim;
dim3 vector_gridDim;
dim3 ode_gridDim;
configureGrid(
Nsystems,Neqn_p_sys,
&threads_per_block,
&matrix_gridDim,
&ode_gridDim,
&vector_gridDim);
/* ----------------------------------------------- */
/* -------------- invert the matrix -------------- */
cublasStatus_t error;
cublasStatus_t second_error;
if (d_Jacobianss != NULL){
// compute (I-hJ) with a custom kernel
#ifdef useCUDA
addArrayToBatchArrays<<<matrix_gridDim,threads_per_block>>>(
d_identity,d_Jacobianss,1.0,-1.0,timestep,
Nsystems,Neqn_p_sys);
// flush any previous uncaught cuda errors
cudaError_t cuda_error = cudaGetLastError();
gjeInvertMatrixBatched<<<Nsystems,threads_per_block>>>(
d_Jacobianss_flat,
d_inversess_flat,
Neqn_p_sys,
Nsystems);
//cudaDeviceSynchronize();
#else
//TODO implement add to batch arrays in C
//TODO implement invert matrix batched in C
#endif
cudaError_t gjeError = cudaGetLastError();
if (gjeError != cudaSuccess){
printf("Inversion failed: %s \n",cudaGetErrorString(gjeError));
}
}
/* ----------------------------------------------- */
/* -------------- perform a matrix-vector mult --- */
if (d_derivative_modification_flat != NULL){
// (hf(n)-Delta(n-1)) into d_derivatives_flat
#ifdef useCUDA
addVectors<<<vector_gridDim,threads_per_block>>>(
-1.0,d_derivative_modification_flat,
timestep, d_derivatives_flat,
d_derivatives_flat,Nsystems,Neqn_p_sys);
#else
// TODO implement addVectors in C
#endif
}
// multiply (I-h*Js)^-1 x fs
error = cublasSgemmBatched(
handle,// cublas handle
CUBLAS_OP_N,// no transformation
CUBLAS_OP_N,// no transformation
Neqn_p_sys, //m- number of rows in A (and C)
1, //n- number of columns in B (and C)
Neqn_p_sys, //k-number of columns in A and rows in B
(const float *) &alpha, // alpha scalar
(const float **) d_inversess, // A matrix
        Neqn_p_sys, // leading dimension of A: the number of rows, Neqn_p_sys, in column-major storage
        (const float **) d_derivatives, // B matrix (or n x 1 column vector)
        Neqn_p_sys, // leading dimension of B, the Neqn_p_sys x 1 column vector
        (const float *) &beta, // beta scalar
        (float **) d_derivatives, // output "matrix," let's overwrite B
        Neqn_p_sys, // leading dimension of C, same shape as B
Nsystems); // batch count
if (error != CUBLAS_STATUS_SUCCESS){
_cudaGetErrorEnum(error);
printf("Sgemm broke\n");
}
/* ----------------------------------------------- */
/* ------------ update the current state --------- */
if (d_derivative_modification_flat == NULL){
        // scale it explicitly in case the calling context needs
// h x A(n) x f(n)
#ifdef useCUDA
scaleVector<<<vector_gridDim,threads_per_block>>>(
d_derivatives_flat,
timestep,
Nsystems,
Neqn_p_sys);
// add ys + h x dys = ys + h x [(I-h*Js)^-1*fs]
addVectors<<<vector_gridDim,threads_per_block>>>(
1.0, d_equations_flat,
1.0, d_derivatives_flat,
d_equations_flat,Nsystems,Neqn_p_sys);
#else
// TODO implement scale vector in C
// TODO implement add vectors in C
#endif
}
/* ----------------------------------------------- */
// shut down cublas
cublasDestroy_v2(handle);
cudaFree(P); cudaFree(INFO);
}
int solveSystem(
float tnow,
float tend,
int n_integration_steps,
float ** d_Jacobianss, // matrix (jacobian) input
float * d_Jacobianss_flat,
float ** d_inversess,
float * d_inversess_flat,
float * jacobian_zeros,
float ** d_identity, // pointer to identity (ideally in constant memory?)
float ** d_derivatives, // vector (derivatives) input
float * d_derivatives_flat, // dy vector output
float * d_current_state_flat, // y vector output
float * d_constants,
int Nsystems, // number of systems
int Neqn_p_sys){
/* -------------- configure the grid ------------ */
int threads_per_block;
dim3 vector_gridDim;
configureGrid(
Nsystems,Neqn_p_sys,
&threads_per_block,
NULL,
NULL,
&vector_gridDim);
/* ----------------------------------------------- */
cublasHandle_t handle;
cublasStatus_t error;
cublasCreate_v2(&handle);
/* -------------- main integration loop ---------- */
int nsteps = 0;
float timestep = (tend-tnow)/n_integration_steps;
while (nsteps < n_integration_steps){
nsteps++;
// evaluate the derivative and jacobian at
// the current state
resetSystem(
d_derivatives,
d_derivatives_flat,
d_Jacobianss,
d_Jacobianss_flat,
d_constants,
d_current_state_flat,
jacobian_zeros,
Nsystems,
Neqn_p_sys,
tnow);
SIE_step(
timestep,
d_Jacobianss,
d_Jacobianss_flat,
d_inversess, // inverse output, overwrite d_Jacobianss
d_inversess_flat,
d_identity, // pointer to identity (ideally in constant memory?)
d_derivatives, // vector (derivatives) input
d_derivatives_flat, // dy vector output -- store A(n) x (hf(n) - Delta(n-1))
d_current_state_flat, // y vector output
Nsystems, // number of systems
Neqn_p_sys, // number of equations in each system
// flag to change d_equations_flat or just compute A(n) & hA(n)f(n)
NULL); // doubles as a flag to add A h f(n) + y(n)
tnow+=timestep;
}
cublasDestroy_v2(handle);
return nsteps;
}
int errorLoop(
float tnow,
float tend,
int n_integration_steps,
float ** d_Jacobianss, // matrix (jacobian) input
float * d_Jacobianss_flat,
float ** d_inversess,
float * d_inversess_flat,
float * jacobian_zeros,
float ** d_identity, // pointer to identity (ideally in constant memory?)
float ** d_derivatives, // vector (derivatives) input
float * d_derivatives_flat, // dy vector output
float * equations,
float * d_current_state_flat, // y vector output
float * d_half_current_state_flat,
float * d_constants,
int Nsystems, // number of systems
int Neqn_p_sys,
float ABSOLUTE,
float RELATIVE){
int * error_flag = (int *) malloc(sizeof(int));
int * d_error_flag;
cudaMalloc(&d_error_flag,sizeof(int));
*error_flag = 0;
cudaMemcpy(d_error_flag,error_flag,sizeof(int),cudaMemcpyHostToDevice);
/* -------------- configure the grid ------------ */
int threads_per_block;
dim3 vector_gridDim;
configureGrid(
Nsystems,Neqn_p_sys,
&threads_per_block,
NULL,
NULL,
&vector_gridDim);
int nsteps=0;
/* ----------------------------------------------- */
// use a flag as a counter, why not
int unsolved = 0;
float timestep = (tend-tnow)/n_integration_steps;
while (tnow < tend && unsolved < 9){
// make sure we don't overintegrate
timestep = fmin(timestep,tend-tnow);
nsteps+=3;
solveSystem(
tnow,
tnow+timestep,
1,
d_Jacobianss,
d_Jacobianss_flat,
d_inversess,
d_inversess_flat,
jacobian_zeros,
d_identity,
d_derivatives,
d_derivatives_flat,
d_current_state_flat,
d_constants,
Nsystems,
Neqn_p_sys);
#ifdef ADAPTIVE_TIMESTEP
solveSystem(
tnow,
tnow+timestep,
2,
d_Jacobianss,
d_Jacobianss_flat,
d_inversess,
d_inversess_flat,
jacobian_zeros,
d_identity,
d_derivatives,
d_derivatives_flat,
d_half_current_state_flat,// the output state vector
d_constants,
Nsystems,
Neqn_p_sys);
#ifdef DEBUGBLOCK
// print the current state and how many steps
// we've taken
printf("%02d - y1\t",nsteps);
cudaRoutineFlat<<<1,Neqn_p_sys>>>(
Neqn_p_sys*DEBUGBLOCK,d_current_state_flat);
cudaDeviceSynchronize();
printf("%02d - y2\t",nsteps);
cudaRoutineFlat<<<1,Neqn_p_sys>>>(
Neqn_p_sys*DEBUGBLOCK,d_half_current_state_flat);
cudaDeviceSynchronize();
#endif
        // determine if ANY of the INDEPENDENT systems are above
// the tolerance and fail them all. NOTE: this makes them not
// independent.
#ifdef useCUDA
checkError<<<vector_gridDim,threads_per_block>>>(
d_current_state_flat,d_half_current_state_flat,d_error_flag,
Nsystems,Neqn_p_sys,ABSOLUTE,RELATIVE);
// copy back the bool flag and determine if we done did it
cudaMemcpy(error_flag,d_error_flag,sizeof(int),cudaMemcpyDeviceToHost);
//*error_flag = 0;
#else
// TODO implement check error in C
#endif
if (*error_flag){
// increase the refinement level
unsolved++;
timestep/=2;
#ifdef LOUD
printf("refining...%d - %d\n",nsteps,unsolved);
#endif
*error_flag = 0;
// reset the error flag on the device
cudaMemcpy(d_error_flag,error_flag,sizeof(int),cudaMemcpyHostToDevice);
// reset the equation for the half-step
cudaMemcpy(
d_half_current_state_flat,
equations,
Nsystems*Neqn_p_sys*sizeof(float),
cudaMemcpyHostToDevice);
// copy this half-step to the previous full-step to save work
cudaMemcpy(
d_current_state_flat,
d_half_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
cudaMemcpyDeviceToDevice);
}// if unsolved
else{
unsolved=0;
// we did it, let's accept the value
// by accepting the half step
cudaMemcpy(
d_current_state_flat,
d_half_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
cudaMemcpyDeviceToDevice);
// and copying the value back to the host
// in case we need to refine later on
cudaMemcpy(
equations,
d_half_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
cudaMemcpyDeviceToHost);
tnow+=timestep;
// let's get more optimistic
timestep*=2;
}
}// while unsolved
#else
    // take only this one step and call it a day, the simplest way to proceed
    // when adaptive timestepping is disabled
tnow+=timestep;
cudaMemcpy(
equations,
d_current_state_flat,
Nsystems*Neqn_p_sys*sizeof(float),
cudaMemcpyDeviceToHost);
#endif
// free up memory
cudaFree(d_error_flag);
free(error_flag);
// return computations performed
return nsteps*Nsystems;
}
int cudaIntegrateSIE(
float tnow, // the current time
    float tend, // the time we are integrating the system to
    int n_integration_steps, // the number of integration steps to attempt, which sets the initial timestep
float * constants, // the constants for each system
float * equations, // a flattened array containing the y value for each equation in each system
int Nsystems, // the number of systems
    int Neqn_p_sys, // the number of equations in each system
    float ABSOLUTE, // absolute tolerance for the error check
    float RELATIVE){ // relative tolerance for the error check
#ifdef LOUD
printf("SIE Received %d systems, %d equations per system\n",Nsystems,Neqn_p_sys);
#endif
// define the identity matrix on the host
float *identity_flat = (float *)malloc(Neqn_p_sys*Neqn_p_sys*sizeof(float));
setIdentityMatrix(identity_flat,Neqn_p_sys);
// set a batchsize of one
float * d_identity_flat;
float ** d_identity = initializeDeviceMatrix(identity_flat,&d_identity_flat,Neqn_p_sys*Neqn_p_sys,1);
/* -------------- move data to device ------------ */
// zeros to initialize jacobians with
float * jacobian_zeros = (float *) malloc(Nsystems*Neqn_p_sys*Neqn_p_sys*sizeof(float));
for (int i=0; i<Neqn_p_sys*Neqn_p_sys*Nsystems; i++){
jacobian_zeros[i]=0;
}
// allocate memory for Jacobian matrices as a single "batch"
float *d_Jacobianss_flat;
float **d_Jacobianss = initializeDeviceMatrix(jacobian_zeros,&d_Jacobianss_flat,Neqn_p_sys*Neqn_p_sys,Nsystems);
    // allocate memory for the inverse matrices as a single "batch"
float *d_inversess_flat;
float **d_inversess = initializeDeviceMatrix(jacobian_zeros,&d_inversess_flat,Neqn_p_sys*Neqn_p_sys,Nsystems);
// initialize state-equation vectors
float * zeros = (float *) malloc(Nsystems*Neqn_p_sys*sizeof(float));
for (int i=0; i<Neqn_p_sys*Nsystems; i++){
zeros[i]=0;
}
// constants that define the ODEs
/* TODO put this in constant memory instead-- does the below work?
__constant__ float d_constants[NUM_CONST]; // NUM_CONST #define'd in ode.h
cudaMemcpyToSymbol(constants,d_constants,sizeof(d_constants));
*/
float * d_constants;
cudaMalloc(&d_constants,Nsystems*NUM_CONST*sizeof(float));
cudaMemcpy(d_constants,constants,Nsystems*NUM_CONST*sizeof(float),cudaMemcpyHostToDevice);
// state equations, where output will be stored
float *d_current_state_flat;
float **d_current_state = initializeDeviceMatrix(equations,&d_current_state_flat,Neqn_p_sys,Nsystems);
float *d_half_current_state_flat;
float **d_half_current_state = initializeDeviceMatrix(
equations,&d_half_current_state_flat,Neqn_p_sys,Nsystems);
// initialize derivative vectors
float *d_derivatives_flat;
float **d_derivatives = initializeDeviceMatrix(zeros,&d_derivatives_flat,Neqn_p_sys,Nsystems);
/* ----------------------------------------------- */
int nsteps = errorLoop(
tnow,
tend,
n_integration_steps,
d_Jacobianss, // matrix (jacobian) input
d_Jacobianss_flat,
d_inversess,
d_inversess_flat,
jacobian_zeros,
d_identity, // pointer to identity (ideally in constant memory?)
d_derivatives, // vector (derivatives) input
d_derivatives_flat, // dy vector output
equations,
d_current_state_flat, // y vector output
d_half_current_state_flat,
d_constants,
Nsystems, // number of systems
Neqn_p_sys,
ABSOLUTE,
RELATIVE);
#ifdef LOUD
    printf("nsteps taken: %d - tend: %.2f\n",nsteps,tend);
#endif
/* -------------- shutdown by freeing memory --- */
cudaFree(d_identity); cudaFree(d_identity_flat);
cudaFree(d_Jacobianss); cudaFree(d_Jacobianss_flat);
cudaFree(d_inversess); cudaFree(d_inversess_flat);
cudaFree(d_current_state); cudaFree(d_current_state_flat);
cudaFree(d_half_current_state); cudaFree(d_half_current_state_flat);
cudaFree(d_derivatives); cudaFree(d_derivatives_flat);
free(zeros); free(jacobian_zeros);
free(identity_flat);
/* ----------------------------------------------- */
//return how many steps were taken
return nsteps;
}
|
f753afdfa862d9b6c2fc5fd89c224f09f682368a.hip | // !!! This is a file automatically generated by hipify!!!
#include "defines.h"
#include <vector>
#include <complex>
#include <math.h>
#include <hip/hip_runtime.h>
using namespace std;
typedef complex<float> fcomp;
const float PI = acos(-1);
fcomp float2_to_fcomp(float2 f2) {
fcomp fc(f2.x, f2.y);
return fc;
}
void copy_f2s_to_comps(float2* vxf, float2* vyf, vector<fcomp> &vxc, vector<fcomp> &vyc) {
for (int i = 0; i < DIM_FFT_DATA; i++) {
vxc[i] = float2_to_fcomp(vxf[i]);
vyc[i] = float2_to_fcomp(vyf[i]);
}
}
float2 fcomp_to_float2(fcomp fc) {
float2 f2;
f2.x = real(fc);
f2.y = imag(fc);
return f2;
}
void copy_comps_to_f2s(vector<fcomp> vxc, vector<fcomp> vyc, float2* vxf, float2* vyf) {
for (int i = 0; i < DIM_FFT_DATA; i++) {
vxf[i] = fcomp_to_float2(vxc[i]);
vyf[i] = fcomp_to_float2(vyc[i]);
}
}
void fft_cpu(vector<fcomp> &x, bool invert) {
int n = x.size();
if (n == 1) // base case for recursion
return;
vector<fcomp> x0(n/2), x1(n/2);
for (int i = 0; i * 2 < n; i++) {
x0[i] = x[2*i];
x1[i] = x[2*i+1];
}
fft_cpu(x0, invert);
fft_cpu(x1, invert);
float ang = 2*PI/n;
if (invert) ang = -ang;
    fcomp omega(cos(ang), sin(ang)); // primitive n-th root of unity (the twiddle factor)
fcomp w(1);
for (int i = 0; i*2 < n; i++) {
x[i] = x0[i] + w * x1[i];
x[i+n/2] = x0[i] - w * x1[i];
        if (invert) { // dividing by 2 at every level of the binary recursion divides every element by n overall
x[i] /= 2;
x[i + n/2] /= 2;
}
w *= omega;
}
}
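/* Editor's note (annotation, not original code): each level of the recursion above
   combines the even/odd half-transforms with the usual butterfly
       X[k] = E[k] + w^k * O[k],   X[k + n/2] = E[k] - w^k * O[k],
   where w is an n-th root of unity (conjugated for the inverse); the repeated division
   by 2 on the inverse path supplies the overall 1/n normalization. */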
void advect_velocity_cpu(float2* v, float* vx, float* vy, int dim, float dt) {
int2 pprev; // previous voxel position
float2 vprev; // previous voxel velocity
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
// trace back to previous position using velocity at current position
vprev = v[ind];
pprev.x = int((i + 0.5f) - (dt * vprev.x * dim));
pprev.y = int((j + 0.5f) - (dt * vprev.y * dim));
// wrap around the border
if (pprev.x > dim) pprev.x -= dim;
if (pprev.y > dim) pprev.y -= dim;
if (pprev.x < 0) pprev.x += dim;
if (pprev.y < 0) pprev.y += dim;
// save velocity from past voxel in component vectors
int p_ind = pprev.x * dim + pprev.y;
vprev = v[p_ind];
vx[ind] = vprev.x;
vy[ind] = vprev.y;
}
}
}
void diffuse_projection_cpu(float2 *vxcomp, float2 *vycomp, int dim, float dt, float visc) {
// complex velocity FFT x- and y-components for computation
float2 vxc, vyc; // note the .x and .y attributes of these correspond to real
// and imaginary components for each velocity FFT term
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
vxc = vxcomp[ind];
vyc = vycomp[ind];
// compute index in FFT components
float iix = (float) i;
float iiy = (float) j;
if (j > (dim/2)) iiy -= (float) dim;
// calculate diffusion constant (diff) based on viscosity with smoothing
float k2 = iix*iix + iiy*iiy;
float diff = 1.0f / (1.0f + visc * dt * k2);
vxc.x *= diff;
vxc.y *= diff;
vyc.x *= diff;
vyc.y *= diff;
if (k2 > 0.) {
                // for nonzero wavenumber, perform the velocity projection
float k2_inv = 1.0f / k2; // scaling the size of change in frequency domain
// other options on https://www.mathworks.com/matlabcentral/answers/15770-scaling-the-fft-and-the-ifft
float vp_real = (iix*vxc.x + iiy*vyc.x) * k2_inv;
float vp_imag = (iix*vxc.y + iiy*vyc.y) * k2_inv;
vxc.x -= vp_real * iix;
vxc.y -= vp_imag * iix;
vyc.x -= vp_real * iiy;
vyc.y -= vp_imag * iiy;
}
vxcomp[ind] = vxc;
vycomp[ind] = vyc;
}
}
}
void update_velocity_cpu(float2* v, float* vx, float* vy, int dim) {
float2 vnew;
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
// scale FFT (other options suggested on mathworks forum https://www.mathworks.com/matlabcentral/answers/15770-scaling-the-fft-and-the-ifft)
float scale = 1.f;// / ((float)sqrt(dim));
vnew.x = vx[ind] * scale;
vnew.y = vy[ind] * scale;
v[ind] = vnew;
}
}
}
void advect_particles_cpu(float2* p, float2* v, int dim, float dt) {
float2 pt, vt;
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
pt = p[ind];
// grab vt at the voxel pt points to
int pti = (int) (pt.x*dim);
int ptj = (int) (pt.y*dim);
int pind = pti * dim + ptj;
vt = v[pind];
            // update positions
pt.x += dt * vt.x;
pt.y += dt * vt.y;
if (pt.x < 0.f) pt.x += 1.f;
if (pt.x > 1.f) pt.x -= 1.f;
if (pt.y < 0.f) pt.y += 1.f;
if (pt.y > 1.f) pt.y -= 1.f;
p[ind] = pt;
}
}
}
void add_forces_cpu(float2 *v, int dim, int spx, int spy, float fx, float fy, int r) {
float2 vt;
for (int i = 0; i < 2*r; i++) {
for (int j = 0; j < 2*r; j++) {
int ind = (i + spx) * dim + j + spy;
vt = v[ind];
float s = 1.f / (1.f + pow(i-r, 4) + pow(j-r, 4));
vt.x += s * fx;
vt.y += s * fy;
v[ind] = vt;
}
}
}
| f753afdfa862d9b6c2fc5fd89c224f09f682368a.cu | #include "defines.h"
#include <vector>
#include <complex>
#include <math.h>
#include <cuda_runtime.h>
using namespace std;
typedef complex<float> fcomp;
const float PI = acos(-1);
fcomp float2_to_fcomp(float2 f2) {
fcomp fc(f2.x, f2.y);
return fc;
}
void copy_f2s_to_comps(float2* vxf, float2* vyf, vector<fcomp> &vxc, vector<fcomp> &vyc) {
for (int i = 0; i < DIM_FFT_DATA; i++) {
vxc[i] = float2_to_fcomp(vxf[i]);
vyc[i] = float2_to_fcomp(vyf[i]);
}
}
float2 fcomp_to_float2(fcomp fc) {
float2 f2;
f2.x = real(fc);
f2.y = imag(fc);
return f2;
}
void copy_comps_to_f2s(vector<fcomp> vxc, vector<fcomp> vyc, float2* vxf, float2* vyf) {
for (int i = 0; i < DIM_FFT_DATA; i++) {
vxf[i] = fcomp_to_float2(vxc[i]);
vyf[i] = fcomp_to_float2(vyc[i]);
}
}
void fft_cpu(vector<fcomp> &x, bool invert) {
int n = x.size();
if (n == 1) // base case for recursion
return;
vector<fcomp> x0(n/2), x1(n/2);
for (int i = 0; i * 2 < n; i++) {
x0[i] = x[2*i];
x1[i] = x[2*i+1];
}
fft_cpu(x0, invert);
fft_cpu(x1, invert);
float ang = 2*PI/n;
if (invert) ang = -ang;
    fcomp omega(cos(ang), sin(ang)); // primitive n-th root of unity (the twiddle factor)
fcomp w(1);
for (int i = 0; i*2 < n; i++) {
x[i] = x0[i] + w * x1[i];
x[i+n/2] = x0[i] - w * x1[i];
        if (invert) { // dividing by 2 at every level of the binary recursion divides every element by n overall
x[i] /= 2;
x[i + n/2] /= 2;
}
w *= omega;
}
}
void advect_velocity_cpu(float2* v, float* vx, float* vy, int dim, float dt) {
int2 pprev; // previous voxel position
float2 vprev; // previous voxel velocity
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
// trace back to previous position using velocity at current position
vprev = v[ind];
pprev.x = int((i + 0.5f) - (dt * vprev.x * dim));
pprev.y = int((j + 0.5f) - (dt * vprev.y * dim));
// wrap around the border
if (pprev.x > dim) pprev.x -= dim;
if (pprev.y > dim) pprev.y -= dim;
if (pprev.x < 0) pprev.x += dim;
if (pprev.y < 0) pprev.y += dim;
// save velocity from past voxel in component vectors
int p_ind = pprev.x * dim + pprev.y;
vprev = v[p_ind];
vx[ind] = vprev.x;
vy[ind] = vprev.y;
}
}
}
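/* Editor's note (annotation, not original code): this is a semi-Lagrangian advection
   step -- each voxel traces backwards along its own velocity, p_prev = p - dt*v(p)*dim,
   wraps at the periodic boundary, and copies the velocity found there into the separate
   vx/vy component arrays used by the later stages. */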
void diffuse_projection_cpu(float2 *vxcomp, float2 *vycomp, int dim, float dt, float visc) {
// complex velocity FFT x- and y-components for computation
float2 vxc, vyc; // note the .x and .y attributes of these correspond to real
// and imaginary components for each velocity FFT term
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
vxc = vxcomp[ind];
vyc = vycomp[ind];
// compute index in FFT components
float iix = (float) i;
float iiy = (float) j;
if (j > (dim/2)) iiy -= (float) dim;
// calculate diffusion constant (diff) based on viscosity with smoothing
float k2 = iix*iix + iiy*iiy;
float diff = 1.0f / (1.0f + visc * dt * k2);
vxc.x *= diff;
vxc.y *= diff;
vyc.x *= diff;
vyc.y *= diff;
if (k2 > 0.) {
                // for nonzero wavenumber, perform the velocity projection
float k2_inv = 1.0f / k2; // scaling the size of change in frequency domain
// other options on https://www.mathworks.com/matlabcentral/answers/15770-scaling-the-fft-and-the-ifft
float vp_real = (iix*vxc.x + iiy*vyc.x) * k2_inv;
float vp_imag = (iix*vxc.y + iiy*vyc.y) * k2_inv;
vxc.x -= vp_real * iix;
vxc.y -= vp_imag * iix;
vyc.x -= vp_real * iiy;
vyc.y -= vp_imag * iiy;
}
vxcomp[ind] = vxc;
vycomp[ind] = vyc;
}
}
}
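/* Editor's note (annotation, not original code): in Fourier space the routine above
   applies implicit diffusion, v_hat *= 1/(1 + visc*dt*|k|^2), followed by a Helmholtz
   projection that removes the divergent part,
       v_hat -= k * (k . v_hat) / |k|^2,
   which keeps the velocity field (approximately) divergence-free. */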
void update_velocity_cpu(float2* v, float* vx, float* vy, int dim) {
float2 vnew;
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
// scale FFT (other options suggested on mathworks forum https://www.mathworks.com/matlabcentral/answers/15770-scaling-the-fft-and-the-ifft)
float scale = 1.f;// / ((float)sqrt(dim));
vnew.x = vx[ind] * scale;
vnew.y = vy[ind] * scale;
v[ind] = vnew;
}
}
}
void advect_particles_cpu(float2* p, float2* v, int dim, float dt) {
float2 pt, vt;
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
int ind = i * dim + j;
pt = p[ind];
// grab vt at the voxel pt points to
int pti = (int) (pt.x*dim);
int ptj = (int) (pt.y*dim);
int pind = pti * dim + ptj;
vt = v[pind];
            // update positions
pt.x += dt * vt.x;
pt.y += dt * vt.y;
if (pt.x < 0.f) pt.x += 1.f;
if (pt.x > 1.f) pt.x -= 1.f;
if (pt.y < 0.f) pt.y += 1.f;
if (pt.y > 1.f) pt.y -= 1.f;
p[ind] = pt;
}
}
}
void add_forces_cpu(float2 *v, int dim, int spx, int spy, float fx, float fy, int r) {
float2 vt;
for (int i = 0; i < 2*r; i++) {
for (int j = 0; j < 2*r; j++) {
int ind = (i + spx) * dim + j + spy;
vt = v[ind];
float s = 1.f / (1.f + pow(i-r, 4) + pow(j-r, 4));
vt.x += s * fx;
vt.y += s * fy;
v[ind] = vt;
}
}
}
|
eaebd9cdc49adf6ac6cb2e93a844d3ea41782df9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "custom_cuda_layers.h"
#include "general_kernels.h"
namespace cg = cooperative_groups;
// Fused attention + softmax
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
const float* attn_mask,
int heads,
int seq_length,
int iterations)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = ::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float4* val_cast = reinterpret_cast<float4*>(vals);
const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);
float4 data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float4 mask = attn_mask_cast[mask_offset + data_id];
data[i] = val_cast[data_offset + data_id];
data[i].x += mask.x;
data[i].y += mask.y;
data[i].z += mask.z;
data[i].w += mask.w;
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
} else {
data[i].x = minus_infinity;
data[i].y = minus_infinity;
data[i].z = minus_infinity;
data[i].w = minus_infinity;
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
data[i].x /= sum;
data[i].y /= sum;
data[i].z /= sum;
data[i].w /= sum;
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
}
}
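/* Editor's note (annotation, not original code): the kernel above computes a numerically
   stable masked softmax over each attention row,
       softmax(x)_i = exp(x_i + m_i - max_j(x_j + m_j)) / sum_k exp(x_k + m_k - max),
   using warp shuffles (plus shared memory across warps) for the row-wise max and sum
   reductions; the 1e-6 added to the sum guards against division by zero. */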
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
const __half* attn_mask,
int heads,
int seq_length,
int iterations)
{
#if __CUDA_ARCH__ >= 700
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = ::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float2* val_cast = reinterpret_cast<float2*>(vals);
const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);
val_cast += data_offset;
attn_mask_cast += mask_offset;
float2 low_data[MAX_THREAD_ITERATIONS];
float2 high_data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 data = val_cast[data_id];
float2 mask = attn_mask_cast[data_id];
__half2* data_arr = reinterpret_cast<__half2*>(&data);
__half2* mask_arr = reinterpret_cast<__half2*>(&mask);
low_data[i] = __half22float2(data_arr[0]);
high_data[i] = __half22float2(data_arr[1]);
float2 low_mask = __half22float2(mask_arr[0]);
float2 high_mask = __half22float2(mask_arr[1]);
low_data[i].x += low_mask.x;
low_data[i].y += low_mask.y;
high_data[i].x += high_mask.x;
high_data[i].y += high_mask.y;
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 result_f;
__half2* result_h = reinterpret_cast<__half2*>(&result_f);
low_data[i].x /= sum;
low_data[i].y /= sum;
high_data[i].x /= sum;
high_data[i].y /= sum;
result_h[0] = __float22half2_rn(low_data[i]);
result_h[1] = __float22half2_rn(high_data[i]);
val_cast[data_id] = result_f;
}
}
#endif
}
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, hipStream_t, bool);
template <>
void launch_attn_softmax<float>(float* vals,
const float* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else
            throw std::runtime_error(
                "Unsupported Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
}
}
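/* Editor's note (annotation, not original code): seq_length4 = sequence_length / 4
   because the kernels read float4 (or two half2) vectors, so each thread lane covers
   four elements; when a row needs fewer than `threads` lanes, block_compute_size packs
   several whole rows into one thread block, which is why the grid y-dimension is
   heads * seq2 / block_compute_size. */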
template <>
void launch_attn_softmax<__half>(__half* vals,
const __half* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else
            throw std::runtime_error(
                "Unsupported Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
}
}
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5; // warp-count = num_threads / WARP_SIZE (32)
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
? (seq_length + iteration_stride - 1) / iteration_stride
: MAX_THREAD_ITERATIONS);
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> 5;
int lane = id & 0x1f;
T val_reg[MAX_THREAD_ITERATIONS];
T soft_reg[MAX_THREAD_ITERATIONS];
float grad_reg = 0.0f;
#pragma unroll
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
val_reg[i] = out_grad[row * block_width + data_id];
soft_reg[i] = soft_inp[row * block_width + data_id];
grad_reg += ((float)val_reg[i] *
                         (float)soft_reg[i]); // doing this multiplication in half precision
                                              // could cost ~2% accuracy in the computation
}
}
for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = grad_reg;
b.sync();
if (lane < warp_num) grad_reg = partialSum[lane];
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
grad_reg = g.shfl(grad_reg, id / tbSize);
}
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
out_grad[row * block_width + data_id] = (T)temp;
}
}
}
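/* Editor's note (annotation, not original code): this kernel (and the _v2 variant below)
   implements the softmax gradient
       dx_i = y_i * (g_i - sum_j g_j * y_j),
   where y is the saved softmax output and g is the incoming gradient; the reduction
   computes the shared dot product sum_j g_j * y_j once per row. */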
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
const T* output,
int softmax_length)
{
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
output += offset;
T grad_reg[ITERATIONS];
T output_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
output_reg[i] = output[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)output_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
}
}
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream)
{
if ((seq_length % WARP_SIZE) != 0 || seq_length > 2048)
throw std::runtime_error("Invalid sequence length found in softmax backward.");
const int warps_per_block = 4;
dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
switch (seq_length) {
case 32:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 1>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 64:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 128:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 256:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 384:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 12>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 512:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 768:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 24>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 1024:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 2048:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
default:
throw std::runtime_error(
std::string("Special sequence length found in softmax backward, seq_length: ") +
std::to_string(seq_length));
}
}
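/* Editor's note (annotation, not original code): in the dispatch above each warp handles
   one softmax row, so the ITERATIONS template parameter is simply seq_length / WARP_SIZE
   (hence the requirement that seq_length be a multiple of 32 and at most 2048), and each
   (WARP_SIZE x warps_per_block) block processes warps_per_block rows. */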
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
const __half* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
const float* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
| eaebd9cdc49adf6ac6cb2e93a844d3ea41782df9.cu | #include "custom_cuda_layers.h"
#include "general_kernels.h"
namespace cg = cooperative_groups;
// Fused attention + softmax
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
const float* attn_mask,
int heads,
int seq_length,
int iterations)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = std::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float4* val_cast = reinterpret_cast<float4*>(vals);
const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);
float4 data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float4 mask = attn_mask_cast[mask_offset + data_id];
data[i] = val_cast[data_offset + data_id];
data[i].x += mask.x;
data[i].y += mask.y;
data[i].z += mask.z;
data[i].w += mask.w;
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
} else {
data[i].x = minus_infinity;
data[i].y = minus_infinity;
data[i].z = minus_infinity;
data[i].w = minus_infinity;
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
data[i].x /= sum;
data[i].y /= sum;
data[i].z /= sum;
data[i].w /= sum;
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
}
}
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
const __half* attn_mask,
int heads,
int seq_length,
int iterations)
{
#if __CUDA_ARCH__ >= 700
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = std::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float2* val_cast = reinterpret_cast<float2*>(vals);
const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);
val_cast += data_offset;
attn_mask_cast += mask_offset;
float2 low_data[MAX_THREAD_ITERATIONS];
float2 high_data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 data = val_cast[data_id];
float2 mask = attn_mask_cast[data_id];
__half2* data_arr = reinterpret_cast<__half2*>(&data);
__half2* mask_arr = reinterpret_cast<__half2*>(&mask);
low_data[i] = __half22float2(data_arr[0]);
high_data[i] = __half22float2(data_arr[1]);
float2 low_mask = __half22float2(mask_arr[0]);
float2 high_mask = __half22float2(mask_arr[1]);
low_data[i].x += low_mask.x;
low_data[i].y += low_mask.y;
high_data[i].x += high_mask.x;
high_data[i].y += high_mask.y;
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 result_f;
__half2* result_h = reinterpret_cast<__half2*>(&result_f);
low_data[i].x /= sum;
low_data[i].y /= sum;
high_data[i].x /= sum;
high_data[i].y /= sum;
result_h[0] = __float22half2_rn(low_data[i]);
result_h[1] = __float22half2_rn(high_data[i]);
val_cast[data_id] = result_f;
}
}
#endif
}
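/*
* Host-side launchers: each thread handles 4 elements of the row
* (seq_length4 = sequence_length / 4), and the <tbSize, blockStride, tbSeq>
* template parameters are chosen from sequence_length so the reduction tile
* matches the row length. Rows longer than 256 elements switch to a
* 256-thread block, and anything at or above
* MAX_THREADS * MAX_THREAD_ITERATIONS * 4 elements is rejected.
*/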
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t, bool);
template <>
void launch_attn_softmax<float>(float* vals,
const float* attn_mask,
int batch_size,
int heads,
int sequence_length,
cudaStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
attn_softmax<2, (threads / 2), 2>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
attn_softmax<4, (threads / 4), 4>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
attn_softmax<8, (threads / 8), 8>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
attn_softmax<16, (threads / 16), 16>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
attn_softmax<32, (threads / 32), 32>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
attn_softmax<32, (threads / 64), 64>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else
throw std::runtime_error(
"Unsupport Seq_Length! Check the restriction of the max_threads and "
"max_thread_iterations!");
}
}
template <>
void launch_attn_softmax<__half>(__half* vals,
const __half* attn_mask,
int batch_size,
int heads,
int sequence_length,
cudaStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
attn_softmax<2, (threads / 2), 2>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
attn_softmax<4, (threads / 4), 4>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
attn_softmax<8, (threads / 8), 8>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
attn_softmax<16, (threads / 16), 16>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
attn_softmax<32, (threads / 32), 32>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
attn_softmax<32, (threads / 64), 64>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else
throw std::runtime_error(
"Unsupport Seq_Length! Check the restriction of the max_threads and "
"max_thread_iterations!");
}
}
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5; // warp-count = num_threads / WARP_SIZE (32)
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
? (seq_length + iteration_stride - 1) / iteration_stride
: MAX_THREAD_ITERATIONS);
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> 5;
int lane = id & 0x1f;
T val_reg[MAX_THREAD_ITERATIONS];
T soft_reg[MAX_THREAD_ITERATIONS];
float grad_reg = 0.0f;
#pragma unroll
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
val_reg[i] = out_grad[row * block_width + data_id];
soft_reg[i] = soft_inp[row * block_width + data_id];
grad_reg += ((float)val_reg[i] *
(float)soft_reg[i]); // accumulate in float: doing this multiplication
// in half precision can cost roughly 2% accuracy
}
}
for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = grad_reg;
b.sync();
if (lane < warp_num) grad_reg = partialSum[lane];
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
grad_reg = g.shfl(grad_reg, id / tbSize);
}
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
out_grad[row * block_width + data_id] = (T)temp;
}
}
}
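/*
* softmax_backward_kernel_v2: one warp owns one softmax row. Each lane keeps
* ITERATIONS strided elements in registers, the warp reduces
* sum(grad * output) with shuffles, and the gradient is rewritten in place
* as output * (grad - sum).
*/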
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
const T* output,
int softmax_length)
{
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
output += offset;
T grad_reg[ITERATIONS];
T output_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
output_reg[i] = output[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)output_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
}
}
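/*
* Launcher for the warp-per-row backward kernel: 4 warps (i.e. 4 rows) per
* block and ITERATIONS = seq_length / WARP_SIZE baked in at compile time,
* so only the sequence lengths enumerated in the switch below are accepted.
*/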
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream)
{
if ((seq_length % WARP_SIZE) != 0 || seq_length > 2048)
throw std::runtime_error("Invalid sequence length found in softmax backward.");
const int warps_per_block = 4;
dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
switch (seq_length) {
case 32:
softmax_backward_kernel_v2<T, 1>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 64:
softmax_backward_kernel_v2<T, 2>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 128:
softmax_backward_kernel_v2<T, 4>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 256:
softmax_backward_kernel_v2<T, 8>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 384:
softmax_backward_kernel_v2<T, 12>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 512:
softmax_backward_kernel_v2<T, 16>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 768:
softmax_backward_kernel_v2<T, 24>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 1024:
softmax_backward_kernel_v2<T, 32>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 2048:
softmax_backward_kernel_v2<T, 64>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
default:
throw std::runtime_error(
std::string("Special sequence length found in softmax backward, seq_length: ") +
std::to_string(seq_length));
}
}
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
const __half* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
const float* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream);
|
547443c62ed687767f32074b66bab4a748853a7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/dilation2d.h>
#include <array/DataTypeUtils.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
__global__ static void dilation2dCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW) {
// x [bS, iH, iW, iC]
// y [kH, kW, iC]
// z [bS, oH, oW, iC]
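// One thread per output element: decode its (b, oh, ow, c) coordinates,
// slide the kH x kW window over the strided/padded/dilated input and keep
// the maximum of x + y, i.e. a grayscale morphological dilation.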
const X* x = reinterpret_cast<const X*>(vx);
const X* y = reinterpret_cast<const X*>(vy);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ int xzRank, yRank;
__shared__ uint iH, iW, kH, kW;
__shared__ Nd4jLong *sharedMem, zLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
xzRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
iH = xShapeInfo[2];
iW = xShapeInfo[3];
kH = yShapeInfo[1];
kW = yShapeInfo[2];
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto xzCoords = sharedMem + threadIdx.x * (xzRank + yRank);
auto yCoords = xzCoords + xzRank;
shape::index2coords(xzRank, zShapeInfo + 1, zInd, zLen, xzCoords);
const auto zOffset = shape::getOffset(zShapeInfo, xzCoords);
yCoords[2] = xzCoords[3]; // iC coordinate is same for x, y and z
const auto oh = xzCoords[1];
const auto ow = xzCoords[2];
X max = -DataTypeUtils::max<X>();
for (yCoords[0] = 0; yCoords[0] < kH; ++yCoords[0]) {
xzCoords[1] = oh * sH - pH + yCoords[0] * dH;
if (xzCoords[1] < 0 || xzCoords[1] >= iH) continue;
for (yCoords[1] = 0; yCoords[1] < kW; ++yCoords[1]) {
xzCoords[2] = ow * sW - pW + yCoords[1] * dW;
if(xzCoords[2] < 0 || xzCoords[2] >= iW) continue;
const X val = x[shape::getOffset(xShapeInfo, xzCoords)] + y[shape::getOffset(yShapeInfo, yCoords)];
if (val > max)
max = val;
}
}
z[zOffset] = static_cast<Z>(max);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void dilation2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW) {
hipLaunchKernelGGL(( dilation2dCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, sH, sW, pH, pW, dH, dW);
}
BUILD_DOUBLE_TEMPLATE(template void dilation2dCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW), LIBND4J_TYPES, FLOAT_TYPES);
void dilation2d(nd4j::LaunchContext* context, NDArray *input, NDArray *weights, NDArray *output, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
PointersManager manager(context, "dilation2d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = (weights->rankOf() + output->rankOf()) * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({output}, {input, weights});
BUILD_DOUBLE_SELECTOR(input->dataType(), output->dataType(), dilation2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), weights->getSpecialBuffer(), weights->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), sH, sW, pH, pW, dH, dW), LIBND4J_TYPES, FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {input, weights});
manager.synchronize();
}
}
}
}
| 547443c62ed687767f32074b66bab4a748853a7b.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/dilation2d.h>
#include <array/DataTypeUtils.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
__global__ static void dilation2dCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW) {
// x [bS, iH, iW, iC]
// y [kH, kW, iC]
// z [bS, oH, oW, iC]
const X* x = reinterpret_cast<const X*>(vx);
const X* y = reinterpret_cast<const X*>(vy);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ int xzRank, yRank;
__shared__ uint iH, iW, kH, kW;
__shared__ Nd4jLong *sharedMem, zLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
xzRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
iH = xShapeInfo[2];
iW = xShapeInfo[3];
kH = yShapeInfo[1];
kW = yShapeInfo[2];
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto xzCoords = sharedMem + threadIdx.x * (xzRank + yRank);
auto yCoords = xzCoords + xzRank;
shape::index2coords(xzRank, zShapeInfo + 1, zInd, zLen, xzCoords);
const auto zOffset = shape::getOffset(zShapeInfo, xzCoords);
yCoords[2] = xzCoords[3]; // iC coordinate is same for x, y and z
const auto oh = xzCoords[1];
const auto ow = xzCoords[2];
X max = -DataTypeUtils::max<X>();
for (yCoords[0] = 0; yCoords[0] < kH; ++yCoords[0]) {
xzCoords[1] = oh * sH - pH + yCoords[0] * dH;
if (xzCoords[1] < 0 || xzCoords[1] >= iH) continue;
for (yCoords[1] = 0; yCoords[1] < kW; ++yCoords[1]) {
xzCoords[2] = ow * sW - pW + yCoords[1] * dW;
if(xzCoords[2] < 0 || xzCoords[2] >= iW) continue;
const X val = x[shape::getOffset(xShapeInfo, xzCoords)] + y[shape::getOffset(yShapeInfo, yCoords)];
if (val > max)
max = val;
}
}
z[zOffset] = static_cast<Z>(max);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void dilation2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW) {
dilation2dCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, sH, sW, pH, pW, dH, dW);
}
BUILD_DOUBLE_TEMPLATE(template void dilation2dCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW), LIBND4J_TYPES, FLOAT_TYPES);
void dilation2d(nd4j::LaunchContext* context, NDArray *input, NDArray *weights, NDArray *output, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
PointersManager manager(context, "dilation2d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = (weights->rankOf() + output->rankOf()) * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({output}, {input, weights});
BUILD_DOUBLE_SELECTOR(input->dataType(), output->dataType(), dilation2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), weights->getSpecialBuffer(), weights->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), sH, sW, pH, pW, dH, dW), LIBND4J_TYPES, FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {input, weights});
manager.synchronize();
}
}
}
}
|
eec007043c57e283664a20409e3ca73604654a02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <hip/hip_fp16.h>
#include "split.h"
using namespace nvinfer1;
using nvinfer1::plugin::SplitPlugin;
template<typename T>
__device__
int upper_bound(T const* vals, int n, T const& key)
{
int i = 0;
while( n > 0 )
{
int m = n / 2;
int j = i + m;
if( !(key < vals[j]) )
{
i = j + 1;
n -= m + 1;
}
else
{
n = m;
}
}
return i;
}
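// split_kernel copies the input tensor into nsegment output tensors along the
// split axis: for every source row src_y, a binary search (upper_bound) over
// segment_offsets picks the destination tensor, and the element is written at
// the offset local to that segment.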
template<typename T>
__global__
void split_kernel(int nsegment,
int const* __restrict__ segment_offsets,
T const* __restrict__ idata,
T* const* odatas,
int nx,
int src_ny,
int nz)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int src_y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = threadIdx.z + blockIdx.z * blockDim.z;
for( int z=z0; z<nz; z+=blockDim.z*gridDim.z )
{
for( int src_y=src_y0; src_y<src_ny; src_y+=blockDim.y*gridDim.y )
{
for( int x=x0; x<nx; x+=blockDim.x*gridDim.x )
{
int segment = upper_bound(segment_offsets, nsegment, src_y) - 1;
int dst_y = src_y - segment_offsets[segment];
int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment];
odatas[segment][x + nx*(dst_y + dst_ny*z)] =
idata[x + nx*(src_y + src_ny*z)];
}
}
}
}
bool SplitPlugin::supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
ASSERT(inOut && pos < (nbInputs + nbOutputs));
return (inOut[pos].format == nvinfer1::PluginFormat::kNCHW);
}
nvinfer1::DataType SplitPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
ASSERT(inputTypes && nbInputs > 0);
return inputTypes[0];
}
int SplitPlugin::initialize()
{
return 0;
}
void SplitPlugin::terminate()
{
}
void SplitPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs)
{
std::vector<int> segment_offsets(1, 0);
for( int i = 0; i < nbOutputs; ++i )
{
segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]);
}
_d_segment_offsets = segment_offsets;
for (int i = 0; i < nbInputs; i++)
{
for (int j = 0; j < in[0].desc.dims.nbDims; j++)
{
// Do not support dynamic dimensions
ASSERT(in[0].desc.dims.d[j] != -1);
}
}
nvinfer1::Dims dims = in[0].desc.dims;
_nx = 1;
for( int i = dims.nbDims-1; i > _axis; --i )
{
_nx *= dims.d[i];
}
_ny = dims.d[_axis];
_nz = 1;
for( int i = _axis-1; i >= 0; --i )
{
_nz *= dims.d[i];
}
_d_output_ptrs.resize(nbOutputs, nullptr);
}
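// enqueue: copy the per-output device pointers to the GPU, size the grid from
// (nx, ny, nz), and launch split_kernel instantiated for float or __half
// depending on the input tensor type.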
int SplitPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs,
void* workspace,
hipStream_t stream)
{
int const* d_segment_offsets_ptr =
thrust::raw_pointer_cast(&_d_segment_offsets[0]);
float const* idata = reinterpret_cast<float const*>(inputs[0]);
float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]);
hipError_t cuda_status =
hipMemcpyAsync(odatas, h_odatas,
_d_output_ptrs.size() * sizeof(float*),
hipMemcpyHostToDevice, stream);
if( cuda_status != hipSuccess )
{
return 1;
}
int nz = _nz * inputDesc[0].dims.d[0];
dim3 block(32, 16);
dim3 grid(::min((_nx - 1) / block.x + 1, 65535u),
::min((_ny - 1) / block.y + 1, 65535u),
::min((_nz - 1) / block.z + 1, 65535u));
if (inputDesc[0].type==nvinfer1::DataType::kFLOAT)
{
hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream,
_d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas,
_nx, _ny, nz);
}
else
{
hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream,
_d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas,
_nx, _ny, nz);
}
return hipGetLastError() != hipSuccess;
}
nvinfer1::DimsExprs SplitPlugin::getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder)
{
nvinfer1::DimsExprs output(inputs[0]);
output.d[_axis] = exprBuilder.constant(_output_lengths[outputIndex]);
return output;
}
| eec007043c57e283664a20409e3ca73604654a02.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cuda_fp16.h>
#include "split.h"
using namespace nvinfer1;
using nvinfer1::plugin::SplitPlugin;
template<typename T>
__device__
int upper_bound(T const* vals, int n, T const& key)
{
int i = 0;
while( n > 0 )
{
int m = n / 2;
int j = i + m;
if( !(key < vals[j]) )
{
i = j + 1;
n -= m + 1;
}
else
{
n = m;
}
}
return i;
}
template<typename T>
__global__
void split_kernel(int nsegment,
int const* __restrict__ segment_offsets,
T const* __restrict__ idata,
T* const* odatas,
int nx,
int src_ny,
int nz)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int src_y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = threadIdx.z + blockIdx.z * blockDim.z;
for( int z=z0; z<nz; z+=blockDim.z*gridDim.z )
{
for( int src_y=src_y0; src_y<src_ny; src_y+=blockDim.y*gridDim.y )
{
for( int x=x0; x<nx; x+=blockDim.x*gridDim.x )
{
int segment = upper_bound(segment_offsets, nsegment, src_y) - 1;
int dst_y = src_y - segment_offsets[segment];
int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment];
odatas[segment][x + nx*(dst_y + dst_ny*z)] =
idata[x + nx*(src_y + src_ny*z)];
}
}
}
}
bool SplitPlugin::supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
ASSERT(inOut && pos < (nbInputs + nbOutputs));
return (inOut[pos].format == nvinfer1::PluginFormat::kNCHW);
}
nvinfer1::DataType SplitPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
ASSERT(inputTypes && nbInputs > 0);
return inputTypes[0];
}
int SplitPlugin::initialize()
{
return 0;
}
void SplitPlugin::terminate()
{
}
void SplitPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs)
{
std::vector<int> segment_offsets(1, 0);
for( int i = 0; i < nbOutputs; ++i )
{
segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]);
}
_d_segment_offsets = segment_offsets;
for (int i = 0; i < nbInputs; i++)
{
for (int j = 0; j < in[0].desc.dims.nbDims; j++)
{
// Do not support dynamic dimensions
ASSERT(in[0].desc.dims.d[j] != -1);
}
}
nvinfer1::Dims dims = in[0].desc.dims;
_nx = 1;
for( int i = dims.nbDims-1; i > _axis; --i )
{
_nx *= dims.d[i];
}
_ny = dims.d[_axis];
_nz = 1;
for( int i = _axis-1; i >= 0; --i )
{
_nz *= dims.d[i];
}
_d_output_ptrs.resize(nbOutputs, nullptr);
}
int SplitPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs,
void* workspace,
cudaStream_t stream)
{
int const* d_segment_offsets_ptr =
thrust::raw_pointer_cast(&_d_segment_offsets[0]);
float const* idata = reinterpret_cast<float const*>(inputs[0]);
float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]);
cudaError_t cuda_status =
cudaMemcpyAsync(odatas, h_odatas,
_d_output_ptrs.size() * sizeof(float*),
cudaMemcpyHostToDevice, stream);
if( cuda_status != cudaSuccess )
{
return 1;
}
int nz = _nz * inputDesc[0].dims.d[0];
dim3 block(32, 16);
dim3 grid(std::min((_nx - 1) / block.x + 1, 65535u),
std::min((_ny - 1) / block.y + 1, 65535u),
std::min((_nz - 1) / block.z + 1, 65535u));
if (inputDesc[0].type==nvinfer1::DataType::kFLOAT)
{
split_kernel<<<grid, block, 0, stream>>>
(_d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas,
_nx, _ny, nz);
}
else
{
split_kernel<<<grid, block, 0, stream>>>
(_d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas,
_nx, _ny, nz);
}
return cudaGetLastError() != cudaSuccess;
}
nvinfer1::DimsExprs SplitPlugin::getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, nvinfer1::IExprBuilder& exprBuilder)
{
nvinfer1::DimsExprs output(inputs[0]);
output.d[_axis] = exprBuilder.constant(_output_lengths[outputIndex]);
return output;
}
|
848cf1e7faf31396ed1d864a4f7415a5df7e056d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ComputationsBarnesHutCuda.h"
#include <thrust/sort.h>
#include <bitset>
#include <string>
const float G = 6.674 * (1e-11);
const float EPS = 0.01f;
const float theta = 2;
const int THREADS_PER_BLOCK = 1024;
const int K = 15;
struct OctreeNode {
int children[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
int position = -1;
float totalMass = 0;
float centerX = 0.0;
float centerY = 0.0;
float centerZ = 0.0;
};
template <typename T>
__global__
void getDimensions(T* positions, T* px, T* py, T* pz, int numberOfBodies) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= numberOfBodies) return;
px[thid] = positions[3*thid];
py[thid] = positions[3*thid+1];
pz[thid] = positions[3*thid+2];
}
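// calculateMortonCodes: for each body, interleave one x/y/z bit per octree
// level (K = 15 levels -> 45-bit code) by repeatedly bisecting the bounding
// box, so that sorting by code groups spatially nearby bodies together.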
template <typename T>
__global__
void calculateMortonCodes(T* positions, unsigned long long* codes, int numberOfBodies, float* mins, float* maxs) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= numberOfBodies) return;
float t[3] = {mins[0], mins[1], mins[2]};
float p[3] = {positions[3*thid], positions[3*thid+1], positions[3*thid+2]};
float b[3] = {(maxs[0] - mins[0])/2, (maxs[1] - mins[1])/2, (maxs[2] - mins[2])/2};
unsigned long long code = 0;
for(int i = 0; i < K; ++i) {
for(int j = 0; j < 3; ++j) {
code <<= 1;
if(t[j]+b[j] < p[j]) {
code |= 0x1;
t[j] += b[j];
}
b[j] /= 2;
}
}
codes[thid] = code;
}
template <typename T>
__global__
void fillNodes(T* sortedNodes, int numberOfBodies) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= numberOfBodies) return;
sortedNodes[thid] = thid;
}
__global__
void calculateDuplicates(unsigned long long int* mortonCodes, int* result, int N) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N || thid == 0) return;
unsigned long long int code = mortonCodes[thid];
unsigned long long int previous_code = mortonCodes[thid-1];
code >>= 3;
previous_code >>= 3;
result[thid] = (code != previous_code);
}
__global__
void connectChildren(unsigned long long int* mortonCodes, int* parentsNumbers, OctreeNode* octree,
int N, int previousAllChildrenCount, int* sortedNodes, float* positions, float* weights, int level) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N) return;
unsigned long long int childNumber = mortonCodes[thid] & 0x7; // 7 = 111 in binary
octree[parentsNumbers[thid]].children[childNumber] = thid+previousAllChildrenCount;
octree[parentsNumbers[thid]].position = -1;
octree[thid+previousAllChildrenCount].position = ((level == 0) ? sortedNodes[thid] : -1);
int childIndex = sortedNodes[thid];
if(level == 0) {
octree[thid].totalMass = weights[childIndex];
octree[thid].centerX = weights[childIndex] * positions[3*childIndex];
octree[thid].centerY = weights[childIndex] * positions[3*childIndex+1];
octree[thid].centerZ = weights[childIndex] * positions[3*childIndex+2];
}
int pthid = parentsNumbers[thid];
atomicAdd(&octree[pthid].totalMass, octree[thid+previousAllChildrenCount].totalMass);
atomicAdd(&octree[pthid].centerX, octree[thid+previousAllChildrenCount].centerX);
atomicAdd(&octree[pthid].centerY, octree[thid+previousAllChildrenCount].centerY);
atomicAdd(&octree[pthid].centerZ, octree[thid+previousAllChildrenCount].centerZ);
}
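// computeForces walks the octree iteratively with an explicit stack. Internal
// nodes that pass the s/dist < theta acceptance test are approximated by
// their center of mass; otherwise their children are pushed, and leaves
// contribute exact pairwise gravity. The resulting acceleration is then
// integrated over dt.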
__global__
void computeForces(OctreeNode* octree, float* velocities, float* weights,
float* pos, float* mins, float* maxs, int AllNodes, int N, float dt)
{
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N) return;
float p[3] = {pos[3*thid], pos[3*thid + 1], pos[3*thid + 2]};
float forces[3] = {0.0, 0.0, 0.0};
const int C = 0;//16;
int stack[16];
char child[16];
float bound = maxs[0]-mins[0];
int top = 0;
stack[threadIdx.x*C+top] = AllNodes - 1;
child[threadIdx.x*C+top] = 0;
while(top>=0)
{
int prevTop = top;
int nextChild = child[threadIdx.x*C+top];
int idx = stack[threadIdx.x*C+top];
top--;
if(idx == -1)
continue;
if(octree[idx].position == -1)
{
float distX = octree[idx].centerX - p[0];
float distY = octree[idx].centerY - p[1];
float distZ = octree[idx].centerZ - p[2];
float dist = distX*distX + distY*distY + distZ*distZ + EPS*EPS;
dist = dist * sqrt(dist);
float s = bound/(1<<prevTop);
bool isFarAway = (s/dist < theta);
if(isFarAway)
{
float F = G * (octree[idx].totalMass * weights[thid]);
forces[0] += F * distX / dist;
forces[1] += F * distY / dist;
forces[2] += F * distZ / dist;
}
else
{
if(nextChild==8) {
continue;
}
++top;
stack[threadIdx.x*C+top] = idx;
child[threadIdx.x*C+top] = nextChild + 1;
++top;
stack[threadIdx.x*C+top] = octree[idx].children[nextChild];
child[threadIdx.x*C+top] = 0;
continue;
}
}
else
{
int p = octree[idx].position;
if(thid == p)
continue;
float distX = pos[3*p] - pos[3*thid];
float distY = pos[3*p + 1] - pos[3*thid + 1];
float distZ = pos[3*p + 2] - pos[3*thid + 2];
float dist = (distX * distX + distY * distY + distZ * distZ) + EPS * EPS;
dist = dist * sqrt(dist);
float F = G * (weights[p] * weights[thid]);
forces[0] += F * distX / dist;
forces[1] += F * distY / dist;
forces[2] += F * distZ / dist;
}
}
for (int j = 0; j < 3; j++) {
float acceleration = forces[j] / weights[thid];
pos[thid * 3 + j] +=
velocities[thid * 3 + j] * dt + acceleration * dt * dt / 2;
velocities[thid * 3 + j] += acceleration * dt;
}
}
__global__
void computeCenterOfMasses(OctreeNode* octree, int N) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N) return;
int totalMass = octree[thid].totalMass;
octree[thid].centerX /= totalMass;
octree[thid].centerY /= totalMass;
octree[thid].centerZ /= totalMass;
}
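// createTree builds the Barnes-Hut octree bottom-up on the GPU: compute the
// bounding box, sort bodies by Morton code, then for each of the K levels
// collapse nodes that share a code prefix into parents (connectChildren)
// while accumulating mass sums, normalize the centers of mass, and finally
// launch computeForces.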
void ComputationsBarnesHut::createTree(int numberOfBodies, float dt) {
int blocks = (numberOfBodies+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
thrust::device_vector<float> px(numberOfBodies), py(numberOfBodies), pz(numberOfBodies);
float *d_px = thrust::raw_pointer_cast(px.data());
float *d_py = thrust::raw_pointer_cast(py.data());
float *d_pz = thrust::raw_pointer_cast(pz.data());
hipLaunchKernelGGL(( getDimensions), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_positions, d_px, d_py, d_pz, numberOfBodies);
auto itx = thrust::minmax_element(px.begin(), px.end());
auto ity = thrust::minmax_element(py.begin(), py.end());
auto itz = thrust::minmax_element(pz.begin(), pz.end());
float mins[3] = {*itx.first, *ity.first, *itz.first};
float maxs[3] = {*itx.second, *ity.second, *itz.second};
thrust::device_vector<float> minsDeviceVector(mins, mins+3);
thrust::device_vector<float> maxsDeviceVector(maxs, maxs+3);
float* d_mins = thrust::raw_pointer_cast(minsDeviceVector.data());
float* d_maxs = thrust::raw_pointer_cast(maxsDeviceVector.data());
thrust::device_vector<unsigned long long> mortonCodes(numberOfBodies);
unsigned long long* d_codes = thrust::raw_pointer_cast(mortonCodes.data());
hipLaunchKernelGGL(( calculateMortonCodes), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_positions, d_codes, numberOfBodies, d_mins, d_maxs);
thrust::device_vector<int> sortedNodes(numberOfBodies);
int* d_sortedNodes = thrust::raw_pointer_cast(sortedNodes.data());
hipLaunchKernelGGL(( fillNodes), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_sortedNodes, numberOfBodies);
thrust::sort_by_key(mortonCodes.begin(), mortonCodes.end(), sortedNodes.begin());
int uniquePointsCount = mortonCodes.size();
blocks = (uniquePointsCount+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
thrust::device_vector<OctreeNode> octree(uniquePointsCount);
OctreeNode* d_octree = thrust::raw_pointer_cast(octree.data());
thrust::device_vector<int> parentsNumbers(uniquePointsCount);
int* d_parentsNumbers = thrust::raw_pointer_cast(parentsNumbers.data());
int childrenCount = uniquePointsCount;
int allChildrenCount = uniquePointsCount;
int previousAllChildrenCount = 0;
for(int i = 0; i < K; ++i) {
blocks = (childrenCount+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
thrust::fill(parentsNumbers.begin(), parentsNumbers.end(), 0);
hipLaunchKernelGGL(( calculateDuplicates), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
d_codes,
d_parentsNumbers,
childrenCount
);
thrust::inclusive_scan(parentsNumbers.begin(), parentsNumbers.end(), parentsNumbers.begin());
octree.insert(octree.end(), parentsNumbers[childrenCount-1]+1, OctreeNode());
d_octree = thrust::raw_pointer_cast(octree.data());
thrust::for_each(parentsNumbers.begin(), parentsNumbers.end(), thrust::placeholders::_1 += allChildrenCount);
hipLaunchKernelGGL(( connectChildren), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
d_codes,
d_parentsNumbers,
d_octree,
childrenCount,
previousAllChildrenCount,
d_sortedNodes,
d_positions,
d_weights,
i
);
thrust::for_each(mortonCodes.begin(), mortonCodes.end(), thrust::placeholders::_1 >>= 3);
auto it = thrust::unique(mortonCodes.begin(), mortonCodes.end());
mortonCodes.erase(it, mortonCodes.end());
d_codes = thrust::raw_pointer_cast(mortonCodes.data()); // why raw_cast again?
childrenCount = mortonCodes.size();
previousAllChildrenCount = allChildrenCount;
allChildrenCount += childrenCount;
}
blocks = (octree.size()+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( computeCenterOfMasses), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0,
d_octree,
octree.size()
);
float *d_velocities = thrust::raw_pointer_cast(veloD.data());
float *d_weights = thrust::raw_pointer_cast(weightsD.data());
blocks = (numberOfBodies+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
hipLaunchKernelGGL(( computeForces), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_octree,
d_velocities,
d_weights,
d_positions,
d_mins, d_maxs,
allChildrenCount,
numberOfBodies, dt);
}
bool ComputationsBarnesHut::testingMomemntum(int numberOfBodies) {
float momentum[3] = {0.0f, 0.0f, 0.0f};
for (unsigned i = 0; i < numberOfBodies; i++) {
for(int k = 0; k < 3; k++) {
momentum[k] += (weightsD[i] * veloD[i*3 + k]);
}
}
std::cout << momentum[0] << " " << momentum[1] << " " << momentum[2] << std::endl;
return true;
}
void ComputationsBarnesHut::BarnesHutBridge(type &pos, int numberOfBodies, float dt) {
thrust::device_vector<float> posD = pos;
d_positions = thrust::raw_pointer_cast(posD.data());
createTree(numberOfBodies, dt);
//testingMomemntum(numberOfBodies);
pos = posD;
} | 848cf1e7faf31396ed1d864a4f7415a5df7e056d.cu | #include "ComputationsBarnesHutCuda.h"
#include <thrust/sort.h>
#include <bitset>
#include <string>
const float G = 6.674 * (1e-11);
const float EPS = 0.01f;
const float theta = 2;
const int THREADS_PER_BLOCK = 1024;
const int K = 15;
struct OctreeNode {
int children[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
int position = -1;
float totalMass = 0;
float centerX = 0.0;
float centerY = 0.0;
float centerZ = 0.0;
};
template <typename T>
__global__
void getDimensions(T* positions, T* px, T* py, T* pz, int numberOfBodies) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= numberOfBodies) return;
px[thid] = positions[3*thid];
py[thid] = positions[3*thid+1];
pz[thid] = positions[3*thid+2];
}
template <typename T>
__global__
void calculateMortonCodes(T* positions, unsigned long long* codes, int numberOfBodies, float* mins, float* maxs) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= numberOfBodies) return;
float t[3] = {mins[0], mins[1], mins[2]};
float p[3] = {positions[3*thid], positions[3*thid+1], positions[3*thid+2]};
float b[3] = {(maxs[0] - mins[0])/2, (maxs[1] - mins[1])/2, (maxs[2] - mins[2])/2};
unsigned long long code = 0;
for(int i = 0; i < K; ++i) {
for(int j = 0; j < 3; ++j) {
code <<= 1;
if(t[j]+b[j] < p[j]) {
code |= 0x1;
t[j] += b[j];
}
b[j] /= 2;
}
}
codes[thid] = code;
}
template <typename T>
__global__
void fillNodes(T* sortedNodes, int numberOfBodies) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= numberOfBodies) return;
sortedNodes[thid] = thid;
}
__global__
void calculateDuplicates(unsigned long long int* mortonCodes, int* result, int N) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N || thid == 0) return;
unsigned long long int code = mortonCodes[thid];
unsigned long long int previous_code = mortonCodes[thid-1];
code >>= 3;
previous_code >>= 3;
result[thid] = (code != previous_code);
}
__global__
void connectChildren(unsigned long long int* mortonCodes, int* parentsNumbers, OctreeNode* octree,
int N, int previousAllChildrenCount, int* sortedNodes, float* positions, float* weights, int level) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N) return;
unsigned long long int childNumber = mortonCodes[thid] & 0x7; // 7 = 111 in binary
octree[parentsNumbers[thid]].children[childNumber] = thid+previousAllChildrenCount;
octree[parentsNumbers[thid]].position = -1;
octree[thid+previousAllChildrenCount].position = ((level == 0) ? sortedNodes[thid] : -1);
int childIndex = sortedNodes[thid];
if(level == 0) {
octree[thid].totalMass = weights[childIndex];
octree[thid].centerX = weights[childIndex] * positions[3*childIndex];
octree[thid].centerY = weights[childIndex] * positions[3*childIndex+1];
octree[thid].centerZ = weights[childIndex] * positions[3*childIndex+2];
}
int pthid = parentsNumbers[thid];
atomicAdd(&octree[pthid].totalMass, octree[thid+previousAllChildrenCount].totalMass);
atomicAdd(&octree[pthid].centerX, octree[thid+previousAllChildrenCount].centerX);
atomicAdd(&octree[pthid].centerY, octree[thid+previousAllChildrenCount].centerY);
atomicAdd(&octree[pthid].centerZ, octree[thid+previousAllChildrenCount].centerZ);
}
__global__
void computeForces(OctreeNode* octree, float* velocities, float* weights,
float* pos, float* mins, float* maxs, int AllNodes, int N, float dt)
{
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N) return;
float p[3] = {pos[3*thid], pos[3*thid + 1], pos[3*thid + 2]};
float forces[3] = {0.0, 0.0, 0.0};
const int C = 0;//16;
int stack[16];
char child[16];
float bound = maxs[0]-mins[0];
int top = 0;
stack[threadIdx.x*C+top] = AllNodes - 1;
child[threadIdx.x*C+top] = 0;
while(top>=0)
{
int prevTop = top;
int nextChild = child[threadIdx.x*C+top];
int idx = stack[threadIdx.x*C+top];
top--;
if(idx == -1)
continue;
if(octree[idx].position == -1)
{
float distX = octree[idx].centerX - p[0];
float distY = octree[idx].centerY - p[1];
float distZ = octree[idx].centerZ - p[2];
float dist = distX*distX + distY*distY + distZ*distZ + EPS*EPS;
dist = dist * sqrt(dist);
float s = bound/(1<<prevTop);
bool isFarAway = (s/dist < theta);
if(isFarAway)
{
float F = G * (octree[idx].totalMass * weights[thid]);
forces[0] += F * distX / dist;
forces[1] += F * distY / dist;
forces[2] += F * distZ / dist;
}
else
{
if(nextChild==8) {
continue;
}
++top;
stack[threadIdx.x*C+top] = idx;
child[threadIdx.x*C+top] = nextChild + 1;
++top;
stack[threadIdx.x*C+top] = octree[idx].children[nextChild];
child[threadIdx.x*C+top] = 0;
continue;
}
}
else
{
int p = octree[idx].position;
if(thid == p)
continue;
float distX = pos[3*p] - pos[3*thid];
float distY = pos[3*p + 1] - pos[3*thid + 1];
float distZ = pos[3*p + 2] - pos[3*thid + 2];
float dist = (distX * distX + distY * distY + distZ * distZ) + EPS * EPS;
dist = dist * sqrt(dist);
float F = G * (weights[p] * weights[thid]);
forces[0] += F * distX / dist;
forces[1] += F * distY / dist;
forces[2] += F * distZ / dist;
}
}
for (int j = 0; j < 3; j++) {
float acceleration = forces[j] / weights[thid];
pos[thid * 3 + j] +=
velocities[thid * 3 + j] * dt + acceleration * dt * dt / 2;
velocities[thid * 3 + j] += acceleration * dt;
}
}
__global__
void computeCenterOfMasses(OctreeNode* octree, int N) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(thid >= N) return;
int totalMass = octree[thid].totalMass;
octree[thid].centerX /= totalMass;
octree[thid].centerY /= totalMass;
octree[thid].centerZ /= totalMass;
}
void ComputationsBarnesHut::createTree(int numberOfBodies, float dt) {
int blocks = (numberOfBodies+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
thrust::device_vector<float> px(numberOfBodies), py(numberOfBodies), pz(numberOfBodies);
float *d_px = thrust::raw_pointer_cast(px.data());
float *d_py = thrust::raw_pointer_cast(py.data());
float *d_pz = thrust::raw_pointer_cast(pz.data());
getDimensions<<<blocks, THREADS_PER_BLOCK>>>(d_positions, d_px, d_py, d_pz, numberOfBodies);
auto itx = thrust::minmax_element(px.begin(), px.end());
auto ity = thrust::minmax_element(py.begin(), py.end());
auto itz = thrust::minmax_element(pz.begin(), pz.end());
float mins[3] = {*itx.first, *ity.first, *itz.first};
float maxs[3] = {*itx.second, *ity.second, *itz.second};
thrust::device_vector<float> minsDeviceVector(mins, mins+3);
thrust::device_vector<float> maxsDeviceVector(maxs, maxs+3);
float* d_mins = thrust::raw_pointer_cast(minsDeviceVector.data());
float* d_maxs = thrust::raw_pointer_cast(maxsDeviceVector.data());
thrust::device_vector<unsigned long long> mortonCodes(numberOfBodies);
unsigned long long* d_codes = thrust::raw_pointer_cast(mortonCodes.data());
calculateMortonCodes<<<blocks, THREADS_PER_BLOCK>>>(d_positions, d_codes, numberOfBodies, d_mins, d_maxs);
thrust::device_vector<int> sortedNodes(numberOfBodies);
int* d_sortedNodes = thrust::raw_pointer_cast(sortedNodes.data());
fillNodes<<<blocks, THREADS_PER_BLOCK>>>(d_sortedNodes, numberOfBodies);
thrust::sort_by_key(mortonCodes.begin(), mortonCodes.end(), sortedNodes.begin());
int uniquePointsCount = mortonCodes.size();
blocks = (uniquePointsCount+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
thrust::device_vector<OctreeNode> octree(uniquePointsCount);
OctreeNode* d_octree = thrust::raw_pointer_cast(octree.data());
thrust::device_vector<int> parentsNumbers(uniquePointsCount);
int* d_parentsNumbers = thrust::raw_pointer_cast(parentsNumbers.data());
int childrenCount = uniquePointsCount;
int allChildrenCount = uniquePointsCount;
int previousAllChildrenCount = 0;
for(int i = 0; i < K; ++i) {
blocks = (childrenCount+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
thrust::fill(parentsNumbers.begin(), parentsNumbers.end(), 0);
calculateDuplicates<<<blocks, THREADS_PER_BLOCK>>>(
d_codes,
d_parentsNumbers,
childrenCount
);
thrust::inclusive_scan(parentsNumbers.begin(), parentsNumbers.end(), parentsNumbers.begin());
octree.insert(octree.end(), parentsNumbers[childrenCount-1]+1, OctreeNode());
d_octree = thrust::raw_pointer_cast(octree.data());
thrust::for_each(parentsNumbers.begin(), parentsNumbers.end(), thrust::placeholders::_1 += allChildrenCount);
connectChildren<<<blocks, THREADS_PER_BLOCK>>>(
d_codes,
d_parentsNumbers,
d_octree,
childrenCount,
previousAllChildrenCount,
d_sortedNodes,
d_positions,
d_weights,
i
);
thrust::for_each(mortonCodes.begin(), mortonCodes.end(), thrust::placeholders::_1 >>= 3);
auto it = thrust::unique(mortonCodes.begin(), mortonCodes.end());
mortonCodes.erase(it, mortonCodes.end());
d_codes = thrust::raw_pointer_cast(mortonCodes.data()); // why raw_cast again?
childrenCount = mortonCodes.size();
previousAllChildrenCount = allChildrenCount;
allChildrenCount += childrenCount;
}
blocks = (octree.size()+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
computeCenterOfMasses<<<blocks, THREADS_PER_BLOCK>>>(
d_octree,
octree.size()
);
float *d_velocities = thrust::raw_pointer_cast(veloD.data());
float *d_weights = thrust::raw_pointer_cast(weightsD.data());
blocks = (numberOfBodies+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
computeForces<<<blocks, THREADS_PER_BLOCK>>>(d_octree,
d_velocities,
d_weights,
d_positions,
d_mins, d_maxs,
allChildrenCount,
numberOfBodies, dt);
}
bool ComputationsBarnesHut::testingMomemntum(int numberOfBodies) {
float momentum[3] = {0.0f, 0.0f, 0.0f};
for (unsigned i = 0; i < numberOfBodies; i++) {
for(int k = 0; k < 3; k++) {
momentum[k] += (weightsD[i] * veloD[i*3 + k]);
}
}
std::cout << momentum[0] << " " << momentum[1] << " " << momentum[2] << std::endl;
return true;
}
void ComputationsBarnesHut::BarnesHutBridge(type &pos, int numberOfBodies, float dt) {
thrust::device_vector<float> posD = pos;
d_positions = thrust::raw_pointer_cast(posD.data());
createTree(numberOfBodies, dt);
//testingMomemntum(numberOfBodies);
pos = posD;
} |
aa6f1d7b48432ae108646ec54ed560aad1996dbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
clock_t sum_time = 0;
const int TILE = 16;
const int SIDE = 4;
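// Tiled matrix transpose: each 16x4 thread block stages a 16x16 tile of the
// 1024x1024 matrix in shared memory (padded to TILE+1 columns to avoid bank
// conflicts) and writes it back transposed. main() runs four independent
// transposes concurrently on four streams and times the loop with events.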
__global__ void kernel(int *or_mat, int * to){
__shared__ int mat[TILE][TILE + 1];
int x = blockIdx.x * TILE + threadIdx.x;
int y = blockIdx.y * TILE + threadIdx.y;
#pragma unroll
for (int k = 0; k < TILE; k += SIDE) {
if (x < 1024 && y + k < 1024)mat[threadIdx.y + k][threadIdx.x] = or_mat[((y + k) * 1024) + x];
}
__syncthreads();
x = blockIdx.y * TILE + threadIdx.x;
y = blockIdx.x * TILE + threadIdx.y;
#pragma unroll
for (int k = 0; k < TILE; k += SIDE){
if (x < 1024 && y + k < 1024)to[(y + k) * 1024 + x] = mat[threadIdx.x][threadIdx.y + k];
}
}
int main(){
int n, it, i, j;
int * mat, *to, *d_mat, *d_to;
int *d_mat2, *d_to2;
int *d_mat3, *d_to3;
int *d_mat4, *d_to4;
n = 1024;
mat = (int*)malloc(n*n*sizeof(int));
to = (int*)malloc(n*n*sizeof(int));
printf("input the iter times:\n");
scanf("%d", &it);
for (i = 0; i<n; i++){
for (j = 0; j<n; j++){
mat[i*n + j] = 1;
}
}
hipMalloc((void**)&d_mat, n*n*sizeof(int));
hipMalloc((void**)&d_to, n*n*sizeof(int));
hipMemcpy(d_mat, mat, n*n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&d_mat2, n*n*sizeof(int));
hipMalloc((void**)&d_to2, n*n*sizeof(int));
hipMemcpy(d_mat2, mat, n*n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&d_mat3, n*n*sizeof(int));
hipMalloc((void**)&d_to3, n*n*sizeof(int));
hipMemcpy(d_mat3, mat, n*n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&d_mat4, n*n*sizeof(int));
hipMalloc((void**)&d_to4, n*n*sizeof(int));
hipMemcpy(d_mat4, mat, n*n*sizeof(int), hipMemcpyHostToDevice);
dim3 dimBlock(16, 4);
dim3 dimGrid(64, 64);
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipStream_t stream[4];
for (i = 0; i < 4; i++){
hipStreamCreate(&stream[i]);
}
for (i = 0; i<it; i++){
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), sizeof(int)*TILE*(TILE+1), stream[0], d_mat, d_to);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), sizeof(int)*TILE*(TILE+1), stream[1], d_mat2, d_to2);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), sizeof(int)*TILE*(TILE+1), stream[2], d_mat3, d_to3);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), sizeof(int)*TILE*(TILE+1), stream[3], d_mat4, d_to4);
hipDeviceSynchronize();
}
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
printf("The total running time is: %f\n", time);
printf("input any to exit\n");
scanf("%d", &it);
}
| aa6f1d7b48432ae108646ec54ed560aad1996dbe.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
clock_t sum_time = 0;
const int TILE = 16;
const int SIDE = 4;
__global__ void kernel(int *or_mat, int * to){
__shared__ int mat[TILE][TILE + 1];
int x = blockIdx.x * TILE + threadIdx.x;
int y = blockIdx.y * TILE + threadIdx.y;
#pragma unroll
for (int k = 0; k < TILE; k += SIDE) {
if (x < 1024 && y + k < 1024)mat[threadIdx.y + k][threadIdx.x] = or_mat[((y + k) * 1024) + x];
}
__syncthreads();
x = blockIdx.y * TILE + threadIdx.x;
y = blockIdx.x * TILE + threadIdx.y;
#pragma unroll
for (int k = 0; k < TILE; k += SIDE){
if (x < 1024 && y + k < 1024)to[(y + k) * 1024 + x] = mat[threadIdx.x][threadIdx.y + k];
}
}
int main(){
int n, it, i, j;
int * mat, *to, *d_mat, *d_to;
int *d_mat2, *d_to2;
int *d_mat3, *d_to3;
int *d_mat4, *d_to4;
n = 1024;
mat = (int*)malloc(n*n*sizeof(int));
to = (int*)malloc(n*n*sizeof(int));
printf("input the iter times:\n");
scanf("%d", &it);
for (i = 0; i<n; i++){
for (j = 0; j<n; j++){
mat[i*n + j] = 1;
}
}
cudaMalloc((void**)&d_mat, n*n*sizeof(int));
cudaMalloc((void**)&d_to, n*n*sizeof(int));
cudaMemcpy(d_mat, mat, n*n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_mat2, n*n*sizeof(int));
cudaMalloc((void**)&d_to2, n*n*sizeof(int));
cudaMemcpy(d_mat2, mat, n*n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_mat3, n*n*sizeof(int));
cudaMalloc((void**)&d_to3, n*n*sizeof(int));
cudaMemcpy(d_mat3, mat, n*n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_mat4, n*n*sizeof(int));
cudaMalloc((void**)&d_to4, n*n*sizeof(int));
cudaMemcpy(d_mat4, mat, n*n*sizeof(int), cudaMemcpyHostToDevice);
dim3 dimBlock(16, 4);
dim3 dimGrid(64, 64);
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaStream_t stream[4];
for (i = 0; i < 4; i++){
cudaStreamCreate(&stream[i]);
}
for (i = 0; i<it; i++){
kernel<<<dimGrid, dimBlock, sizeof(int)*TILE*(TILE+1), stream[0]>>>(d_mat, d_to);
kernel<<<dimGrid, dimBlock, sizeof(int)*TILE*(TILE+1), stream[1]>>>(d_mat2, d_to2);
kernel<<<dimGrid, dimBlock, sizeof(int)*TILE*(TILE+1), stream[2]>>>(d_mat3, d_to3);
kernel<<<dimGrid, dimBlock, sizeof(int)*TILE*(TILE+1), stream[3]>>>(d_mat4, d_to4);
cudaThreadSynchronize();
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
printf("The total running time is: %f\n", time);
printf("input any to exit\n");
scanf("%d", &it);
}
|
4ec8d6ec091739f416d9a7239b1e62fe44962413.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipfft.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#define cufftSafeCall(err) __cufftSafeCall (err, __FILE__, __LINE__)
static const char *_cudaGetErrorEnum (hipfftResult error)
{
switch (error)
{
#define cr(x) case HIPFFT_##x: return #x
cr (SUCCESS);
cr (INVALID_PLAN);
cr (ALLOC_FAILED);
cr (INVALID_TYPE);
cr (INVALID_VALUE);
cr (INTERNAL_ERROR);
cr (EXEC_FAILED);
cr (SETUP_FAILED);
cr (INVALID_SIZE);
cr (UNALIGNED_DATA);
#undef cr
}
return "UNKNOWN";
}
inline void __cufftSafeCall (hipfftResult err, const char * file, const int line)
{
if (HIPFFT_SUCCESS != err)
{
fprintf (stderr, "CUFFT error in file '%s'\n",__FILE__);
fprintf (stderr, "CUFFT error %d: %s\nterminating!\n", err, _cudaGetErrorEnum (err));
hipDeviceReset ();
}
}
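// Benchmark driver: parses N, LOT and the stride/distance layout from the
// command line, builds a batched real-to-complex (D2Z) plan with
// hipfftPlanMany, fills the input with a simple test waveform selected by
// kfunc, executes the in-place transform ntime times and reports the
// elapsed wall-clock time.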
int main (int argc, char * argv[])
{
if (argc < 10)
{
fprintf (stderr, "Usage: %s N LOT istride ostride idist odist llprint kfunc ntime\n", argv[0]);
return 1;
}
int N = atoi (argv[1]);
int LOT = atoi (argv[2]);
int istride = atoi (argv[3]);
int ostride = atoi (argv[4]);
int idist = atoi (argv[5]);
int odist = atoi (argv[6]);
int llprint = atoi (argv[7]);
int kfunc = atoi (argv[8]);
int ntime = atoi (argv[9]);
assert ((istride == 1) || (idist == 1));
assert ((ostride == 1) || (odist == 1));
hipfftHandle plan;
if (hipDeviceSynchronize() != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
int embed[1] = {1};
cufftSafeCall (hipfftCreate (&plan));
cufftSafeCall (hipfftPlanMany (&plan, 1, &N, embed, istride, idist, embed, ostride, odist, HIPFFT_D2Z, LOT));
if (llprint)
printf (" N = %d, LOT = %d, istride = %d, ostride = %d, idist = %d, odist = %d\n", N, LOT, istride, ostride, idist, odist);
if (hipDeviceSynchronize () != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
size_t sz = LOT * idist + N * istride + 2 * LOT;
if (llprint)
printf (" sz = %ld\n", sz);
double * z = (double *)malloc (sz * sizeof (double));
for (int i = 0; i < sz; i++)
z[i] = 9999.;
for (int j = 0; j < LOT; j++)
for (int i = 0; i < N; i++)
{
double zval = 0.;
switch (kfunc)
{
case 1: zval = (i % 4) ? +1. : -1.; break;
case 2: zval = (i % 2) ? +1. : -1.; break;
default: zval = 1.;
}
z[j*idist+i*istride] = zval;
}
if (llprint == 1)
for (int j = 0; j < LOT; j++)
{
for (int i = 0; i < N+2; i++)
printf (" %8.1f", z[j*idist+i*istride]);
printf ("\n");
}
if (llprint == 2)
for (int i = 0; i < sz; i++)
{
printf (" %8.1f", z[i]);
if ((((i + 1) % 20) == 0) || (i == sz - 1)) printf ("\n");
}
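// In-place real-to-complex transform: the buffer is allocated and copied in units of
// double (sz of them) and reinterpreted as complex output, which is what the extra
// 2*LOT slots in sz appear to allow for.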
hipfftDoubleComplex * data = NULL;
hipMalloc ((void**)&data, sz * sizeof (double));
hipMemcpy (data, z, sz * sizeof (double), hipMemcpyHostToDevice);
clock_t t0 = clock ();
for (int itime = 0; itime < ntime; itime++)
cufftSafeCall (hipfftExecD2Z (plan, (hipfftDoubleReal*)data, data));
clock_t t1 = clock ();
printf (" sz = %ld, dt = %f\n", sz, (double)(t1-t0)/1e+6);
hipMemcpy (z, data, sz * sizeof (double), hipMemcpyDeviceToHost);
if (llprint == 1)
for (int j = 0; j < LOT; j++)
{
for (int i = 0; i < N+2; i++)
printf (" %8.1f", z[j*idist+i*istride]);
printf ("\n");
}
if (llprint == 2)
for (int i = 0; i < sz; i++)
{
printf (" %8.1f", z[i]);
if ((((i + 1) % 20) == 0) || (i == sz - 1)) printf ("\n");
}
return 0;
}
| 4ec8d6ec091739f416d9a7239b1e62fe44962413.cu | #include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#define cufftSafeCall(err) __cufftSafeCall (err, __FILE__, __LINE__)
static const char *_cudaGetErrorEnum (cufftResult error)
{
switch (error)
{
#define cr(x) case CUFFT_##x: return #x
cr (SUCCESS);
cr (INVALID_PLAN);
cr (ALLOC_FAILED);
cr (INVALID_TYPE);
cr (INVALID_VALUE);
cr (INTERNAL_ERROR);
cr (EXEC_FAILED);
cr (SETUP_FAILED);
cr (INVALID_SIZE);
cr (UNALIGNED_DATA);
#undef cr
}
return "UNKNOWN";
}
inline void __cufftSafeCall (cufftResult err, const char * file, const int line)
{
if (CUFFT_SUCCESS != err)
{
fprintf (stderr, "CUFFT error in file '%s', line %d\n", file, line);
fprintf (stderr, "CUFFT error %d: %s\nterminating!\n", err, _cudaGetErrorEnum (err));
cudaDeviceReset ();
}
}
int main (int argc, char * argv[])
{
if (argc < 10)
{
fprintf (stderr, "Usage: %s N LOT istride ostride idist odist llprint kfunc ntime\n", argv[0]);
return 1;
}
int N = atoi (argv[1]);
int LOT = atoi (argv[2]);
int istride = atoi (argv[3]);
int ostride = atoi (argv[4]);
int idist = atoi (argv[5]);
int odist = atoi (argv[6]);
int llprint = atoi (argv[7]);
int kfunc = atoi (argv[8]);
int ntime = atoi (argv[9]);
assert ((istride == 1) || (idist == 1));
assert ((ostride == 1) || (odist == 1));
cufftHandle plan;
if (cudaDeviceSynchronize() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
int embed[1] = {1};
cufftSafeCall (cufftCreate (&plan));
cufftSafeCall (cufftPlanMany (&plan, 1, &N, embed, istride, idist, embed, ostride, odist, CUFFT_D2Z, LOT));
if (llprint)
printf (" N = %d, LOT = %d, istride = %d, ostride = %d, idist = %d, odist = %d\n", N, LOT, istride, ostride, idist, odist);
if (cudaDeviceSynchronize () != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return 1;
}
size_t sz = LOT * idist + N * istride + 2 * LOT;
if (llprint)
printf (" sz = %ld\n", sz);
double * z = (double *)malloc (sz * sizeof (double));
for (int i = 0; i < sz; i++)
z[i] = 9999.;
for (int j = 0; j < LOT; j++)
for (int i = 0; i < N; i++)
{
double zval = 0.;
switch (kfunc)
{
case 1: zval = (i % 4) ? +1. : -1.; break;
case 2: zval = (i % 2) ? +1. : -1.; break;
default: zval = 1.;
}
z[j*idist+i*istride] = zval;
}
if (llprint == 1)
for (int j = 0; j < LOT; j++)
{
for (int i = 0; i < N+2; i++)
printf (" %8.1f", z[j*idist+i*istride]);
printf ("\n");
}
if (llprint == 2)
for (int i = 0; i < sz; i++)
{
printf (" %8.1f", z[i]);
if ((((i + 1) % 20) == 0) || (i == sz - 1)) printf ("\n");
}
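// In-place real-to-complex transform: the buffer is allocated and copied in units of
// double (sz of them) and reinterpreted as complex output, which is what the extra
// 2*LOT slots in sz appear to allow for.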
cufftDoubleComplex * data = NULL;
cudaMalloc ((void**)&data, sz * sizeof (double));
cudaMemcpy (data, z, sz * sizeof (double), cudaMemcpyHostToDevice);
clock_t t0 = clock ();
for (int itime = 0; itime < ntime; itime++)
cufftSafeCall (cufftExecD2Z (plan, (cufftDoubleReal*)data, data));
clock_t t1 = clock ();
printf (" sz = %ld, dt = %f\n", sz, (double)(t1-t0)/1e+6);
cudaMemcpy (z, data, sz * sizeof (double), cudaMemcpyDeviceToHost);
if (llprint == 1)
for (int j = 0; j < LOT; j++)
{
for (int i = 0; i < N+2; i++)
printf (" %8.1f", z[j*idist+i*istride]);
printf ("\n");
}
if (llprint == 2)
for (int i = 0; i < sz; i++)
{
printf (" %8.1f", z[i]);
if ((((i + 1) % 20) == 0) || (i == sz - 1)) printf ("\n");
}
return 0;
}
|
5a9bc801d2d9ee8f8808c8b531f2e4c6255f14f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helpme.h"
#include <math.h>
#define BLOCK_SIZE 50
__device__ int sobelX[3][3] {{-1,0,1},{-2,0,2},{-1,0,1}};
__device__ int sobelY[3][3] {{1,2,1},{0,0,0},{-1,-2,-1}};
// Must be initialized before launching the kernels that reference them
__constant__ float3 lowerValue;
__constant__ float3 higherValue;
__device__ bool InRange(float3 v) {
if( v.x >= lowerValue.x && v.y >= lowerValue.y && v.z >= lowerValue.z
&& v.x <= higherValue.x && v.y <= higherValue.y && v.z <= higherValue.z) {
return true;
}
return false;
}
__global__ static void Kernel_Sobel_Operator(uchar* ArrayA, uchar* ArrayR, int cols, int rows) {
int index = getGlobalIDx_1D_1D();
int mtindex = index - cols;
int mbindex = index + cols; // Middle Bottom
// Skip threads on the top or bottom border rows (their 3x3 window would leave the image)
if(mtindex < 0 || mbindex >= (rows*cols)) {
return;
}
int lt = ArrayA[mtindex - 1]; // Left Top
int rt = ArrayA[mtindex + 1]; // Right Top
int lb = ArrayA[mbindex - 1]; // Left Bottom
int rb = ArrayA[mbindex + 1]; // Right Bottom
int ml = ArrayA[index - 1]; // MiddleLeft
int mr = ArrayA[index + 1]; // MiddleRight
int mt = ArrayA[mtindex];
int mb = ArrayA[mbindex];
int x_weight = lt * sobelX[0][0] + ml * sobelX[1][0] + lb * sobelX[2][0] + rt * sobelX[0][2] + mr * sobelX[1][2] + rb * sobelX[2][2];
int y_weight = lt * sobelY[0][0] + mt * sobelY[0][1] + rt * sobelY[0][2] + lb * sobelY[2][0] + mb * sobelY[2][1] + rb * sobelY[2][2];
x_weight = x_weight * x_weight;
y_weight = y_weight * y_weight;
float val = sqrtf(x_weight+y_weight+0.0);
if(val > 255) {
val = 255;
}
ArrayR[index] = (uchar)val;
}
__global__ static void Kernel_Filter_HSV_TO_GS(float3* ArrayA,uchar* ArrayR,int size) {
int index = getGlobalIDx_1D_1D();
if (index >= size) return; // guard: the padded grid may run past the image size
if (InRange(ArrayA[index])) {
ArrayR[index] = 255;
return;
}
ArrayR[index] = 0;
}
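// Object-detection pipeline: convert the RGB image to HSV, threshold the HSV range
// into a binary mask, then run the Sobel operator on the mask to extract edges.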
extern "C" hipError_t StartKernel_Object_Detection(uchar3 *pArrayA, uchar* pArrayR, int cols,int rows) {
ValidPlateform(false);
int size = cols * rows;
int BLOCK_COUNT = getBlockCount_1D_1D(size,BLOCK_SIZE);
// Create the device pointers for our images
uchar3 *pArrayInitial; // the original input image
float3 *pArrayHSV; // the image converted to HSV
uchar *pArrayFilterBG; // the image with the background filtered out
uchar *pArraySobel; // the image after the Sobel filter
// Allocate device memory for the HSV conversion
size_t memSize = size * sizeof(uchar3);
size_t memSizeF = size * sizeof(float3);
size_t memSizeU = size * sizeof(uchar);
HANDLE_ERROR(hipMalloc((void**)&pArrayInitial,memSize));
HANDLE_ERROR(hipMalloc((void**)&pArrayHSV,memSizeF));
HANDLE_ERROR(hipMemcpy(pArrayInitial,pArrayA,memSize,hipMemcpyHostToDevice));
// Launch the kernel that converts the image to HSV
hipLaunchKernelGGL(( Kernel_RGB_TO_HSV), dim3(BLOCK_COUNT),dim3(BLOCK_SIZE), 0, 0, (uchar3*)pArrayInitial,(float3*)pArrayHSV,(int)size);
// While that kernel runs, allocate the rest of the memory for the other transformations
// allocate the memory for the background-filtered image
HANDLE_ERROR(hipMalloc((void**)&pArrayFilterBG,memSizeU));
float3 lower = make_float3(80,0.0,0.5);
float3 higher = make_float3(143,1.0,1.0);
HANDLE_ERROR(hipMemcpyToSymbol(lowerValue,&lower,sizeof(float3)));
HANDLE_ERROR(hipMemcpyToSymbol(higherValue,&higher,sizeof(float3)));
// wait for the most recently launched kernel to finish
HANDLE_ERROR(hipDeviceSynchronize());
printf("Finished HSV conversion\n");
hipLaunchKernelGGL(( Kernel_Filter_HSV_TO_GS), dim3(BLOCK_COUNT),dim3(BLOCK_SIZE), 0, 0, (float3*)pArrayHSV,(uchar*)pArrayFilterBG,(int)size);
HANDLE_ERROR(hipMalloc((void**)&pArraySobel,memSizeU));
HANDLE_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( Kernel_Sobel_Operator), dim3(BLOCK_COUNT),dim3(BLOCK_SIZE), 0, 0, (uchar*)pArrayFilterBG,(uchar*)pArraySobel,(int)cols,(int)rows);
HANDLE_ERROR(hipDeviceSynchronize());
// Copy the final image back to the CPU
HANDLE_ERROR(hipMemcpy(pArrayR,pArraySobel,memSizeU, hipMemcpyDeviceToHost));
hipFree(pArrayInitial);
hipFree(pArrayHSV);
hipFree(pArrayFilterBG);
hipFree(pArraySobel);
return hipSuccess;
}
extern "C" hipError_t StartKernel_RGB_TO_HSV(uchar3 *pArrayA,float3 *pArrayR,int size) {
ValidPlateform(false);
int BLOCK_COUNT = getBlockCount_1D_1D(size,BLOCK_SIZE);
// Create the pointers used by CUDA
uchar3 *ArrayA;
float3 *ArrayR;
// Allocate device memory for the two arrays on the GPU
// compute the size of the pixel array that represents the image
size_t memSize = size * sizeof(uchar3);
hipError_t cudaStatus = hipMalloc( (void**)&ArrayA,memSize);
if(cudaStatus != hipSuccess){
printf("Failed to allocate uchar3\n");
return cudaStatus;
}
size_t memSizeF = size * sizeof(float3);
cudaStatus = hipMalloc( (void**)&ArrayR,memSizeF);
if(cudaStatus != hipSuccess) {
printf("Failed to allocate float3\n");
return cudaStatus;
}
// Copy the input data array to the GPU
cudaStatus = hipMemcpy(ArrayA,pArrayA, memSize, hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess) {
printf("Failed to copy ArrayA to GPU\n");
return cudaStatus;
}
// Launch the kernel
hipLaunchKernelGGL(( Kernel_RGB_TO_HSV), dim3(BLOCK_COUNT),dim3(BLOCK_SIZE), 0, 0, (uchar3*)ArrayA,(float3*)ArrayR,(int)size);
if(hipDeviceSynchronize() == hipSuccess) {
printf("Kernel execution finished\r\n");
}
// Copy the result array from the GPU back to the CPU
cudaStatus = hipMemcpy(pArrayR,ArrayR,memSizeF,hipMemcpyDeviceToHost);
if(cudaStatus != hipSuccess) {
return cudaStatus;
}
hipFree(ArrayA);
hipFree(ArrayR);
return hipSuccess;
} | 5a9bc801d2d9ee8f8808c8b531f2e4c6255f14f7.cu | #include "helpme.h"
#include <math.h>
#define BLOCK_SIZE 50
__device__ int sobelX[3][3] {{-1,0,1},{-2,0,2},{-1,0,1}};
__device__ int sobelY[3][3] {{1,2,1},{0,0,0},{-1,-2,-1}};
// Must be initialized before launching the kernels that reference them
__constant__ float3 lowerValue;
__constant__ float3 higherValue;
__device__ bool InRange(float3 v) {
if( v.x >= lowerValue.x && v.y >= lowerValue.y && v.z >= lowerValue.z
&& v.x <= higherValue.x && v.y <= higherValue.y && v.z <= higherValue.z) {
return true;
}
return false;
}
__global__ static void Kernel_Sobel_Operator(uchar* ArrayA, uchar* ArrayR, int cols, int rows) {
int index = getGlobalIDx_1D_1D();
int mtindex = index - cols;
int mbindex = index + cols; // Middle Bottom
// Skip threads on the top or bottom border rows (their 3x3 window would leave the image)
if(mtindex < 0 || mbindex >= (rows*cols)) {
return;
}
int lt = ArrayA[mtindex - 1]; // Left Top
int rt = ArrayA[mtindex + 1]; // Right Top
int lb = ArrayA[mbindex - 1]; // Left Bottom
int rb = ArrayA[mbindex + 1]; // Right Bottom
int ml = ArrayA[index - 1]; // MiddleLeft
int mr = ArrayA[index + 1]; // MiddleRight
int mt = ArrayA[mtindex];
int mb = ArrayA[mbindex];
int x_weight = lt * sobelX[0][0] + ml * sobelX[1][0] + lb * sobelX[2][0] + rt * sobelX[0][2] + mr * sobelX[1][2] + rb * sobelX[2][2];
int y_weight = lt * sobelY[0][0] + mt * sobelY[0][1] + rt * sobelY[0][2] + lb * sobelY[2][0] + mb * sobelY[2][1] + rb * sobelY[2][2];
x_weight = x_weight * x_weight;
y_weight = y_weight * y_weight;
float val = sqrtf(x_weight+y_weight+0.0);
if(val > 255) {
val = 255;
}
ArrayR[index] = (uchar)val;
}
__global__ static void Kernel_Filter_HSV_TO_GS(float3* ArrayA,uchar* ArrayR,int size) {
int index = getGlobalIDx_1D_1D();
if (index >= size) return; // guard: the padded grid may run past the image size
if (InRange(ArrayA[index])) {
ArrayR[index] = 255;
return;
}
ArrayR[index] = 0;
}
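// Object-detection pipeline: convert the RGB image to HSV, threshold the HSV range
// into a binary mask, then run the Sobel operator on the mask to extract edges.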
extern "C" cudaError_t StartKernel_Object_Detection(uchar3 *pArrayA, uchar* pArrayR, int cols,int rows) {
ValidPlateform(false);
int size = cols * rows;
int BLOCK_COUNT = getBlockCount_1D_1D(size,BLOCK_SIZE);
// Create the device pointers for our images
uchar3 *pArrayInitial; // the original input image
float3 *pArrayHSV; // the image converted to HSV
uchar *pArrayFilterBG; // the image with the background filtered out
uchar *pArraySobel; // the image after the Sobel filter
// Allocate device memory for the HSV conversion
size_t memSize = size * sizeof(uchar3);
size_t memSizeF = size * sizeof(float3);
size_t memSizeU = size * sizeof(uchar);
HANDLE_ERROR(cudaMalloc((void**)&pArrayInitial,memSize));
HANDLE_ERROR(cudaMalloc((void**)&pArrayHSV,memSizeF));
HANDLE_ERROR(cudaMemcpy(pArrayInitial,pArrayA,memSize,cudaMemcpyHostToDevice));
// Launch the kernel that converts the image to HSV
Kernel_RGB_TO_HSV<<<BLOCK_COUNT,BLOCK_SIZE>>>((uchar3*)pArrayInitial,(float3*)pArrayHSV,(int)size);
// While that kernel runs, allocate the rest of the memory for the other transformations
// allocate the memory for the background-filtered image
HANDLE_ERROR(cudaMalloc((void**)&pArrayFilterBG,memSizeU));
float3 lower = make_float3(80,0.0,0.5);
float3 higher = make_float3(143,1.0,1.0);
HANDLE_ERROR(cudaMemcpyToSymbol(lowerValue,&lower,sizeof(float3)));
HANDLE_ERROR(cudaMemcpyToSymbol(higherValue,&higher,sizeof(float3)));
// wait for the most recently launched kernel to finish
HANDLE_ERROR(cudaDeviceSynchronize());
printf("Finished HSV conversion\n");
Kernel_Filter_HSV_TO_GS<<<BLOCK_COUNT,BLOCK_SIZE>>>((float3*)pArrayHSV,(uchar*)pArrayFilterBG,(int)size);
HANDLE_ERROR(cudaMalloc((void**)&pArraySobel,memSizeU));
HANDLE_ERROR(cudaDeviceSynchronize());
Kernel_Sobel_Operator<<<BLOCK_COUNT,BLOCK_SIZE>>>((uchar*)pArrayFilterBG,(uchar*)pArraySobel,(int)cols,(int)rows);
HANDLE_ERROR(cudaDeviceSynchronize());
// Copy the final image back to the CPU
HANDLE_ERROR(cudaMemcpy(pArrayR,pArraySobel,memSizeU, cudaMemcpyDeviceToHost));
cudaFree(pArrayInitial);
cudaFree(pArrayHSV);
cudaFree(pArrayFilterBG);
cudaFree(pArraySobel);
return cudaSuccess;
}
extern "C" cudaError_t StartKernel_RGB_TO_HSV(uchar3 *pArrayA,float3 *pArrayR,int size) {
ValidPlateform(false);
int BLOCK_COUNT = getBlockCount_1D_1D(size,BLOCK_SIZE);
// Create the pointers used by CUDA
uchar3 *ArrayA;
float3 *ArrayR;
// Allocate device memory for the two arrays on the GPU
// compute the size of the pixel array that represents the image
size_t memSize = size * sizeof(uchar3);
cudaError_t cudaStatus = cudaMalloc( (void**)&ArrayA,memSize);
if(cudaStatus != cudaSuccess){
printf("Failed to allocate uchar3\n");
return cudaStatus;
}
size_t memSizeF = size * sizeof(float3);
cudaStatus = cudaMalloc( (void**)&ArrayR,memSizeF);
if(cudaStatus != cudaSuccess) {
printf("Failed to allocate float3\n");
return cudaStatus;
}
// Copy the input data array to the GPU
cudaStatus = cudaMemcpy(ArrayA,pArrayA, memSize, cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess) {
printf("Failed to copy ArrayA to GPU\n");
return cudaStatus;
}
// Launch the kernel
Kernel_RGB_TO_HSV<<<BLOCK_COUNT,BLOCK_SIZE>>>((uchar3*)ArrayA,(float3*)ArrayR,(int)size);
if(cudaDeviceSynchronize() == cudaSuccess) {
printf("Kernel execution finished\r\n");
}
// Copy the result array from the GPU back to the CPU
cudaStatus = cudaMemcpy(pArrayR,ArrayR,memSizeF,cudaMemcpyDeviceToHost);
if(cudaStatus != cudaSuccess) {
return cudaStatus;
}
cudaFree(ArrayA);
cudaFree(ArrayR);
return cudaSuccess;
} |
0471aa9fea83f801426aa92c4487d780e74bc252.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
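// Transposed matrix-vector product x = A^T * y: each thread accumulates the dot
// product of one length-m column of A (assumed column-major, m x n) with y.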
__global__ void gen_matvecT(float *A, float *x, float *y, const int m, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( xIndex < n ) {
float c = 0.0f;
for(int i=0; i<m; i++)
c = c + y[i] * A[xIndex * m + i];
x[xIndex] = c;
}
} | 0471aa9fea83f801426aa92c4487d780e74bc252.cu | #include "includes.h"
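// Transposed matrix-vector product x = A^T * y: each thread accumulates the dot
// product of one length-m column of A (assumed column-major, m x n) with y.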
__global__ void gen_matvecT(float *A, float *x, float *y, const int m, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( xIndex < n ) {
float c = 0.0f;
for(int i=0; i<m; i++)
c = c + y[i] * A[xIndex * m + i];
x[xIndex] = c;
}
} |
57f38df7b1e99f1ae688b996b28dac68ff63c7ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void LRMomentaKernel (double *RadMomP, double *RadMomM, double *ThetaMomP, double *ThetaMomM, double *Dens, double *Vrad, double *Vtheta, int nrad, int nsec, double *Rmed, double OmegaFrame)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
if (i<nrad && j<nsec){
RadMomP[i*nsec + j] = Dens[i*nsec + j] * Vrad[(i+1)*nsec + j]; // (i+1)*nsec
RadMomM[i*nsec + j] = Dens[i*nsec + j] * Vrad[i*nsec + j];
/* it is the angular momentum -> ThetaMomP */
ThetaMomP[i*nsec + j] = Dens[i*nsec + j] * (Vtheta[i*nsec + (j+1)%nsec]+Rmed[i]*OmegaFrame)*Rmed[i];
ThetaMomM[i*nsec + j] = Dens[i*nsec + j] * (Vtheta[i*nsec + j]+Rmed[i]*OmegaFrame)*Rmed[i];
}
} | 57f38df7b1e99f1ae688b996b28dac68ff63c7ff.cu | #include "includes.h"
__global__ void LRMomentaKernel (double *RadMomP, double *RadMomM, double *ThetaMomP, double *ThetaMomM, double *Dens, double *Vrad, double *Vtheta, int nrad, int nsec, double *Rmed, double OmegaFrame)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
if (i<nrad && j<nsec){
RadMomP[i*nsec + j] = Dens[i*nsec + j] * Vrad[(i+1)*nsec + j]; // (i+1)*nsec
RadMomM[i*nsec + j] = Dens[i*nsec + j] * Vrad[i*nsec + j];
/* it is the angular momentum -> ThetaMomP */
ThetaMomP[i*nsec + j] = Dens[i*nsec + j] * (Vtheta[i*nsec + (j+1)%nsec]+Rmed[i]*OmegaFrame)*Rmed[i];
ThetaMomM[i*nsec + j] = Dens[i*nsec + j] * (Vtheta[i*nsec + j]+Rmed[i]*OmegaFrame)*Rmed[i];
}
} |
f77f5a3592ba15802b0d8a75b6b90a66759a9b80.hip | // !!! This is a file automatically generated by hipify!!!
/*
** cudaFunc.cu - helper functions for cudaMpi1Bruteforce.c
**
**/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "../mpi/nbody.h"
// CUDA runtime
#include <hip/hip_runtime.h>
extern "C"
{
#include <hip/hip_runtime.h>
}
#define MEMSIZE 30
//int nparticles;
extern particle_t *particles;
particle_t *d_particles, *d_nparticles;
__device__ void compute_force(particle_t*p, double x_pos, double y_pos, double mass) {
double x_sep, y_sep, dist_sq, grav_base;
x_sep = x_pos - p->x_pos;
y_sep = y_pos - p->y_pos;
dist_sq = MAX((x_sep*x_sep) + (y_sep*y_sep), 0.01);
/* Use the 2-dimensional gravity rule: F = d * (GMm/d^2) */
grav_base = GRAV_CONSTANT*(p->mass)*(mass)/dist_sq;
p->x_force += grav_base*x_sep;
p->y_force += grav_base*y_sep;
}
__global__ void calcForce(particle_t *d_particles, particle_t *d_nparticles, int d_nbP, int fPart, int lPart)
{
int i, j;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i + fPart < lPart){
//printf("calc force de part %d\n", i - fPart);
d_nparticles[i].x_force = 0;
d_nparticles[i].y_force = 0;
//printf("calculating for part %d \n", i);
for(j = 0; j < d_nbP; j++)
compute_force(&d_nparticles[i], d_particles[j].x_pos, d_particles[j].y_pos, d_particles[j].mass); // modify d_nparticles in place, reading the source data from d_particles
}
}
extern "C" void initCuda(int nbPMax){
hipMalloc((void**)&d_particles, nparticles * sizeof(particle_t));
hipMalloc((void**)&d_nparticles, nparticles * sizeof(particle_t));
}
extern "C" void finalizeCuda(){
hipFree(d_particles);
hipFree(d_nparticles);
}
extern "C" void all_move_particles(double step, int fPart, int lPart)
{
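// Each rank updates forces for its slice [fPart, lPart): the full particle array is
// copied as interaction sources, the local slice as targets, and the slice copied back.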
hipMemcpy(d_particles, particles, nparticles * sizeof(particle_t), hipMemcpyHostToDevice);
hipMemcpy(d_nparticles, particles + fPart, (lPart - fPart) * sizeof(particle_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calcForce), dim3(1000000), dim3(10), 0, 0, d_particles, d_nparticles, nparticles, fPart, lPart);
hipMemcpy(particles + fPart, d_nparticles, (lPart - fPart) * sizeof(particle_t), hipMemcpyDeviceToHost);
}
| f77f5a3592ba15802b0d8a75b6b90a66759a9b80.cu | /*
** cudaFunc.cu - helper functions for cudaMpi1Bruteforce.c
**
**/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "../mpi/nbody.h"
// CUDA runtime
#include <cuda_runtime.h>
extern "C"
{
#include <cuda.h>
}
#define MEMSIZE 30
//int nparticles;
extern particle_t *particles;
particle_t *d_particles, *d_nparticles;
__device__ void compute_force(particle_t*p, double x_pos, double y_pos, double mass) {
double x_sep, y_sep, dist_sq, grav_base;
x_sep = x_pos - p->x_pos;
y_sep = y_pos - p->y_pos;
dist_sq = MAX((x_sep*x_sep) + (y_sep*y_sep), 0.01);
/* Use the 2-dimensional gravity rule: F = d * (GMm/d^2) */
grav_base = GRAV_CONSTANT*(p->mass)*(mass)/dist_sq;
p->x_force += grav_base*x_sep;
p->y_force += grav_base*y_sep;
}
__global__ void calcForce(particle_t *d_particles, particle_t *d_nparticles, int d_nbP, int fPart, int lPart)
{
int i, j;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i + fPart < lPart){
//printf("calc force de part %d\n", i - fPart);
d_nparticles[i].x_force = 0;
d_nparticles[i].y_force = 0;
//printf("calculating for part %d \n", i);
for(j = 0; j < d_nbP; j++)
compute_force(&d_nparticles[i], d_particles[j].x_pos, d_particles[j].y_pos, d_particles[j].mass); // modify d_nparticles in place, reading the source data from d_particles
}
}
extern "C" void initCuda(int nbPMax){
cudaMalloc((void**)&d_particles, nparticles * sizeof(particle_t));
cudaMalloc((void**)&d_nparticles, nparticles * sizeof(particle_t));
}
extern "C" void finalizeCuda(){
cudaFree(d_particles);
cudaFree(d_nparticles);
}
extern "C" void all_move_particles(double step, int fPart, int lPart)
{
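// Each rank updates forces for its slice [fPart, lPart): the full particle array is
// copied as interaction sources, the local slice as targets, and the slice copied back.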
cudaMemcpy(d_particles, particles, nparticles * sizeof(particle_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_nparticles, particles + fPart, (lPart - fPart) * sizeof(particle_t), cudaMemcpyHostToDevice);
calcForce<<<1000000, 10>>>(d_particles, d_nparticles, nparticles, fPart, lPart);
cudaMemcpy(particles + fPart, d_nparticles, (lPart - fPart) * sizeof(particle_t), cudaMemcpyDeviceToHost);
}
|
1fb1fb9a380c5cba31837ed62d54554c5aa2494f.hip | // !!! This is a file automatically generated by hipify!!!
#include "WTDenUpdateKernel.cuh"
void UpdateWTDenKernel(WTD &argWTDen, WTAll &argWT, Document &argDoc, int argChunkId, int argGPUId, hipStream_t &stream) {
int iter= (argWTDen.numOfWordD - 1) / GridDim + 1;// number of iterations for block.
int counter = 0;
int GPUId = argGPUId;
hipSetDevice(GPUId);
/*int numOfTokenD = argWTDen.numOfWordD;*/
for (int i = 0; i < iter; i++)
{
WTDen_Update_Kernel << <GridDim, BlockDim >> >(argDoc.GPUChunkVec[GPUId].deviceTLTopic, argWTDen.WTDenseGPUChunkVec[GPUId].deviceWTDenseCopy, argDoc.GPUChunkVec[GPUId].deviceTLWordCount, argDoc.GPUChunkVec[GPUId].deviceTLWordOffset, argWT.WTGPUChunkVec[GPUId].deviceWTOffset, argWTDen.numOfWordD, counter);
H_ERR(hipDeviceSynchronize());
counter++;
}
}
void UpdateWTDenRowSumKernel(WTD &argWTDen, WTAll &argWT, int argGPUId, hipStream_t &stream)
{
int GPUId = argGPUId;
hipSetDevice(GPUId);
WTDen_Sum_Update_Kernel << <GridDim, BlockDim>> >(argWTDen.WTDenseGPUChunkVec[0].deviceWTDense, argWT.WTGPUChunkVec[GPUId].deviceWTRowSum, argWT.WTGPUChunkVec[GPUId].deviceWTOffset, argWTDen.numOfWordD);
H_ERR(hipDeviceSynchronize());
}
| 1fb1fb9a380c5cba31837ed62d54554c5aa2494f.cu | #include "WTDenUpdateKernel.cuh"
void UpdateWTDenKernel(WTD &argWTDen, WTAll &argWT, Document &argDoc, int argChunkId, int argGPUId, cudaStream_t &stream) {
int iter= (argWTDen.numOfWordD - 1) / GridDim + 1;// number of iterations for block.
int counter = 0;
int GPUId = argGPUId;
cudaSetDevice(GPUId);
/*int numOfTokenD = argWTDen.numOfWordD;*/
for (int i = 0; i < iter; i++)
{
WTDen_Update_Kernel << <GridDim, BlockDim >> >(argDoc.GPUChunkVec[GPUId].deviceTLTopic, argWTDen.WTDenseGPUChunkVec[GPUId].deviceWTDenseCopy, argDoc.GPUChunkVec[GPUId].deviceTLWordCount, argDoc.GPUChunkVec[GPUId].deviceTLWordOffset, argWT.WTGPUChunkVec[GPUId].deviceWTOffset, argWTDen.numOfWordD, counter);
H_ERR(cudaDeviceSynchronize());
counter++;
}
}
void UpdateWTDenRowSumKernel(WTD &argWTDen, WTAll &argWT, int argGPUId, cudaStream_t &stream)
{
int GPUId = argGPUId;
cudaSetDevice(GPUId);
WTDen_Sum_Update_Kernel << <GridDim, BlockDim>> >(argWTDen.WTDenseGPUChunkVec[0].deviceWTDense, argWT.WTGPUChunkVec[GPUId].deviceWTRowSum, argWT.WTGPUChunkVec[GPUId].deviceWTOffset, argWTDen.numOfWordD);
H_ERR(cudaDeviceSynchronize());
}
|
f7f54c0dbd2f8b37a5ba4ee51060374cd98449d0.hip | // !!! This is a file automatically generated by hipify!!!
// ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include <mpi.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
#include "unit_tests.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// ==============================================================================
void write_binary(std::string fname, Field &u, SubDomain &domain, Discretization &options)
{
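// Collective MPI-IO: each rank describes its local nx-by-ny block of the global grid
// with a subarray filetype, refreshes the host copy from the device, then writes in one call.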
MPI_Offset disp = 0;
MPI_File filehandle;
MPI_Datatype filetype;
int result =
MPI_File_open(
MPI_COMM_WORLD,
fname.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_INFO_NULL,
&filehandle
);
assert(result==MPI_SUCCESS);
int ustart[] = {domain.startx-1, domain.starty-1};
int ucount[] = {domain.nx, domain.ny};
int dimuids[] = {options.nx, options.ny};
result = MPI_Type_create_subarray(2, dimuids, ucount, ustart, MPI_ORDER_FORTRAN, MPI_DOUBLE, &filetype);
assert(result==MPI_SUCCESS);
result = MPI_Type_commit(&filetype);
assert(result==MPI_SUCCESS);
result = MPI_File_set_view(filehandle, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
assert(result==MPI_SUCCESS);
// update the host values, before writing to file
u.update_host();
result = MPI_File_write_all(filehandle, u.host_data(), domain.N, MPI_DOUBLE, MPI_STATUS_IGNORE);
assert(result==MPI_SUCCESS);
result = MPI_Type_free(&filetype);
assert(result==MPI_SUCCESS);
result = MPI_File_close(&filehandle);
assert(result==MPI_SUCCESS);
}
// read command line arguments
static void readcmdline(Discretization& options, int argc, char* argv[])
{
if (argc<5 || argc>6 ) {
std::cerr << "Usage: main nx ny nt t\n";
std::cerr << " nx number of gridpoints in x-direction\n";
std::cerr << " ny number of gridpoints in y-direction\n";
std::cerr << " nt number of timesteps\n";
std::cerr << " t total time\n";
std::cerr << " v [optional] turn on verbose output\n";
exit(1);
}
// read nx
options.nx = atoi(argv[1]);
if (options.nx < 1) {
std::cerr << "nx must be positive integer\n";
exit(-1);
}
// read ny
options.ny = atoi(argv[2]);
if (options.ny < 1) {
std::cerr << "ny must be positive integer\n";
exit(-1);
}
// read nt
options.nt = atoi(argv[3]);
if (options.nt < 1) {
std::cerr << "nt must be positive integer\n";
exit(-1);
}
// read total time
double t = atof(argv[4]);
if (t < 0) {
std::cerr << "t must be positive real value\n";
exit(-1);
}
verbose_output = false;
if( argc==6 ) {
verbose_output = (domain.rank==0);
}
// compute timestep size
options.dt = t / options.nt;
// compute the distance between grid points
// assume that x dimension has length 1.0
options.dx = 1. / (options.nx - 1);
// set alpha, assume diffusion coefficient D is 1
options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
int main(int argc, char* argv[])
{
// read command line arguments
readcmdline(options, argc, argv);
// initialize cuda
// assert that there is exactly one GPU per node, i.e. there should only be 1 GPU
// visible to each MPI rank
int device_count;
cuda_api_call( hipGetDeviceCount(&device_count) );
if(device_count != 1) {
std::cerr << "error: there should be one device per node" << std::endl;
exit(-1);
}
cuda_api_call( hipSetDevice(0) );
// get the cublas handle to force cublas initialization outside the main time
// stepping loop, to ensure that the timing doesn't count initialization costs
auto handle = cublas_handle();
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
bool is_root = mpi_rank==0;
// initialize subdomain
domain.init(mpi_rank, mpi_size, options);
domain.print();
int nx = domain.nx;
int ny = domain.ny;
int nt = options.nt;
// set iteration parameters
int max_cg_iters = 200;
int max_newton_iters = 50;
double tolerance = 1.e-6;
if( domain.rank == 0 ) {
std::cout << "\n========================================================================" << std::endl;
std::cout << " Welcome to mini-stencil!" << std::endl;
std::cout << "version :: C++ with MPI : " << domain.size << " MPI ranks" << std::endl;
std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;
std::cout << "iteration :: " << "CG " << max_cg_iters
<< ", Newton " << max_newton_iters
<< ", tolerance " << tolerance << std::endl;
std::cout << "========================================================================\n" << std::endl;
}
// allocate global fields
x_new.init(nx,ny);
x_old.init(nx,ny);
bndN.init(nx,1);
bndS.init(nx,1);
bndE.init(ny,1);
bndW.init(ny,1);
buffN.init(nx,1);
buffS.init(nx,1);
buffE.init(ny,1);
buffW.init(ny,1);
Field b(nx,ny);
Field deltax(nx,ny);
// TODO: put unit tests here because:
// * they can then use the buffers and fields allocated for the main application
// * they won't interfere with the initial conditions, set below
if (!unit_tests()) {
return 1;
}
// set dirichlet boundary conditions to 0 all around
ss_fill(bndN, 0);
ss_fill(bndS, 0);
ss_fill(bndE, 0);
ss_fill(bndW, 0);
// set the initial condition
// a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
// no larger than 1/8 of both xdim and ydim
ss_fill(x_new, 0.);
double xc = 1.0 / 4.0;
double yc = (options.ny - 1) * options.dx / 4;
double radius = std::min(xc, yc) / 2.0;
for (int j = domain.starty-1; j < domain.endy; j++)
{
double y = (j - 1) * options.dx;
for (int i = domain.startx-1; i < domain.endx; i++)
{
double x = (i - 1) * options.dx;
if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
x_new(i-domain.startx+1, j-domain.starty+1) = 0.1;
}
}
// update initial conditions on the device
x_new.update_device();
iters_cg = 0;
iters_newton = 0;
// start timer
double timespent = -omp_get_wtime();
// main timeloop
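// Each implicit time step solves a nonlinear system with Newton iterations; every
// Newton step solves its linearised correction with conjugate gradients (ss_cg).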
for (int timestep = 1; timestep <= nt; timestep++)
{
// set x_new and x_old to be the solution
ss_copy(x_old, x_new);
double residual;
bool converged = false;
int it;
for (it=0; it<max_newton_iters; it++)
{
// compute residual : requires both x_new and x_old
diffusion(x_new, b);
residual = ss_norm2(b);
// check for convergence
if (residual < tolerance)
{
converged = true;
break;
}
// solve linear system to get -deltax
bool cg_converged = false;
ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
// check that the CG solver converged
if (!cg_converged) break;
// update solution
ss_axpy(x_new, -1.0, deltax);
}
iters_newton += it+1;
// output some statistics
if (converged && verbose_output && is_root) {
std::cout << "step " << timestep
<< " required " << it
<< " iterations for residual " << residual
<< std::endl;
}
if (!converged) {
if(!domain.rank) {
std::cerr << "step " << timestep
<< " ERROR : nonlinear iterations failed to converge" << std::endl;
}
break;
}
}
// get times
timespent += omp_get_wtime();
////////////////////////////////////////////////////////////////////
// write final solution to BOV file for visualization
////////////////////////////////////////////////////////////////////
// binary data
write_binary("output.bin", x_old, domain, options);
// metadata
if (is_root) {
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
}
// print table sumarizing results
if (is_root) {
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "simulation took " << timespent << " seconds" << std::endl;
std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
<< float(iters_cg)/timespent << " iters/second" << std::endl;
std::cout << iters_newton << " newton iterations" << std::endl;
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
}
if (is_root) std::cout << "Goodbye!" << std::endl;
// clean windows, communicator and do finalize
MPI_Finalize();
return 0;
}
| f7f54c0dbd2f8b37a5ba4ee51060374cd98449d0.cu | // ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include <mpi.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
#include "unit_tests.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// ==============================================================================
void write_binary(std::string fname, Field &u, SubDomain &domain, Discretization &options)
{
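// Collective MPI-IO: each rank describes its local nx-by-ny block of the global grid
// with a subarray filetype, refreshes the host copy from the device, then writes in one call.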
MPI_Offset disp = 0;
MPI_File filehandle;
MPI_Datatype filetype;
int result =
MPI_File_open(
MPI_COMM_WORLD,
fname.c_str(),
MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_INFO_NULL,
&filehandle
);
assert(result==MPI_SUCCESS);
int ustart[] = {domain.startx-1, domain.starty-1};
int ucount[] = {domain.nx, domain.ny};
int dimuids[] = {options.nx, options.ny};
result = MPI_Type_create_subarray(2, dimuids, ucount, ustart, MPI_ORDER_FORTRAN, MPI_DOUBLE, &filetype);
assert(result==MPI_SUCCESS);
result = MPI_Type_commit(&filetype);
assert(result==MPI_SUCCESS);
result = MPI_File_set_view(filehandle, disp, MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);
assert(result==MPI_SUCCESS);
// update the host values, before writing to file
u.update_host();
result = MPI_File_write_all(filehandle, u.host_data(), domain.N, MPI_DOUBLE, MPI_STATUS_IGNORE);
assert(result==MPI_SUCCESS);
result = MPI_Type_free(&filetype);
assert(result==MPI_SUCCESS);
result = MPI_File_close(&filehandle);
assert(result==MPI_SUCCESS);
}
// read command line arguments
static void readcmdline(Discretization& options, int argc, char* argv[])
{
if (argc<5 || argc>6 ) {
std::cerr << "Usage: main nx ny nt t\n";
std::cerr << " nx number of gridpoints in x-direction\n";
std::cerr << " ny number of gridpoints in y-direction\n";
std::cerr << " nt number of timesteps\n";
std::cerr << " t total time\n";
std::cerr << " v [optional] turn on verbose output\n";
exit(1);
}
// read nx
options.nx = atoi(argv[1]);
if (options.nx < 1) {
std::cerr << "nx must be positive integer\n";
exit(-1);
}
// read ny
options.ny = atoi(argv[2]);
if (options.ny < 1) {
std::cerr << "ny must be positive integer\n";
exit(-1);
}
// read nt
options.nt = atoi(argv[3]);
if (options.nt < 1) {
std::cerr << "nt must be positive integer\n";
exit(-1);
}
// read total time
double t = atof(argv[4]);
if (t < 0) {
std::cerr << "t must be positive real value\n";
exit(-1);
}
verbose_output = false;
if( argc==6 ) {
verbose_output = (domain.rank==0);
}
// compute timestep size
options.dt = t / options.nt;
// compute the distance between grid points
// assume that x dimension has length 1.0
options.dx = 1. / (options.nx - 1);
// set alpha, assume diffusion coefficient D is 1
options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
int main(int argc, char* argv[])
{
// read command line arguments
readcmdline(options, argc, argv);
// initialize cuda
// assert that there is exactly one GPU per node, i.e. there should only be 1 GPU
// visible to each MPI rank
int device_count;
cuda_api_call( cudaGetDeviceCount(&device_count) );
if(device_count != 1) {
std::cerr << "error: there should be one device per node" << std::endl;
exit(-1);
}
cuda_api_call( cudaSetDevice(0) );
// get the cublas handle to force cublas initialization outside the main time
// stepping loop, to ensure that the timing doesn't count initialization costs
auto handle = cublas_handle();
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
bool is_root = mpi_rank==0;
// initialize subdomain
domain.init(mpi_rank, mpi_size, options);
domain.print();
int nx = domain.nx;
int ny = domain.ny;
int nt = options.nt;
// set iteration parameters
int max_cg_iters = 200;
int max_newton_iters = 50;
double tolerance = 1.e-6;
if( domain.rank == 0 ) {
std::cout << "\n========================================================================" << std::endl;
std::cout << " Welcome to mini-stencil!" << std::endl;
std::cout << "version :: C++ with MPI : " << domain.size << " MPI ranks" << std::endl;
std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;
std::cout << "iteration :: " << "CG " << max_cg_iters
<< ", Newton " << max_newton_iters
<< ", tolerance " << tolerance << std::endl;
std::cout << "========================================================================\n" << std::endl;
}
// allocate global fields
x_new.init(nx,ny);
x_old.init(nx,ny);
bndN.init(nx,1);
bndS.init(nx,1);
bndE.init(ny,1);
bndW.init(ny,1);
buffN.init(nx,1);
buffS.init(nx,1);
buffE.init(ny,1);
buffW.init(ny,1);
Field b(nx,ny);
Field deltax(nx,ny);
// TODO: put unit tests here because:
// * they can then use the buffers and fields allocated for the main application
// * they won't interfere with the initial conditions, set below
if (!unit_tests()) {
return 1;
}
// set dirichlet boundary conditions to 0 all around
ss_fill(bndN, 0);
ss_fill(bndS, 0);
ss_fill(bndE, 0);
ss_fill(bndW, 0);
// set the initial condition
// a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
// no larger than 1/8 of both xdim and ydim
ss_fill(x_new, 0.);
double xc = 1.0 / 4.0;
double yc = (options.ny - 1) * options.dx / 4;
double radius = std::min(xc, yc) / 2.0;
for (int j = domain.starty-1; j < domain.endy; j++)
{
double y = (j - 1) * options.dx;
for (int i = domain.startx-1; i < domain.endx; i++)
{
double x = (i - 1) * options.dx;
if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
x_new(i-domain.startx+1, j-domain.starty+1) = 0.1;
}
}
// update initial conditions on the device
x_new.update_device();
iters_cg = 0;
iters_newton = 0;
// start timer
double timespent = -omp_get_wtime();
// main timeloop
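// Each implicit time step solves a nonlinear system with Newton iterations; every
// Newton step solves its linearised correction with conjugate gradients (ss_cg).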
for (int timestep = 1; timestep <= nt; timestep++)
{
// set x_new and x_old to be the solution
ss_copy(x_old, x_new);
double residual;
bool converged = false;
int it;
for (it=0; it<max_newton_iters; it++)
{
// compute residual : requires both x_new and x_old
diffusion(x_new, b);
residual = ss_norm2(b);
// check for convergence
if (residual < tolerance)
{
converged = true;
break;
}
// solve linear system to get -deltax
bool cg_converged = false;
ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
// check that the CG solver converged
if (!cg_converged) break;
// update solution
ss_axpy(x_new, -1.0, deltax);
}
iters_newton += it+1;
// output some statistics
if (converged && verbose_output && is_root) {
std::cout << "step " << timestep
<< " required " << it
<< " iterations for residual " << residual
<< std::endl;
}
if (!converged) {
if(!domain.rank) {
std::cerr << "step " << timestep
<< " ERROR : nonlinear iterations failed to converge" << std::endl;
}
break;
}
}
// get times
timespent += omp_get_wtime();
////////////////////////////////////////////////////////////////////
// write final solution to BOV file for visualization
////////////////////////////////////////////////////////////////////
// binary data
write_binary("output.bin", x_old, domain, options);
// metadata
if (is_root) {
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
}
// print table sumarizing results
if (is_root) {
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "simulation took " << timespent << " seconds" << std::endl;
std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
<< float(iters_cg)/timespent << " iters/second" << std::endl;
std::cout << iters_newton << " newton iterations" << std::endl;
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
}
if (is_root) std::cout << "Goodbye!" << std::endl;
// clean windows, communicator and do finalize
MPI_Finalize();
return 0;
}
|
54e2868728a71855f6193f8e418107b8654bef8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define N 100000
#define R 3
#define BLOCK_SIZE 1024
__global__ void singlethread_stencil(int* d_in, int* d_out, int M){
int tid = threadIdx.x;
if(tid == 0) {
for(int i=0; i<M; i++) {
for(int j=-R; j<=R; j++)
d_out[i] += d_in[i+j+R];
}
}
}
__global__ void multiplethreads_stencil(int* d_in, int* d_out, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < M) {
int result = 0;
for(int j=-R; j<=R; j++)
result += d_in[tid+R+j];
d_out[tid] = result;
}
}
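// Shared-memory variant: each block stages BLOCK_SIZE elements plus R halo cells on
// each side, so every input element is read from global memory only once per block.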
__global__ void multiplethreads_faster_stencil(int* d_in, int* d_out, int M)
{
__shared__ int temp[BLOCK_SIZE+(2*R)];
int g_id = (blockIdx.x*BLOCK_SIZE)+ threadIdx.x;
int l_id = threadIdx.x+R;
int n_in = M + 2*R; // total number of input elements in d_in
// stage this block's elements plus the halos, guarding every global read
if(g_id + R < n_in)
temp[l_id] = d_in[g_id+R];
if(threadIdx.x < R) {
if(g_id < n_in)
temp[l_id-R] = d_in[g_id]; // left halo
if(g_id + R + BLOCK_SIZE < n_in)
temp[l_id+BLOCK_SIZE] = d_in[g_id+R+BLOCK_SIZE]; // right halo
}
__syncthreads(); // every thread of the block must reach the barrier
if(g_id < M) {
int result = 0;
for(int j=-R; j<=R; j++)
result += temp[l_id+j]; // accumulate the full (2*R+1)-wide window
d_out[g_id] = result;
}
}
int main()
{
int M = N-2*R;
int h_in[N];
int h_out[M];
for(int i=0; i < N; i++)
h_in[i] = 1;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int* d_in;
int* d_out;
//Part 1: Memory transfer from host to device
hipMalloc((void**) &d_in, N*sizeof(int));
hipMalloc((void**) &d_out, M*sizeof(int));
hipMemcpy(d_in, &h_in, N*sizeof(int), hipMemcpyHostToDevice);
//Part 2: Execute kernel
hipEventRecord(start);
//singlethread_stencil<<<1, BLOCK_SIZE>>>(d_in, d_out, M);
//multiplethreads_stencil<<<(int) ceil(M/ (double) BLOCK_SIZE), BLOCK_SIZE>>>(d_in, d_out, M);
hipLaunchKernelGGL(( multiplethreads_faster_stencil), dim3((int) ceil(M/ (double) BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_in, d_out, M);
hipEventRecord(stop);
//Part 3: Memory transfer from device to host
hipMemcpy(&h_out, d_out, M*sizeof(int), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipFree(d_in);
hipFree(d_out);
//Part 4: Check the result
/*for(int i=0; i<M; i++) {
printf("%d ",h_out[i]);
}*/
for(int i=0; i<M; i++) {
if(h_out[i] != 2*R+1){
printf("Incorrect result!\n");
return -1;
}
}
printf("Correct result!\n");
printf("%f ms\n", milliseconds);
}
| 54e2868728a71855f6193f8e418107b8654bef8a.cu | #include<stdio.h>
#include<cuda.h>
#include<math.h>
#define N 100000
#define R 3
#define BLOCK_SIZE 1024
__global__ void singlethread_stencil(int* d_in, int* d_out, int M){
int tid = threadIdx.x;
if(tid == 0) {
for(int i=0; i<M; i++) {
for(int j=-R; j<=R; j++)
d_out[i] += d_in[i+j+R];
}
}
}
__global__ void multiplethreads_stencil(int* d_in, int* d_out, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < M) {
int result = 0;
for(int j=-R; j<=R; j++)
result += d_in[tid+R+j];
d_out[tid] = result;
}
}
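// Shared-memory variant: each block stages BLOCK_SIZE elements plus R halo cells on
// each side, so every input element is read from global memory only once per block.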
__global__ void multiplethreads_faster_stencil(int* d_in, int* d_out, int M)
{
__shared__ int temp[BLOCK_SIZE+(2*R)];
int g_id = (blockIdx.x*BLOCK_SIZE)+ threadIdx.x;
int l_id = threadIdx.x+R;
int n_in = M + 2*R; // total number of input elements in d_in
// stage this block's elements plus the halos, guarding every global read
if(g_id + R < n_in)
temp[l_id] = d_in[g_id+R];
if(threadIdx.x < R) {
if(g_id < n_in)
temp[l_id-R] = d_in[g_id]; // left halo
if(g_id + R + BLOCK_SIZE < n_in)
temp[l_id+BLOCK_SIZE] = d_in[g_id+R+BLOCK_SIZE]; // right halo
}
__syncthreads(); // every thread of the block must reach the barrier
if(g_id < M) {
int result = 0;
for(int j=-R; j<=R; j++)
result += temp[l_id+j]; // accumulate the full (2*R+1)-wide window
d_out[g_id] = result;
}
}
int main()
{
int M = N-2*R;
int h_in[N];
int h_out[M];
for(int i=0; i < N; i++)
h_in[i] = 1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int* d_in;
int* d_out;
//Part 1: Memory transfer from host to device
cudaMalloc((void**) &d_in, N*sizeof(int));
cudaMalloc((void**) &d_out, M*sizeof(int));
cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice);
//Part 2: Execute kernel
cudaEventRecord(start);
//singlethread_stencil<<<1, BLOCK_SIZE>>>(d_in, d_out, M);
//multiplethreads_stencil<<<(int) ceil(M/ (double) BLOCK_SIZE), BLOCK_SIZE>>>(d_in, d_out, M);
multiplethreads_faster_stencil<<<(int) ceil(M/ (double) BLOCK_SIZE), BLOCK_SIZE>>>(d_in, d_out, M);
cudaEventRecord(stop);
//Part 3: Memory transfer from device to host
cudaMemcpy(&h_out, d_out, M*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaFree(d_in);
cudaFree(d_out);
//Part 4: Check the result
/*for(int i=0; i<M; i++) {
printf("%d ",h_out[i]);
}*/
for(int i=0; i<M; i++) {
if(h_out[i] != 2*R+1){
printf("Incorrect result!\n");
return -1;
}
}
printf("Correct result!\n");
printf("%f ms\n", milliseconds);
}
|
de80a8ca0adbf4e1f676e0120b676407cf7d81f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of WarpReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <cub/warp/warp_reduce.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
/**
* \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants)
*/
template<
typename OpT,
int LOGICAL_WARP_THREADS>
struct WrapperFunctor
{
OpT op;
int num_valid;
inline __host__ __device__ WrapperFunctor(OpT op, int num_valid) : op(op), num_valid(num_valid) {}
template <typename T>
inline __host__ __device__ T operator()(const T &a, const T &b) const
{
#if CUB_PTX_ARCH != 0
if ((cub::LaneId() % LOGICAL_WARP_THREADS) >= num_valid)
cub::ThreadTrap();
#endif
return op(a, b);
}
};
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Generic reduction
*/
template <
typename T,
typename ReductionOp,
typename WarpReduce,
bool PRIMITIVE = Traits<T>::PRIMITIVE>
struct DeviceTest
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedReduce(data, flag, reduction_op);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedReduce(data, flag, reduction_op);
}
};
/**
* Summation
*/
template <
typename T,
typename WarpReduce>
struct DeviceTest<T, Sum, WarpReduce, true>
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op)
{
return WarpReduce(temp_storage).Sum(data);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Sum(data, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedSum(data, flag);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedSum(data, flag);
}
};
/**
* Full-tile warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void FullWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type (1 warp)
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Partially-full warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void PartialWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed,
int valid_warp_threads)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test partial-warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op, valid_warp_threads);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Head-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpHeadSegmentedReduceKernel(
T *d_in,
FlagT *d_head_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT head_flag = d_head_flags[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::HeadSegmentedReduce(
temp_storage[warp_id], input, head_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
/**
* Tail-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpTailSegmentedReduceKernel(
T *d_in,
FlagT *d_tail_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT tail_flag = d_tail_flags[threadIdx.x];
FlagT head_flag = (threadIdx.x == 0) ?
0 :
d_tail_flags[threadIdx.x - 1];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::TailSegmentedReduce(
temp_storage[warp_id], input, tail_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
int flag_entropy,
T *h_in,
int *h_flags,
int warps,
int warp_threads,
int valid_warp_threads,
ReductionOp reduction_op,
T *h_head_out,
T *h_tail_out)
{
for (int i = 0; i < warps * warp_threads; ++i)
{
// Sample a value for this item
InitValue(gen_mode, h_in[i], i);
h_head_out[i] = h_in[i];
h_tail_out[i] = h_in[i];
// Sample whether or not this item will be a segment head
char bits;
RandomBits(bits, flag_entropy);
h_flags[i] = bits & 0x1;
}
// Accumulate segments (lane 0 of each warp is implicitly a segment head)
for (int warp = 0; warp < warps; ++warp)
{
int warp_offset = warp * warp_threads;
int item_offset = warp_offset + valid_warp_threads - 1;
// Last item in warp
T head_aggregate = h_in[item_offset];
T tail_aggregate = h_in[item_offset];
if (h_flags[item_offset])
h_head_out[item_offset] = head_aggregate;
item_offset--;
// Work backwards
while (item_offset >= warp_offset)
{
if (h_flags[item_offset + 1])
{
head_aggregate = h_in[item_offset];
}
else
{
head_aggregate = reduction_op(head_aggregate, h_in[item_offset]);
}
if (h_flags[item_offset])
{
h_head_out[item_offset] = head_aggregate;
h_tail_out[item_offset + 1] = tail_aggregate;
tail_aggregate = h_in[item_offset];
}
else
{
tail_aggregate = reduction_op(tail_aggregate, h_in[item_offset]);
}
item_offset--;
}
// Record last segment head_aggregate to head offset
h_head_out[warp_offset] = head_aggregate;
h_tail_out[warp_offset] = tail_aggregate;
}
}
/**
* Test warp reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestReduce(
GenMode gen_mode,
ReductionOp reduction_op,
int valid_warp_threads = LOGICAL_WARP_THREADS)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, -1, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, valid_warp_threads, reduction_op, h_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), valid_warp_threads);
}
// Run kernel
printf("\nGen-mode %d, %d warps, %d warp threads, %d valid lanes, %s (%d bytes) elements:\n",
gen_mode,
WARPS,
LOGICAL_WARP_THREADS,
valid_warp_threads,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
if (valid_warp_threads == LOGICAL_WARP_THREADS)
{
// Run full-warp kernel
hipLaunchKernelGGL(( FullWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
reduction_op,
d_elapsed);
}
else
{
// Run partial-warp kernel
hipLaunchKernelGGL(( PartialWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
reduction_op,
d_elapsed,
valid_warp_threads);
}
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_out, d_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_out) delete[] h_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test warp segmented reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestSegmentedReduce(
GenMode gen_mode,
int flag_entropy,
ReductionOp reduction_op)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
int compare;
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_head_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, flag_entropy, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, LOGICAL_WARP_THREADS, reduction_op, h_head_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
int *d_flags = NULL;
T *d_head_out = NULL;
T *d_tail_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(int) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_head_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_tail_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, hipMemcpyHostToDevice));
CubDebugExit(hipMemcpy(d_flags, h_flags, sizeof(int) * BLOCK_THREADS, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_head_out, 0, sizeof(T) * BLOCK_THREADS));
CubDebugExit(hipMemset(d_tail_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
printf("\nFlags:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_flags + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
}
printf("\nGen-mode %d, head flag entropy reduction %d, %d warps, %d warp threads, %s (%d bytes) elements:\n",
gen_mode,
flag_entropy,
WARPS,
LOGICAL_WARP_THREADS,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
// Run head-based kernel
hipLaunchKernelGGL(( WarpHeadSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_flags,
d_head_out,
reduction_op,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tHead-based segmented reduction results: ");
compare = CompareDeviceResults(h_head_out, d_head_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Run tail-based kernel
hipLaunchKernelGGL(( WarpTailSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_flags,
d_tail_out,
reduction_op,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tTail-based segmented reduction results: ");
compare = CompareDeviceResults(h_tail_out, d_tail_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_head_out) delete[] h_head_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_head_out) CubDebugExit(g_allocator.DeviceFree(d_head_out));
if (d_tail_out) CubDebugExit(g_allocator.DeviceFree(d_tail_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Run battery of tests for different full and partial tile sizes
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void Test(
GenMode gen_mode,
ReductionOp reduction_op)
{
// Partial tiles
for (
int valid_warp_threads = 1;
valid_warp_threads < LOGICAL_WARP_THREADS;
valid_warp_threads += CUB_MAX(1, LOGICAL_WARP_THREADS / 5))
{
// Without wrapper (to test non-excepting PTX POD-op specializations)
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, valid_warp_threads);
// With wrapper to ensure no ops called on OOB lanes
WrapperFunctor<ReductionOp, LOGICAL_WARP_THREADS> wrapped_op(reduction_op, valid_warp_threads);
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, wrapped_op, valid_warp_threads);
}
// Full tile
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, LOGICAL_WARP_THREADS);
// Segmented reduction with different head flags
for (int flag_entropy = 0; flag_entropy < 10; ++flag_entropy)
{
TestSegmentedReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, flag_entropy, reduction_op);
}
}
/**
* Run battery of tests for different data types and reduce ops
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test(GenMode gen_mode)
{
// primitive
Test<WARPS, LOGICAL_WARP_THREADS, char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, long long>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Sum());
if (gen_mode != RANDOM)
{
Test<WARPS, LOGICAL_WARP_THREADS, float>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, double>( gen_mode, Sum());
}
// primitive (alternative reduce op)
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Max());
// vec-1
Test<WARPS, LOGICAL_WARP_THREADS, uchar1>( gen_mode, Sum());
// vec-2
Test<WARPS, LOGICAL_WARP_THREADS, uchar2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong2>( gen_mode, Sum());
// vec-4
Test<WARPS, LOGICAL_WARP_THREADS, uchar4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong4>( gen_mode, Sum());
// complex
Test<WARPS, LOGICAL_WARP_THREADS, TestFoo>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, TestBar>( gen_mode, Sum());
}
/**
* Run battery of tests for different problem generation options
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test()
{
Test<WARPS, LOGICAL_WARP_THREADS>(UNIFORM);
Test<WARPS, LOGICAL_WARP_THREADS>(INTEGER_SEED);
Test<WARPS, LOGICAL_WARP_THREADS>(RANDOM);
}
/**
* Run battery of tests for different number of active warps
*/
template <int LOGICAL_WARP_THREADS>
void Test()
{
Test<1, LOGICAL_WARP_THREADS>();
// Only power-of-two subwarps can be tiled
if ((LOGICAL_WARP_THREADS == 32) || PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE)
Test<2, LOGICAL_WARP_THREADS>();
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef CUB_TEST_BENCHMARK
// Compile/run quick tests
TestReduce<1, 32, int>(UNIFORM, Sum());
TestReduce<1, 32, double>(UNIFORM, Sum());
TestReduce<2, 16, TestBar>(UNIFORM, Sum());
TestSegmentedReduce<1, 32, int>(UNIFORM, 1, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test logical warp sizes
Test<32>();
Test<16>();
Test<9>();
Test<7>();
}
#endif
return 0;
}
| de80a8ca0adbf4e1f676e0120b676407cf7d81f6.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of WarpReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <cub/warp/warp_reduce.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
/**
* \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants)
*/
template<
typename OpT,
int LOGICAL_WARP_THREADS>
struct WrapperFunctor
{
OpT op;
int num_valid;
inline __host__ __device__ WrapperFunctor(OpT op, int num_valid) : op(op), num_valid(num_valid) {}
template <typename T>
inline __host__ __device__ T operator()(const T &a, const T &b) const
{
#if CUB_PTX_ARCH != 0
if ((cub::LaneId() % LOGICAL_WARP_THREADS) >= num_valid)
cub::ThreadTrap();
#endif
return op(a, b);
}
};
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Generic reduction
*/
template <
typename T,
typename ReductionOp,
typename WarpReduce,
bool PRIMITIVE = Traits<T>::PRIMITIVE>
struct DeviceTest
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedReduce(data, flag, reduction_op);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedReduce(data, flag, reduction_op);
}
};
/**
* Summation
*/
template <
typename T,
typename WarpReduce>
struct DeviceTest<T, Sum, WarpReduce, true>
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op)
{
return WarpReduce(temp_storage).Sum(data);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Sum(data, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedSum(data, flag);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedSum(data, flag);
}
};
/**
* Full-tile warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void FullWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type (1 warp)
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Partially-full warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void PartialWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed,
int valid_warp_threads)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test partial-warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op, valid_warp_threads);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Head-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpHeadSegmentedReduceKernel(
T *d_in,
FlagT *d_head_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT head_flag = d_head_flags[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::HeadSegmentedReduce(
temp_storage[warp_id], input, head_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
/**
* Tail-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpTailSegmentedReduceKernel(
T *d_in,
FlagT *d_tail_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT tail_flag = d_tail_flags[threadIdx.x];
FlagT head_flag = (threadIdx.x == 0) ?
0 :
d_tail_flags[threadIdx.x - 1];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::TailSegmentedReduce(
temp_storage[warp_id], input, tail_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
int flag_entropy,
T *h_in,
int *h_flags,
int warps,
int warp_threads,
int valid_warp_threads,
ReductionOp reduction_op,
T *h_head_out,
T *h_tail_out)
{
for (int i = 0; i < warps * warp_threads; ++i)
{
// Sample a value for this item
InitValue(gen_mode, h_in[i], i);
h_head_out[i] = h_in[i];
h_tail_out[i] = h_in[i];
// Sample whether or not this item will be a segment head
char bits;
RandomBits(bits, flag_entropy);
h_flags[i] = bits & 0x1;
}
// Accumulate segments (lane 0 of each warp is implicitly a segment head)
for (int warp = 0; warp < warps; ++warp)
{
int warp_offset = warp * warp_threads;
int item_offset = warp_offset + valid_warp_threads - 1;
// Last item in warp
T head_aggregate = h_in[item_offset];
T tail_aggregate = h_in[item_offset];
if (h_flags[item_offset])
h_head_out[item_offset] = head_aggregate;
item_offset--;
// Work backwards
while (item_offset >= warp_offset)
{
if (h_flags[item_offset + 1])
{
head_aggregate = h_in[item_offset];
}
else
{
head_aggregate = reduction_op(head_aggregate, h_in[item_offset]);
}
if (h_flags[item_offset])
{
h_head_out[item_offset] = head_aggregate;
h_tail_out[item_offset + 1] = tail_aggregate;
tail_aggregate = h_in[item_offset];
}
else
{
tail_aggregate = reduction_op(tail_aggregate, h_in[item_offset]);
}
item_offset--;
}
// Record last segment head_aggregate to head offset
h_head_out[warp_offset] = head_aggregate;
h_tail_out[warp_offset] = tail_aggregate;
}
}
/**
* Test warp reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestReduce(
GenMode gen_mode,
ReductionOp reduction_op,
int valid_warp_threads = LOGICAL_WARP_THREADS)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, -1, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, valid_warp_threads, reduction_op, h_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), valid_warp_threads);
}
// Run kernel
printf("\nGen-mode %d, %d warps, %d warp threads, %d valid lanes, %s (%d bytes) elements:\n",
gen_mode,
WARPS,
LOGICAL_WARP_THREADS,
valid_warp_threads,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
if (valid_warp_threads == LOGICAL_WARP_THREADS)
{
// Run full-warp kernel
FullWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_out,
reduction_op,
d_elapsed);
}
else
{
// Run partial-warp kernel
PartialWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_out,
reduction_op,
d_elapsed,
valid_warp_threads);
}
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_out, d_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_out) delete[] h_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test warp segmented reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestSegmentedReduce(
GenMode gen_mode,
int flag_entropy,
ReductionOp reduction_op)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
int compare;
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_head_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, flag_entropy, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, LOGICAL_WARP_THREADS, reduction_op, h_head_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
int *d_flags = NULL;
T *d_head_out = NULL;
T *d_tail_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(int) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_head_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_tail_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(int) * BLOCK_THREADS, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_head_out, 0, sizeof(T) * BLOCK_THREADS));
CubDebugExit(cudaMemset(d_tail_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
printf("\nFlags:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_flags + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
}
printf("\nGen-mode %d, head flag entropy reduction %d, %d warps, %d warp threads, %s (%d bytes) elements:\n",
gen_mode,
flag_entropy,
WARPS,
LOGICAL_WARP_THREADS,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
// Run head-based kernel
WarpHeadSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_flags,
d_head_out,
reduction_op,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tHead-based segmented reduction results: ");
compare = CompareDeviceResults(h_head_out, d_head_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Run tail-based kernel
WarpTailSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_flags,
d_tail_out,
reduction_op,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tTail-based segmented reduction results: ");
compare = CompareDeviceResults(h_tail_out, d_tail_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_head_out) delete[] h_head_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_head_out) CubDebugExit(g_allocator.DeviceFree(d_head_out));
if (d_tail_out) CubDebugExit(g_allocator.DeviceFree(d_tail_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Run battery of tests for different full and partial tile sizes
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void Test(
GenMode gen_mode,
ReductionOp reduction_op)
{
// Partial tiles
for (
int valid_warp_threads = 1;
valid_warp_threads < LOGICAL_WARP_THREADS;
valid_warp_threads += CUB_MAX(1, LOGICAL_WARP_THREADS / 5))
{
// Without wrapper (to test non-excepting PTX POD-op specializations)
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, valid_warp_threads);
// With wrapper to ensure no ops called on OOB lanes
WrapperFunctor<ReductionOp, LOGICAL_WARP_THREADS> wrapped_op(reduction_op, valid_warp_threads);
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, wrapped_op, valid_warp_threads);
}
// Full tile
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, LOGICAL_WARP_THREADS);
// Segmented reduction with different head flags
for (int flag_entropy = 0; flag_entropy < 10; ++flag_entropy)
{
TestSegmentedReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, flag_entropy, reduction_op);
}
}
/**
* Run battery of tests for different data types and reduce ops
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test(GenMode gen_mode)
{
// primitive
Test<WARPS, LOGICAL_WARP_THREADS, char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, long long>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Sum());
if (gen_mode != RANDOM)
{
Test<WARPS, LOGICAL_WARP_THREADS, float>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, double>( gen_mode, Sum());
}
// primitive (alternative reduce op)
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Max());
// vec-1
Test<WARPS, LOGICAL_WARP_THREADS, uchar1>( gen_mode, Sum());
// vec-2
Test<WARPS, LOGICAL_WARP_THREADS, uchar2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong2>( gen_mode, Sum());
// vec-4
Test<WARPS, LOGICAL_WARP_THREADS, uchar4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong4>( gen_mode, Sum());
// complex
Test<WARPS, LOGICAL_WARP_THREADS, TestFoo>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, TestBar>( gen_mode, Sum());
}
/**
* Run battery of tests for different problem generation options
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test()
{
Test<WARPS, LOGICAL_WARP_THREADS>(UNIFORM);
Test<WARPS, LOGICAL_WARP_THREADS>(INTEGER_SEED);
Test<WARPS, LOGICAL_WARP_THREADS>(RANDOM);
}
/**
* Run battery of tests for different number of active warps
*/
template <int LOGICAL_WARP_THREADS>
void Test()
{
Test<1, LOGICAL_WARP_THREADS>();
// Only power-of-two subwarps can be tiled
if ((LOGICAL_WARP_THREADS == 32) || PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE)
Test<2, LOGICAL_WARP_THREADS>();
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef CUB_TEST_BENCHMARK
// Compile/run quick tests
TestReduce<1, 32, int>(UNIFORM, Sum());
TestReduce<1, 32, double>(UNIFORM, Sum());
TestReduce<2, 16, TestBar>(UNIFORM, Sum());
TestSegmentedReduce<1, 32, int>(UNIFORM, 1, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test logical warp sizes
Test<32>();
Test<16>();
Test<9>();
Test<7>();
}
#endif
return 0;
}
|
be946bbe3d24532df0d90479a257be6554bf43f8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CSS-535 Lab 03: CUDA GEMV Implementation
* Authors: Afrooz Rahmati & Tony Varela
*
* Description: This is my (Tony) reimplementation of Afrooz's original code in C++ (thanks to her for getting this started!).
* For now, let's focus on Part 0 - the naive implementation of GEMV using CUDA.
*/
// included header files
// CUDA stuff
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hipblas.h> // as a benchmark (declares the hipblasHandle_t / hipblasSgemv API used below; <rocblas.h> does not)
#include <cmath> // for std::fabs
#include <random> // for random initialization
#include <chrono> // timing
#include <iostream> // for output
/*
* naive gemv kernel; each instance of this function is supposed to take one output vector element
* this specific implementation does not rely on padding, but rather using an if (divergence).
* M is for the rows, N is for the columns. And this assumes row major data.
*/
__global__ void naive_gemv(const float *A, const float *x, float *y, const size_t M, const size_t N) {
const size_t total_thread_num{static_cast<size_t>(gridDim.x) * blockDim.x};
const size_t tid{threadIdx.x + static_cast<size_t>(blockIdx.x) * blockDim.x};
size_t stride{M / total_thread_num};
	if (stride == 0) { stride = 1; if (tid >= M) return; } // if this thread index is at or past the number of output elements, just get out
// if we had a stride of 0, that means we have more threads than elements... just add a stride in there just in case.
// else, that means stride >= 1 (more elements than threads); if the current thread index is the LAST ONE, we need to consider the possible remainders. and ONLY IF we have more vector elements than threads.
else if (tid == total_thread_num - 1) {
stride += (M <= total_thread_num) ? 0 : M % total_thread_num;
}
for (auto i{tid * stride}; i < (tid*stride) + stride; i++) {
y[i] = 0.0f;
		for (size_t j{0}; j < N; j++) y[i] += A[i * N + j] * x[j]; // row-major M x N: row i starts at offset i * N
}
}
// Credits to Brian Luger for the main structure of this program (just the way it is divided, I learned this from our time together on Lab 2)
int main(int argc, char **argv) {
// TODO: create command line arguments to configure grid/block dimensions
// This program should only take in the M and N dimensions; within the program, we figure out the execution configurations ourselves
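	// A minimal, untested sketch of deriving that configuration from M (one thread per output
	// element), kept commented out so it does not interfere with the explicit grid/block choice
	// further down. The 256-thread block size is an arbitrary illustrative value and the
	// "suggested_*" names are placeholders, not part of the original program:
	// const unsigned int suggested_block_size = 256;
	// dim3 suggested_block(suggested_block_size);
	// dim3 suggested_grid(static_cast<unsigned int>((M + suggested_block_size - 1) / suggested_block_size));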
// cublas declarations
hipblasHandle_t cublas_handle;
// for now, let's put the matrix/vector dimensions in here as well
const size_t M{ 10 };
const size_t N{ 10 };
// yes, I know they're always going to be square, but I like separating M and N for my own understanding.
// TODO: consider experimenting with thrust device/host vectors as well
// seed RNG
std::default_random_engine dre;
dre.seed(3); // seeded for reproducibility
	std::uniform_real_distribution<float> uniform_dist(-10, 10); // uniform distribution [-10, 10]; not const, since operator() mutates distribution state
// allocate host memory
float *m{new float[M * N]};
float *v_in{new float[N]};
float *v_out_naive{new float[M]};
float *v_out_cublas{new float[M]};
// allocate device memory
float *d_m, *d_v_in, *d_v_out_naive, *d_v_out_cublas;
hipMalloc(reinterpret_cast<void**>(&d_m), sizeof(float) * M * N);
hipMalloc(reinterpret_cast<void**>(&d_v_in), sizeof(float) * N);
hipMalloc(reinterpret_cast<void**>(&d_v_out_naive), sizeof(float) * M);
hipMalloc(reinterpret_cast<void**>(&d_v_out_cublas), sizeof(float) * M);
// initialize host array with random data
// for the matrix
	for (size_t i{0}; i < M; i++) for (size_t j{0}; j < N; j++) m[i * N + j] = uniform_dist(dre); // row-major M x N layout
//std::cout << "Printing Matrix:\n";
//for (size_t i{0}; i < M; i++) {
// for (size_t j{0}; j < N; j++) {
// std::cout << m[i * M + j] << ' ';
// }
// std::cout << '\n';
//}
// for the vector
for (size_t i{0}; i < N; i++) v_in[i] = uniform_dist(dre);
//std::cout << "Printing Input Vector:\n";
//for (size_t i{0}; i < N; i++) std::cout << v_in[i] << ' ';
std::cout << '\n';
// copy m and v_in into device memory, time it as well
auto d2h_start = std::chrono::high_resolution_clock::now();
hipMemcpy(d_m, m, sizeof(float) * M * N, hipMemcpyHostToDevice);
hipMemcpy(d_v_in, v_in, sizeof(float) * N, hipMemcpyHostToDevice);
auto d2h_end = std::chrono::high_resolution_clock::now();
auto d2h_duration = std::chrono::duration_cast<std::chrono::microseconds>(d2h_end - d2h_start).count();
// TODO: there are CUBLAS operations for getting/setting matrices/vectors between host/device; consider looking/timing these as well: https://developer.nvidia.com/sites/default/files/akamai/cuda/files/Misc/mygpu.pdf (pg.48-49)
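	// A hedged, commented-out sketch of those transfer helpers as an alternative to the hipMemcpy
	// calls above (untested here): hipblasSetMatrix takes (rows, cols, elemSize, src, lda, dst, ldb)
	// and hipblasSetVector takes (n, elemSize, src, incx, dst, incy). With lda == ldb == M the matrix
	// call moves a flat M*N block, so the library's column-major convention does not change the bytes copied.
	// hipblasSetMatrix(M, N, sizeof(float), m, M, d_m, M);
	// hipblasSetVector(N, sizeof(float), v_in, 1, d_v_in, 1);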
// let's create the grid / block configuration, but just really simply.
dim3 grid(1); // (1, 1, 1)
dim3 block(1);
	// ideally (M, 1, 1): each thread owns ONE output element, so we only need as many threads as vector elements. With block(1) as written, a single thread strides over all M outputs instead.
std::cout << "STARTING NAIVE" << std::endl;
auto naive_exec_start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( naive_gemv) , dim3(grid), dim3(block) , 0, 0, d_m, d_v_in, d_v_out_naive, M, N);
hipDeviceSynchronize();
std::cout << "FINISHED NAIVE" << std::endl;
// since the kernels are executed asynchronously, need to sync so that we can get accurate timing
auto naive_exec_end = std::chrono::high_resolution_clock::now();
auto naive_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(naive_exec_end - naive_exec_start).
count();
// copy d_v_out_naive back into host
auto h2d_start = std::chrono::high_resolution_clock::now();
hipMemcpy(v_out_naive, d_v_out_naive, sizeof(float) * M, hipMemcpyDeviceToHost);
auto h2d_end = std::chrono::high_resolution_clock::now();
auto h2d_duration = std::chrono::duration_cast<std::chrono::microseconds>(h2d_end - h2d_start).count();
// get total inclusive time
auto gpu_transfer_total_duration = h2d_duration + d2h_duration;
// try timing cublas (not timing inclusive times, although I am copying back out to host as well)
hipblasCreate(&cublas_handle);
// hipblasSetMatrix(M, N, sizeof(float), m, M, )
const float a{1.0f};
const float b{0.0f};
auto cublas_exec_start = std::chrono::high_resolution_clock::now();
hipblasSgemv(cublas_handle, HIPBLAS_OP_T, N, M, &a, d_m, N, d_v_in, 1, &b, d_v_out_cublas, 1);
auto cublas_exec_end = std::chrono::high_resolution_clock::now();
auto cublas_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(
cublas_exec_end - cublas_exec_start).count();
// copy the cublas device vector back out to host
hipMemcpy(v_out_cublas, d_v_out_cublas, sizeof(float) * M, hipMemcpyDeviceToHost);
std::cout << "Comparing output vectors:\n";
float rse{ 0.0f };
	for (size_t i{ 0 }; i < M; i++) rse += std::fabs(v_out_naive[i] - v_out_cublas[i]);
std::cout << "ERROR: " << rse << std::endl;
//std::cout << "Naive: ";
//for (size_t i{ 0 }; i < M; i++) std::cout << v_out_naive[i] << ' ';
//std::cout << '\n';
//
//std::cout << "cuBLAS: ";
//for (size_t i{0}; i < M; i++) std::cout << v_out_cublas[i] << ' ';
//std::cout << '\n';
std::cout <<
"Total Inclusive Time, Naive Execution Time, cuBLAS Execution Time, Naive Total Time, cuBLAS Total Time\n";
std::cout << gpu_transfer_total_duration << ", " << naive_exec_duration << ", " << cublas_exec_duration << ", " <<
naive_exec_duration +
gpu_transfer_total_duration << ", " << cublas_exec_duration + gpu_transfer_total_duration << '\n';
// clean up
hipblasDestroy(cublas_handle);
hipFree(d_v_out_cublas);
hipFree(d_v_out_naive);
hipFree(d_v_in);
hipFree(d_m);
delete[] v_out_cublas;
delete[] v_out_naive;
delete[] v_in;
delete[] m;
return 0;
} | be946bbe3d24532df0d90479a257be6554bf43f8.cu | /*
* CSS-535 Lab 03: CUDA GEMV Implementation
* Authors: Afrooz Rahmati & Tony Varela
*
* Description: This is my (Tony) reimplementation of Afrooz's original code in C++ (thanks to her for getting this started!).
* For now, let's focus on Part 0 - the naive implementation of GEMV using CUDA.
*/
// included header files
// CUDA stuff
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h> // as a benchmark
#include <cmath> // for std::fabs
#include <random> // for random initialization
#include <chrono> // timing
#include <iostream> // for output
/*
* naive gemv kernel; each instance of this function is supposed to take one output vector element
* this specific implementation does not rely on padding, but rather using an if (divergence).
* M is for the rows, N is for the columns. And this assumes row major data.
*/
__global__ void naive_gemv(const float *A, const float *x, float *y, const size_t M, const size_t N) {
const size_t total_thread_num{static_cast<size_t>(gridDim.x) * blockDim.x};
const size_t tid{threadIdx.x + static_cast<size_t>(blockIdx.x) * blockDim.x};
size_t stride{M / total_thread_num};
	if (stride == 0) { stride = 1; if (tid >= M) return; } // if this thread index is at or past the number of output elements, just get out
// if we had a stride of 0, that means we have more threads than elements... just add a stride in there just in case.
// else, that means stride >= 1 (more elements than threads); if the current thread index is the LAST ONE, we need to consider the possible remainders. and ONLY IF we have more vector elements than threads.
else if (tid == total_thread_num - 1) {
stride += (M <= total_thread_num) ? 0 : M % total_thread_num;
}
for (auto i{tid * stride}; i < (tid*stride) + stride; i++) {
y[i] = 0.0f;
		for (size_t j{0}; j < N; j++) y[i] += A[i * N + j] * x[j]; // row-major M x N: row i starts at offset i * N
}
}
// Credits to Brian Luger for the main structure of this program (just the way it is divided, I learned this from our time together on Lab 2)
int main(int argc, char **argv) {
// TODO: create command line arguments to configure grid/block dimensions
// This program should only take in the M and N dimensions; within the program, we figure out the execution configurations ourselves
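	// A minimal, untested sketch of deriving that configuration from M (one thread per output
	// element), kept commented out so it does not interfere with the explicit grid/block choice
	// further down. The 256-thread block size is an arbitrary illustrative value and the
	// "suggested_*" names are placeholders, not part of the original program:
	// const unsigned int suggested_block_size = 256;
	// dim3 suggested_block(suggested_block_size);
	// dim3 suggested_grid(static_cast<unsigned int>((M + suggested_block_size - 1) / suggested_block_size));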
// cublas declarations
cublasHandle_t cublas_handle;
// for now, let's put the matrix/vector dimensions in here as well
const size_t M{ 10 };
const size_t N{ 10 };
// yes, I know they're always going to be square, but I like separating M and N for my own understanding.
// TODO: consider experimenting with thrust device/host vectors as well
// seed RNG
std::default_random_engine dre;
dre.seed(3); // seeded for reproducibility
	std::uniform_real_distribution<float> uniform_dist(-10, 10); // uniform distribution [-10, 10]; not const, since operator() mutates distribution state
// allocate host memory
float *m{new float[M * N]};
float *v_in{new float[N]};
float *v_out_naive{new float[M]};
float *v_out_cublas{new float[M]};
// allocate device memory
float *d_m, *d_v_in, *d_v_out_naive, *d_v_out_cublas;
cudaMalloc(reinterpret_cast<void**>(&d_m), sizeof(float) * M * N);
cudaMalloc(reinterpret_cast<void**>(&d_v_in), sizeof(float) * N);
cudaMalloc(reinterpret_cast<void**>(&d_v_out_naive), sizeof(float) * M);
cudaMalloc(reinterpret_cast<void**>(&d_v_out_cublas), sizeof(float) * M);
// initialize host array with random data
// for the matrix
	for (size_t i{0}; i < M; i++) for (size_t j{0}; j < N; j++) m[i * N + j] = uniform_dist(dre); // row-major M x N layout
//std::cout << "Printing Matrix:\n";
//for (size_t i{0}; i < M; i++) {
// for (size_t j{0}; j < N; j++) {
// std::cout << m[i * M + j] << ' ';
// }
// std::cout << '\n';
//}
// for the vector
for (size_t i{0}; i < N; i++) v_in[i] = uniform_dist(dre);
//std::cout << "Printing Input Vector:\n";
//for (size_t i{0}; i < N; i++) std::cout << v_in[i] << ' ';
std::cout << '\n';
// copy m and v_in into device memory, time it as well
auto d2h_start = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_m, m, sizeof(float) * M * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_v_in, v_in, sizeof(float) * N, cudaMemcpyHostToDevice);
auto d2h_end = std::chrono::high_resolution_clock::now();
auto d2h_duration = std::chrono::duration_cast<std::chrono::microseconds>(d2h_end - d2h_start).count();
// TODO: there are CUBLAS operations for getting/setting matrices/vectors between host/device; consider looking/timing these as well: https://developer.nvidia.com/sites/default/files/akamai/cuda/files/Misc/mygpu.pdf (pg.48-49)
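	// A hedged, commented-out sketch of those transfer helpers as an alternative to the cudaMemcpy
	// calls above (untested here): cublasSetMatrix takes (rows, cols, elemSize, src, lda, dst, ldb)
	// and cublasSetVector takes (n, elemSize, src, incx, dst, incy). With lda == ldb == M the matrix
	// call moves a flat M*N block, so the library's column-major convention does not change the bytes copied.
	// cublasSetMatrix(M, N, sizeof(float), m, M, d_m, M);
	// cublasSetVector(N, sizeof(float), v_in, 1, d_v_in, 1);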
// let's create the grid / block configuration, but just really simply.
dim3 grid(1); // (1, 1, 1)
dim3 block(1);
	// ideally (M, 1, 1): each thread owns ONE output element, so we only need as many threads as vector elements. With block(1) as written, a single thread strides over all M outputs instead.
std::cout << "STARTING NAIVE" << std::endl;
auto naive_exec_start = std::chrono::high_resolution_clock::now();
naive_gemv <<<grid, block >>>(d_m, d_v_in, d_v_out_naive, M, N);
cudaDeviceSynchronize();
std::cout << "FINISHED NAIVE" << std::endl;
// since the kernels are executed asynchronously, need to sync so that we can get accurate timing
auto naive_exec_end = std::chrono::high_resolution_clock::now();
auto naive_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(naive_exec_end - naive_exec_start).
count();
// copy d_v_out_naive back into host
auto h2d_start = std::chrono::high_resolution_clock::now();
cudaMemcpy(v_out_naive, d_v_out_naive, sizeof(float) * M, cudaMemcpyDeviceToHost);
auto h2d_end = std::chrono::high_resolution_clock::now();
auto h2d_duration = std::chrono::duration_cast<std::chrono::microseconds>(h2d_end - h2d_start).count();
// get total inclusive time
auto gpu_transfer_total_duration = h2d_duration + d2h_duration;
// try timing cublas (not timing inclusive times, although I am copying back out to host as well)
cublasCreate(&cublas_handle);
// cublasSetMatrix(M, N, sizeof(float), m, M, )
const float a{1.0f};
const float b{0.0f};
auto cublas_exec_start = std::chrono::high_resolution_clock::now();
cublasSgemv(cublas_handle, CUBLAS_OP_T, N, M, &a, d_m, N, d_v_in, 1, &b, d_v_out_cublas, 1);
auto cublas_exec_end = std::chrono::high_resolution_clock::now();
auto cublas_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(
cublas_exec_end - cublas_exec_start).count();
// copy the cublas device vector back out to host
cudaMemcpy(v_out_cublas, d_v_out_cublas, sizeof(float) * M, cudaMemcpyDeviceToHost);
std::cout << "Comparing output vectors:\n";
float rse{ 0.0f };
	for (size_t i{ 0 }; i < M; i++) rse += std::fabs(v_out_naive[i] - v_out_cublas[i]);
std::cout << "ERROR: " << rse << std::endl;
//std::cout << "Naive: ";
//for (size_t i{ 0 }; i < M; i++) std::cout << v_out_naive[i] << ' ';
//std::cout << '\n';
//
//std::cout << "cuBLAS: ";
//for (size_t i{0}; i < M; i++) std::cout << v_out_cublas[i] << ' ';
//std::cout << '\n';
std::cout <<
"Total Inclusive Time, Naive Execution Time, cuBLAS Execution Time, Naive Total Time, cuBLAS Total Time\n";
std::cout << gpu_transfer_total_duration << ", " << naive_exec_duration << ", " << cublas_exec_duration << ", " <<
naive_exec_duration +
gpu_transfer_total_duration << ", " << cublas_exec_duration + gpu_transfer_total_duration << '\n';
// clean up
cublasDestroy(cublas_handle);
cudaFree(d_v_out_cublas);
cudaFree(d_v_out_naive);
cudaFree(d_v_in);
cudaFree(d_m);
delete[] v_out_cublas;
delete[] v_out_naive;
delete[] v_in;
delete[] m;
return 0;
} |
da6904abb23a5896b4c332d05b72a382dbd49d61.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <ctime>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include <random>
int N = 100;
float a = 2.0; // diameter
float b = 2.0; // diameter
float k = 10.0;
float mu = 1.0;
float mu_r = 1.0;
float dt = 1.0/300.0;
float Dr = 0.0;
float Dt = 0.0;
float v0 = 1.0;
float L = 1./0.;
float TotalTime = 30.0;
float tau = 0.0;
float alpha = 0.0;
float packing = 0.5;
int StepsBetweenSaves = 30;
int silent = 0;
unsigned long seed = 123456789;
#include "../include/NonInertial.cuh"
void help(){
std::cout << "Options are: \n";
std::cout << "-N (int) Number of particles, DEFAULT 100\n";
std::cout << "-T (float) Max time, DEFAULT = 30.0, [seconds]\n";
std::cout << "-dt (float) Time step, DEFAULT = 1.0/300.0,[seconds]\n";
std::cout << "-mur (float) rotational mobility coefficient, DEFAULT = 0.0001\n";
std::cout << "-a (float) Particle major axis, DEFAULT = 2.0, this defines the length scale\n";
std::cout << "-b (float) Particle minor axis, DEFAULT = 2.0, this defines the length scale\n";
std::cout << "Note: (a,b) = (2,2) implies a unit circle\n";
std::cout << "-k (float) Spring constant, DEFAULT = 10.0\n";
std::cout << "-mu (float) accel = f(v,x,t) + mu * sum(collision_forces(x,t)), DEFAULT = 1.0\n";
std::cout << "-Dr (float) rotational diffusion, DEFAULT = 0.0,[rad]^2[s]^-1\n";
std::cout << "-Dt (float) translational diffusion, DEFAULT = 0.00, [r]^2[s]^-1\n";
std::cout << "-v (float) v0, DEFAULT = 10.0 ,[r][s]^-1\n";
std::cout << "--initial-packing-fraction (float) density of random intial condition, DEFAULT = 0.5\n";
std::cout << "--box-length (float) length of periodic box, DEFAULT inf => no box\n";
std::cout << "-tau (float) reorientation coefficient, DEFAULT = 0.0\n";
std::cout << "-alpha (float) reorientation exponent, DEFAULT = 0.0\n";
std::cout << "--save-every (int) save state every --save-every time steps, DEFAULT = 10\n";
std::cout << "--random-seed (unsigned long) DEFAULT = 31415926535897\n";
std::cout << "-silent suppress cout DEFAULT = 0 (don't suppress)\n";
}
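// Example invocation (illustrative only; the executable name and flag values below are
// placeholders, and any omitted flag falls back to the defaults listed above):
//   ./simulation -N 200 -T 60 -dt 0.005 --box-length 50 --save-every 30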
int main(int argc, char ** argv){
if ( (argc+1) % 2 == 0 && argc >= 1){
// should have -OptionName Option pairs, + the program name
for (int i = 1; i+1 < argc; i+=2){
std::string OptionName = argv[i];
std::string Option = argv[i+1];
if (OptionName == "-h"){
help();
return 0;
}
else if (OptionName == "-N"){
N = std::stoi(Option);
}
else if (OptionName == "-T"){
TotalTime = std::stod(Option);
}
else if (OptionName == "-dt"){
dt = std::stod(Option);
}
else if (OptionName == "-mur"){
mu_r = std::stod(Option);
}
else if (OptionName == "-a"){
a = std::stod(Option);
}
else if (OptionName == "-b"){
b = std::stod(Option);
}
else if (OptionName == "-k"){
k = std::stod(Option);
}
else if (OptionName == "-mu"){
mu = std::stod(Option);
}
else if (OptionName == "-Dr"){
Dr = std::stod(Option);
}
else if (OptionName == "-Dt"){
Dt = std::stod(Option);
}
else if (OptionName == "-v"){
v0 = std::stod(Option);
}
else if (OptionName == "--initial-packing-fraction"){
packing = std::stod(Option);
}
else if (OptionName == "--box-length"){
L = std::stod(Option);
}
else if (OptionName == "-tau"){
tau = std::stod(Option);
}
else if (OptionName == "-alpha"){
alpha = std::stod(Option);
}
else if (OptionName == "--save-every"){
StepsBetweenSaves = std::stoi(Option);
}
else if (OptionName == "--random-seed"){
seed = std::stoi(Option);
}
else if (OptionName == "-silent"){
silent = std::stoi(Option);
}
}
}
else{
std::cout << "Incomplete options\n";
std::cout << "Options should be given in pairs, e.g -N 100\n";
help();
return 0;
}
if (silent == 0){
std::cout << "#######################################\n";
std::cout << "Parameters Set: \n";
std::cout << "N " << N << std::endl;
std::cout << "T " << TotalTime << std::endl;
std::cout << "dt " << dt << std::endl;
std::cout << "a " << a << std::endl;
std::cout << "b " << b << std::endl;
std::cout << "force strength " << k << std::endl;
std::cout << "mobility " << mu << std::endl;
std::cout << "rotational mobility " << mu_r << std::endl;
std::cout << "rotation-diffusion coefficient " << Dr << std::endl;
std::cout << "translation diffusion coefficient " << Dt << std::endl;
std::cout << "self propulsion speed " << v0 << std::endl;
std::cout << "intial packing-fraction " << packing << std::endl;
std::cout << "box length " << L << std::endl;
std::cout << "tau " << tau << std::endl;
std::cout << "alpha " << alpha << std::endl;
std::cout << "save every " << StepsBetweenSaves << std::endl;
std::cout << "random seed " << seed << std::endl;
std::cout << "#######################################\n";
}
float * X; // Positions
float * O; // orientations, theta
float * Trajectories; // will store the answers
X = new float [N*2];
O = new float [N];
if (tau != 0.0){
// require this for density each step
if (silent == 0){
std::cout << "Warning: tau != 0.0 requires density calculation each step.\n";
std::cout << "You will loose the benifit of not copying from the device each step\n";
}
}
int total_steps = int(ceil(TotalTime/dt));
Trajectories = new float [total_steps/StepsBetweenSaves*N*4]; // x,y,o,density for each N and t
std::default_random_engine generator(seed);
std::uniform_real_distribution<double> uniform_real(0.0, 1.0);
std::normal_distribution<double> normal(0.0, 1.0);
double l = std::sqrt((N*M_PI*a*b)/packing);
if (std::isnan(L)){
l = sqrt((N*M_PI*a*b)/0.5);
}
for (int i = 0; i < N; i++){
// initialise positions with packing fraction = packing
X[i*2] = uniform_real(generator)*l;
X[i*2+1] = uniform_real(generator)*l;
        // random normal orientations
O[i] = normal(generator)*2.0*M_PI;
Trajectories[0*N*4 + 4*i + 0] = X[i*2];
Trajectories[0*N*4 + 4*i + 1] = X[i*2+1];
Trajectories[0*N*4 + 4*i + 2] = O[i];
Trajectories[0*N*4 + 4*i + 3] = 0.0;
}
TakeSteps(X,O,Trajectories,N,total_steps,StepsBetweenSaves,dt,k,mu,mu_r,
a,b,Dt,Dr,v0,tau,alpha,L);
if (silent == 0){
std::cout << "Simulation done, saving data...\n";
}
//set up output to save data.
std::ostringstream namestring;
namestring << "trajectories.txt";
std::string str1 = namestring.str();
std::ofstream output(str1.c_str());
clock_t start;
start = clock();
for (int t = 0; t < total_steps/StepsBetweenSaves; t++){
for (int i = 0; i < N; i++){
output << Trajectories[t*N*4 + 4*i + 0] << ", ";
output << Trajectories[t*N*4 + 4*i + 1] << ", ";
output << Trajectories[t*N*4 + 4*i + 2] << ", ";
output << Trajectories[t*N*4 + 4*i + 3];
output << std::endl;
}
}
float time = (clock()-start)/(float)CLOCKS_PER_SEC;
float rounded_down = floorf(time * 100) / 100;
if (silent == 0){
std::cout << "Saving data took: " << rounded_down << " s\n";
}
    delete[] X; // allocated with new[], so release with delete[] rather than std::free
    delete[] O;
    delete[] Trajectories;
return 0;
}
| da6904abb23a5896b4c332d05b72a382dbd49d61.cu | #include <iostream>
#include <cmath>
#include <ctime>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include <random>
int N = 100;
float a = 2.0; // diameter
float b = 2.0; // diameter
float k = 10.0;
float mu = 1.0;
float mu_r = 1.0;
float dt = 1.0/300.0;
float Dr = 0.0;
float Dt = 0.0;
float v0 = 1.0;
float L = 1./0.;
float TotalTime = 30.0;
float tau = 0.0;
float alpha = 0.0;
float packing = 0.5;
int StepsBetweenSaves = 30;
int silent = 0;
unsigned long seed = 123456789;
#include "../include/NonInertial.cuh"
void help(){
std::cout << "Options are: \n";
std::cout << "-N (int) Number of particles, DEFAULT 100\n";
std::cout << "-T (float) Max time, DEFAULT = 30.0, [seconds]\n";
std::cout << "-dt (float) Time step, DEFAULT = 1.0/300.0,[seconds]\n";
std::cout << "-mur (float) rotational mobility coefficient, DEFAULT = 0.0001\n";
std::cout << "-a (float) Particle major axis, DEFAULT = 2.0, this defines the length scale\n";
std::cout << "-b (float) Particle minor axis, DEFAULT = 2.0, this defines the length scale\n";
std::cout << "Note: (a,b) = (2,2) implies a unit circle\n";
std::cout << "-k (float) Spring constant, DEFAULT = 10.0\n";
std::cout << "-mu (float) accel = f(v,x,t) + mu * sum(collision_forces(x,t)), DEFAULT = 1.0\n";
std::cout << "-Dr (float) rotational diffusion, DEFAULT = 0.0,[rad]^2[s]^-1\n";
std::cout << "-Dt (float) translational diffusion, DEFAULT = 0.00, [r]^2[s]^-1\n";
std::cout << "-v (float) v0, DEFAULT = 10.0 ,[r][s]^-1\n";
std::cout << "--initial-packing-fraction (float) density of random intial condition, DEFAULT = 0.5\n";
std::cout << "--box-length (float) length of periodic box, DEFAULT inf => no box\n";
std::cout << "-tau (float) reorientation coefficient, DEFAULT = 0.0\n";
std::cout << "-alpha (float) reorientation exponent, DEFAULT = 0.0\n";
std::cout << "--save-every (int) save state every --save-every time steps, DEFAULT = 10\n";
std::cout << "--random-seed (unsigned long) DEFAULT = 31415926535897\n";
std::cout << "-silent suppress cout DEFAULT = 0 (don't suppress)\n";
}
int main(int argc, char ** argv){
if ( (argc+1) % 2 == 0 && argc >= 1){
// should have -OptionName Option pairs, + the program name
for (int i = 1; i+1 < argc; i+=2){
std::string OptionName = argv[i];
std::string Option = argv[i+1];
if (OptionName == "-h"){
help();
return 0;
}
else if (OptionName == "-N"){
N = std::stoi(Option);
}
else if (OptionName == "-T"){
TotalTime = std::stod(Option);
}
else if (OptionName == "-dt"){
dt = std::stod(Option);
}
else if (OptionName == "-mur"){
mu_r = std::stod(Option);
}
else if (OptionName == "-a"){
a = std::stod(Option);
}
else if (OptionName == "-b"){
b = std::stod(Option);
}
else if (OptionName == "-k"){
k = std::stod(Option);
}
else if (OptionName == "-mu"){
mu = std::stod(Option);
}
else if (OptionName == "-Dr"){
Dr = std::stod(Option);
}
else if (OptionName == "-Dt"){
Dt = std::stod(Option);
}
else if (OptionName == "-v"){
v0 = std::stod(Option);
}
else if (OptionName == "--initial-packing-fraction"){
packing = std::stod(Option);
}
else if (OptionName == "--box-length"){
L = std::stod(Option);
}
else if (OptionName == "-tau"){
tau = std::stod(Option);
}
else if (OptionName == "-alpha"){
alpha = std::stod(Option);
}
else if (OptionName == "--save-every"){
StepsBetweenSaves = std::stoi(Option);
}
else if (OptionName == "--random-seed"){
seed = std::stoi(Option);
}
else if (OptionName == "-silent"){
silent = std::stoi(Option);
}
}
}
else{
std::cout << "Incomplete options\n";
std::cout << "Options should be given in pairs, e.g -N 100\n";
help();
return 0;
}
if (silent == 0){
std::cout << "#######################################\n";
std::cout << "Parameters Set: \n";
std::cout << "N " << N << std::endl;
std::cout << "T " << TotalTime << std::endl;
std::cout << "dt " << dt << std::endl;
std::cout << "a " << a << std::endl;
std::cout << "b " << b << std::endl;
std::cout << "force strength " << k << std::endl;
std::cout << "mobility " << mu << std::endl;
std::cout << "rotational mobility " << mu_r << std::endl;
std::cout << "rotation-diffusion coefficient " << Dr << std::endl;
std::cout << "translation diffusion coefficient " << Dt << std::endl;
std::cout << "self propulsion speed " << v0 << std::endl;
std::cout << "intial packing-fraction " << packing << std::endl;
std::cout << "box length " << L << std::endl;
std::cout << "tau " << tau << std::endl;
std::cout << "alpha " << alpha << std::endl;
std::cout << "save every " << StepsBetweenSaves << std::endl;
std::cout << "random seed " << seed << std::endl;
std::cout << "#######################################\n";
}
float * X; // Positions
float * O; // orientations, theta
float * Trajectories; // will store the answers
X = new float [N*2];
O = new float [N];
if (tau != 0.0){
// require this for density each step
if (silent == 0){
std::cout << "Warning: tau != 0.0 requires density calculation each step.\n";
std::cout << "You will loose the benifit of not copying from the device each step\n";
}
}
int total_steps = int(ceil(TotalTime/dt));
Trajectories = new float [total_steps/StepsBetweenSaves*N*4]; // x,y,o,density for each N and t
std::default_random_engine generator(seed);
std::uniform_real_distribution<double> uniform_real(0.0, 1.0);
std::normal_distribution<double> normal(0.0, 1.0);
double l = std::sqrt((N*M_PI*a*b)/packing);
if (std::isnan(L)){
l = sqrt((N*M_PI*a*b)/0.5);
}
for (int i = 0; i < N; i++){
// initialise positions with packing fraction = packing
X[i*2] = uniform_real(generator)*l;
X[i*2+1] = uniform_real(generator)*l;
        // random normal orientations
O[i] = normal(generator)*2.0*M_PI;
Trajectories[0*N*4 + 4*i + 0] = X[i*2];
Trajectories[0*N*4 + 4*i + 1] = X[i*2+1];
Trajectories[0*N*4 + 4*i + 2] = O[i];
Trajectories[0*N*4 + 4*i + 3] = 0.0;
}
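    // Run the simulation on the GPU: TakeSteps advances the particles and fills Trajectories with the saved frames.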
TakeSteps(X,O,Trajectories,N,total_steps,StepsBetweenSaves,dt,k,mu,mu_r,
a,b,Dt,Dr,v0,tau,alpha,L);
if (silent == 0){
std::cout << "Simulation done, saving data...\n";
}
//set up output to save data.
std::ostringstream namestring;
namestring << "trajectories.txt";
std::string str1 = namestring.str();
std::ofstream output(str1.c_str());
clock_t start;
start = clock();
for (int t = 0; t < total_steps/StepsBetweenSaves; t++){
for (int i = 0; i < N; i++){
output << Trajectories[t*N*4 + 4*i + 0] << ", ";
output << Trajectories[t*N*4 + 4*i + 1] << ", ";
output << Trajectories[t*N*4 + 4*i + 2] << ", ";
output << Trajectories[t*N*4 + 4*i + 3];
output << std::endl;
}
}
float time = (clock()-start)/(float)CLOCKS_PER_SEC;
float rounded_down = floorf(time * 100) / 100;
if (silent == 0){
std::cout << "Saving data took: " << rounded_down << " s\n";
}
    delete[] X;
    delete[] O;
    delete[] Trajectories;
return 0;
}
|
168594b30073d780358c23767c7df7c42e81330a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include "MkFn.h"
#include "../../Lib/Lib.h"
#include "LibCuda.h"
#include "RecurrentLayerCudaD.h"
__constant__ int _BatchSize;
__constant__ double _LearningRate;
void RecurrentLayerCudaD::GetFieldName(int field_idx, wchar_t* name){
switch(field_idx){
case 0: wcscpy(name, L"T"); break;
case 1: wcscpy(name, L"X"); break;
case 2: wcscpy(name, L"Y"); break;
case 3: wcscpy(name, L"x"); break;
case 4: wcscpy(name, L"y"); break;
case 5: wcscpy(name, L"win"); break;
case 6: wcscpy(name, L"w"); break;
case 7: wcscpy(name, L"b"); break;
case 8: wcscpy(name, L"u"); break;
case 9: wcscpy(name, L"delta_x"); break;
case 10: wcscpy(name, L"delta_y"); break;
case 11: wcscpy(name, L"delta_win"); break;
case 12: wcscpy(name, L"delta_w"); break;
case 13: wcscpy(name, L"delta_b"); break;
case 14: wcscpy(name, L"delta_u"); break;
default: name[0] = 0; break;
}
}
int RecurrentLayerCudaD::GetFieldDimension(int field_idx){
switch(field_idx){
case 0: return 0;
case 1: return 0;
case 2: return 0;
case 3: return 2;
case 4: return 2;
case 5: return 2;
case 6: return 2;
case 7: return 1;
case 8: return 2;
case 9: return 2;
case 10: return 2;
case 11: return 2;
case 12: return 2;
case 13: return 1;
case 14: return 2;
default: return -1;
}
}
int* RecurrentLayerCudaD::GetFieldSize(int field_idx){
switch(field_idx){
case 0: return 0;
case 1: return 0;
case 2: return 0;
case 3:
x_size_[0] = T;
x_size_[1] = X;
return x_size_;
case 4:
y_size_[0] = T;
y_size_[1] = Y;
return y_size_;
case 5:
win_size_[0] = Y;
win_size_[1] = X;
return win_size_;
case 6:
w_size_[0] = Y;
w_size_[1] = Y;
return w_size_;
case 7:
b_size_[0] = Y;
return b_size_;
case 8:
u_size_[0] = T;
u_size_[1] = Y;
return u_size_;
case 9:
delta_x_size_[0] = T;
delta_x_size_[1] = X;
return delta_x_size_;
case 10:
delta_y_size_[0] = T;
delta_y_size_[1] = Y;
return delta_y_size_;
case 11:
delta_win_size_[0] = Y;
delta_win_size_[1] = X;
return delta_win_size_;
case 12:
delta_w_size_[0] = Y;
delta_w_size_[1] = Y;
return delta_w_size_;
case 13:
delta_b_size_[0] = Y;
return delta_b_size_;
case 14:
delta_u_size_[0] = T;
delta_u_size_[1] = Y;
return delta_u_size_;
default: return 0;
}
}
void RecurrentLayerCudaD::GetFieldValue(int field_idx, void* dst){
int _cnt = GetFieldElementCount(field_idx);
switch(field_idx){
case 0: memcpy(dst, &T, _cnt * sizeof(int)); break;
case 1: memcpy(dst, &X, _cnt * sizeof(int)); break;
case 2: memcpy(dst, &Y, _cnt * sizeof(int)); break;
case 3: memcpy(dst, x, _cnt * sizeof(double)); break;
case 4: memcpy(dst, y, _cnt * sizeof(double)); break;
case 5: memcpy(dst, win, _cnt * sizeof(double)); break;
case 6: memcpy(dst, w, _cnt * sizeof(double)); break;
case 7: memcpy(dst, b, _cnt * sizeof(double)); break;
case 8: memcpy(dst, u, _cnt * sizeof(double)); break;
case 9: memcpy(dst, delta_x, _cnt * sizeof(double)); break;
case 10: memcpy(dst, delta_y, _cnt * sizeof(double)); break;
case 11: memcpy(dst, delta_win, _cnt * sizeof(double)); break;
case 12: memcpy(dst, delta_w, _cnt * sizeof(double)); break;
case 13: memcpy(dst, delta_b, _cnt * sizeof(double)); break;
case 14: memcpy(dst, delta_u, _cnt * sizeof(double)); break;
}
}
void RecurrentLayerCudaD::SetFieldValue(int field_idx, void* src){
int _cnt = GetFieldElementCount(field_idx);
switch(field_idx){
case 0: memcpy(&T, src, _cnt * sizeof(int)); break;
case 1: memcpy(&X, src, _cnt * sizeof(int)); break;
case 2: memcpy(&Y, src, _cnt * sizeof(int)); break;
case 3: memcpy(x, src, _cnt * sizeof(double)); break;
case 4: memcpy(y, src, _cnt * sizeof(double)); break;
case 5: memcpy(win, src, _cnt * sizeof(double)); break;
case 6: memcpy(w, src, _cnt * sizeof(double)); break;
case 7: memcpy(b, src, _cnt * sizeof(double)); break;
case 8: memcpy(u, src, _cnt * sizeof(double)); break;
case 9: memcpy(delta_x, src, _cnt * sizeof(double)); break;
case 10: memcpy(delta_y, src, _cnt * sizeof(double)); break;
case 11: memcpy(delta_win, src, _cnt * sizeof(double)); break;
case 12: memcpy(delta_w, src, _cnt * sizeof(double)); break;
case 13: memcpy(delta_b, src, _cnt * sizeof(double)); break;
case 14: memcpy(delta_u, src, _cnt * sizeof(double)); break;
}
}
RecurrentLayerCudaD::RecurrentLayerCudaD(int t_size, int x_size, int y_size){
// T = t_size
T = t_size;
// X = x_size
X = x_size;
// Y = y_size
Y = y_size;
SetNormalRand(win, Y * X);
SetNormalRand(w, Y * Y);
SetNormalRand(b, Y);
_chk(hipStreamCreate(&_stream_y));
_chk(hipStreamCreate(&_stream_u));
_chk(hipStreamCreate(&_stream_delta_x));
_chk(hipStreamCreate(&_stream_delta_win));
_chk(hipStreamCreate(&_stream_delta_w));
_chk(hipStreamCreate(&_stream_delta_b));
_chk(hipStreamCreate(&_stream_delta_u));
_chk(hipEventCreate(&_event_y));
_chk(hipEventCreate(&_event_u));
_chk(hipEventCreate(&_event_delta_x));
_chk(hipEventCreate(&_event_delta_win));
_chk(hipEventCreate(&_event_delta_w));
_chk(hipEventCreate(&_event_delta_b));
_chk(hipEventCreate(&_event_delta_u));
}
RecurrentLayerCudaD::~RecurrentLayerCudaD(){
Free();
_chk(hipStreamDestroy(_stream_y));
_chk(hipStreamDestroy(_stream_u));
_chk(hipStreamDestroy(_stream_delta_x));
_chk(hipStreamDestroy(_stream_delta_win));
_chk(hipStreamDestroy(_stream_delta_w));
_chk(hipStreamDestroy(_stream_delta_b));
_chk(hipStreamDestroy(_stream_delta_u));
_chk(hipEventDestroy(_event_y));
_chk(hipEventDestroy(_event_u));
_chk(hipEventDestroy(_event_delta_x));
_chk(hipEventDestroy(_event_delta_win));
_chk(hipEventDestroy(_event_delta_w));
_chk(hipEventDestroy(_event_delta_b));
_chk(hipEventDestroy(_event_delta_u));
_Free(win);
_Free(w);
_Free(b);
}
void RecurrentLayerCudaD::Allocate(){
_chk(_Malloc(y, BatchSize * T * Y * sizeof(double)));
_chk(_Malloc(u, BatchSize * T * Y * sizeof(double)));
_chk(_Malloc(delta_x, BatchSize * T * X * sizeof(double)));
_chk(_Malloc(delta_win, BatchSize * Y * X * sizeof(double)));
_chk(_Malloc(delta_w, BatchSize * Y * Y * sizeof(double)));
_chk(_Malloc(delta_b, BatchSize * Y * sizeof(double)));
_chk(_Malloc(delta_u, BatchSize * T * Y * sizeof(double)));
}
void RecurrentLayerCudaD::Free(){
_chk(_Free(y));
_chk(_Free(u));
_chk(_Free(delta_x));
_chk(_Free(delta_win));
_chk(_Free(delta_w));
_chk(_Free(delta_b));
_chk(_Free(delta_u));
}
__global__ static void forward_u(double* u, int X, double* x, double* win, int Y, double* w, double* y, double* b, int T, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
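	// Buffers are batch-interleaved: element (t, j) of sample _batch_idx lives at index (Y * t + j) * _BatchSize + _batch_idx.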
// u[t, j] = (from i in Range(X) select x[t, i] * win[j, i]).Sum() + (from i in Range(Y) select w[j, i] * y[t -1, i]).Sum() + b[j]
double _wk1 = 0;
for(int i = 0; i < X; i++){
_wk1 += x[(X * t + i) * _BatchSize + _batch_idx] * win[X * j + i];
}
double _wk2 = 0;
for(int i = 0; i < Y; i++){
_wk2 += w[Y * j + i] * (0 <= t - 1 ? y[(Y * (t -1) + i) * _BatchSize + _batch_idx] : 0);
}
u[(Y * t + j) * _BatchSize + _batch_idx] = _wk1 + _wk2 + b[j];
}
void RecurrentLayerCudaD::Start_forward_u(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
hipLaunchKernelGGL(( forward_u), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_u, u, X, x, win, Y, w, y, b, T, t);
_chk(hipEventRecord(_event_u, _stream_u));
}
__global__ static void forward_y(double* y, double* u, int T, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
// y[t, j] = sigmoid(u[t, j])
y[(Y * t + j) * _BatchSize + _batch_idx] = sigmoid(u[(Y * t + j) * _BatchSize + _batch_idx]);
}
void RecurrentLayerCudaD::Start_forward_y(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(hipStreamWaitEvent(_stream_y, _event_u, 0));
hipLaunchKernelGGL(( forward_y), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_y, y, u, T, Y, t);
_chk(hipEventRecord(_event_y, _stream_y));
}
void RecurrentLayerCudaD::Forward(){
_chk(_MemcpyToSymbol(_BatchSize, BatchSize, sizeof(BatchSize)));
Start_forward_u();
Start_forward_y();
}
__global__ static void backward_delta_y(double* delta_y, int Y, double* delta_u, double* w, int T, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
	// delta_y[t, j] = (from k in Range(Y) select delta_u[t + 1, k] * w[k, j]).Sum()
	double _wk3 = 0;
	for(int k = 0; k < Y; k++){
		_wk3 += (t + 1 < T ? delta_u[(Y * (t + 1) + k) * _BatchSize + _batch_idx] : 0) * w[Y * k + j];
	}
delta_y[(Y * t + j) * _BatchSize + _batch_idx] += _wk3;
}
void RecurrentLayerCudaD::Start_backward_delta_y(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
hipLaunchKernelGGL(( backward_delta_y), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_delta_y, delta_y, Y, delta_u, w, T, t);
_chk(hipEventRecord(_event_delta_y, _stream_delta_y));
}
__global__ static void backward_delta_u(double* delta_u, double* delta_y, double* u, int T, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
// delta_u[t, j] = delta_y[t, j] * sigmoid_prime(u[t, j])
delta_u[(Y * t + j) * _BatchSize + _batch_idx] = delta_y[(Y * t + j) * _BatchSize + _batch_idx] * sigmoid_prime(u[(Y * t + j) * _BatchSize + _batch_idx]);
}
void RecurrentLayerCudaD::Start_backward_delta_u(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(hipStreamWaitEvent(_stream_delta_u, _event_delta_y, 0));
hipLaunchKernelGGL(( backward_delta_u), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_delta_u, delta_u, delta_y, u, T, Y, t);
_chk(hipEventRecord(_event_delta_u, _stream_delta_u));
}
__global__ static void backward_delta_x(double* delta_x, int Y, double* delta_u, double* win, int T, int X, int t){
int _batch_idx = threadIdx.x;
int i_i = blockIdx.x;
// delta_x[t, i_i] = (from j in Range(Y) select delta_u[t, j] * win[j, i_i]).Sum()
double _wk4 = 0;
for(int j = 0; j < Y; j++){
_wk4 += delta_u[(Y * t + j) * _BatchSize + _batch_idx] * win[X * j + i_i];
}
delta_x[(X * t + i_i) * _BatchSize + _batch_idx] = _wk4;
}
void RecurrentLayerCudaD::Start_backward_delta_x(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = X;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(hipStreamWaitEvent(_stream_delta_x, _event_delta_u, 0));
hipLaunchKernelGGL(( backward_delta_x), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_delta_x, delta_x, Y, delta_u, win, T, X, t);
_chk(hipEventRecord(_event_delta_x, _stream_delta_x));
}
__global__ static void backward_delta_win(double* delta_win, int T, double* delta_u, double* x, int Y, int X, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.y;
int i_i = blockIdx.x;
// delta_win[j, i_i] = (from t in Range(T) select delta_u[t, j] * x[t, i_i]).Sum()
double _wk5 = 0;
for(int t = 0; t < T; t++){
_wk5 += delta_u[(Y * t + j) * _BatchSize + _batch_idx] * x[(X * t + i_i) * _BatchSize + _batch_idx];
}
delta_win[(X * j + i_i) * _BatchSize + _batch_idx] = _wk5;
}
void RecurrentLayerCudaD::Start_backward_delta_win(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_y = Y;
blocks_x = X;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(hipStreamWaitEvent(_stream_delta_win, _event_delta_u, 0));
hipLaunchKernelGGL(( backward_delta_win), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_delta_win, delta_win, T, delta_u, x, Y, X, t);
_chk(hipEventRecord(_event_delta_win, _stream_delta_win));
}
__global__ static void backward_delta_w(double* delta_w, int T, double* delta_u, double* y, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.y;
int i_i = blockIdx.x;
// delta_w[j, i_i] = (from t in Range(T) select delta_u[t, j] * y[t -1, i_i]).Sum()
double _wk6 = 0;
for(int t = 0; t < T; t++){
_wk6 += delta_u[(Y * t + j) * _BatchSize + _batch_idx] * (0 <= t - 1 ? y[(Y * (t -1) + i_i) * _BatchSize + _batch_idx] : 0);
}
delta_w[(Y * j + i_i) * _BatchSize + _batch_idx] = _wk6;
}
void RecurrentLayerCudaD::Start_backward_delta_w(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_y = Y;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(hipStreamWaitEvent(_stream_delta_w, _event_delta_u, 0));
hipLaunchKernelGGL(( backward_delta_w), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_delta_w, delta_w, T, delta_u, y, Y, t);
_chk(hipEventRecord(_event_delta_w, _stream_delta_w));
}
__global__ static void backward_delta_b(double* delta_b, int T, double* delta_u, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
// delta_b[j] = (from t in Range(T) select delta_u[t, j]).Sum()
double _wk7 = 0;
for(int t = 0; t < T; t++){
_wk7 += delta_u[(Y * t + j) * _BatchSize + _batch_idx];
}
delta_b[(j) * _BatchSize + _batch_idx] = _wk7;
}
void RecurrentLayerCudaD::Start_backward_delta_b(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(hipStreamWaitEvent(_stream_delta_b, _event_delta_u, 0));
hipLaunchKernelGGL(( backward_delta_b), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, _stream_delta_b, delta_b, T, delta_u, Y, t);
_chk(hipEventRecord(_event_delta_b, _stream_delta_b));
}
void RecurrentLayerCudaD::Backward(){
_chk(_MemcpyToSymbol(_BatchSize, BatchSize, sizeof(BatchSize)));
Start_backward_delta_y();
Start_backward_delta_u();
Start_backward_delta_x();
Start_backward_delta_win();
Start_backward_delta_w();
Start_backward_delta_b();
}
__global__ static void UpdateParameterKernel_0(double* win, double* delta_win){
int _idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int offset = _idx * _BatchSize;
{
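		// Reduce this weight's gradient over the batch, then apply a plain SGD update scaled by _LearningRate.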
double sum = 0;
for (int i = 0; i < _BatchSize; i++) {
sum += delta_win[offset + i];
}
win[_idx] -= _LearningRate * sum;
}
}
void RecurrentLayerCudaD::UpdateParameter_0(){
int threads_x = 1;
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
threads_x = X;
dim3 threadsPerBlock = dim3(threads_x);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
hipLaunchKernelGGL(( UpdateParameterKernel_0), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, win, delta_win);
}
__global__ static void UpdateParameterKernel_1(double* w, double* delta_w){
int _idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int offset = _idx * _BatchSize;
{
double sum = 0;
for (int i = 0; i < _BatchSize; i++) {
sum += delta_w[offset + i];
}
w[_idx] -= _LearningRate * sum;
}
}
void RecurrentLayerCudaD::UpdateParameter_1(){
int threads_x = 1;
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
threads_x = Y;
dim3 threadsPerBlock = dim3(threads_x);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
hipLaunchKernelGGL(( UpdateParameterKernel_1), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, w, delta_w);
}
__global__ static void UpdateParameterKernel_2(double* b, double* delta_b){
int _idx = threadIdx.x;
int offset = _idx * _BatchSize;
{
double sum = 0;
for (int i = 0; i < _BatchSize; i++) {
sum += delta_b[offset + i];
}
b[_idx] -= _LearningRate * sum;
}
}
void RecurrentLayerCudaD::UpdateParameter_2(){
int threads_x = 1;
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
threads_x = Y;
dim3 threadsPerBlock = dim3(threads_x);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
hipLaunchKernelGGL(( UpdateParameterKernel_2), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, b, delta_b);
}
void RecurrentLayerCudaD::UpdateParameter(){
_chk(_MemcpyToSymbol(_BatchSize, BatchSize, sizeof(BatchSize)));
_chk(_MemcpyToSymbol(_LearningRate, LearningRate, sizeof(LearningRate)));
_chk(hipDeviceSynchronize());
UpdateParameter_0();
UpdateParameter_1();
UpdateParameter_2();
_chk(hipDeviceSynchronize());
}
extern "C" DllExport Layer* MakeRecurrentLayerCudaD(int t_size, int x_size, int y_size){
return new RecurrentLayerCudaD(t_size, x_size, y_size);
}
| 168594b30073d780358c23767c7df7c42e81330a.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include "MkFn.h"
#include "../../Lib/Lib.h"
#include "LibCuda.h"
#include "RecurrentLayerCudaD.h"
__constant__ int _BatchSize;
__constant__ double _LearningRate;
void RecurrentLayerCudaD::GetFieldName(int field_idx, wchar_t* name){
switch(field_idx){
case 0: wcscpy(name, L"T"); break;
case 1: wcscpy(name, L"X"); break;
case 2: wcscpy(name, L"Y"); break;
case 3: wcscpy(name, L"x"); break;
case 4: wcscpy(name, L"y"); break;
case 5: wcscpy(name, L"win"); break;
case 6: wcscpy(name, L"w"); break;
case 7: wcscpy(name, L"b"); break;
case 8: wcscpy(name, L"u"); break;
case 9: wcscpy(name, L"delta_x"); break;
case 10: wcscpy(name, L"delta_y"); break;
case 11: wcscpy(name, L"delta_win"); break;
case 12: wcscpy(name, L"delta_w"); break;
case 13: wcscpy(name, L"delta_b"); break;
case 14: wcscpy(name, L"delta_u"); break;
default: name[0] = 0; break;
}
}
int RecurrentLayerCudaD::GetFieldDimension(int field_idx){
switch(field_idx){
case 0: return 0;
case 1: return 0;
case 2: return 0;
case 3: return 2;
case 4: return 2;
case 5: return 2;
case 6: return 2;
case 7: return 1;
case 8: return 2;
case 9: return 2;
case 10: return 2;
case 11: return 2;
case 12: return 2;
case 13: return 1;
case 14: return 2;
default: return -1;
}
}
int* RecurrentLayerCudaD::GetFieldSize(int field_idx){
switch(field_idx){
case 0: return 0;
case 1: return 0;
case 2: return 0;
case 3:
x_size_[0] = T;
x_size_[1] = X;
return x_size_;
case 4:
y_size_[0] = T;
y_size_[1] = Y;
return y_size_;
case 5:
win_size_[0] = Y;
win_size_[1] = X;
return win_size_;
case 6:
w_size_[0] = Y;
w_size_[1] = Y;
return w_size_;
case 7:
b_size_[0] = Y;
return b_size_;
case 8:
u_size_[0] = T;
u_size_[1] = Y;
return u_size_;
case 9:
delta_x_size_[0] = T;
delta_x_size_[1] = X;
return delta_x_size_;
case 10:
delta_y_size_[0] = T;
delta_y_size_[1] = Y;
return delta_y_size_;
case 11:
delta_win_size_[0] = Y;
delta_win_size_[1] = X;
return delta_win_size_;
case 12:
delta_w_size_[0] = Y;
delta_w_size_[1] = Y;
return delta_w_size_;
case 13:
delta_b_size_[0] = Y;
return delta_b_size_;
case 14:
delta_u_size_[0] = T;
delta_u_size_[1] = Y;
return delta_u_size_;
default: return 0;
}
}
void RecurrentLayerCudaD::GetFieldValue(int field_idx, void* dst){
int _cnt = GetFieldElementCount(field_idx);
switch(field_idx){
case 0: memcpy(dst, &T, _cnt * sizeof(int)); break;
case 1: memcpy(dst, &X, _cnt * sizeof(int)); break;
case 2: memcpy(dst, &Y, _cnt * sizeof(int)); break;
case 3: memcpy(dst, x, _cnt * sizeof(double)); break;
case 4: memcpy(dst, y, _cnt * sizeof(double)); break;
case 5: memcpy(dst, win, _cnt * sizeof(double)); break;
case 6: memcpy(dst, w, _cnt * sizeof(double)); break;
case 7: memcpy(dst, b, _cnt * sizeof(double)); break;
case 8: memcpy(dst, u, _cnt * sizeof(double)); break;
case 9: memcpy(dst, delta_x, _cnt * sizeof(double)); break;
case 10: memcpy(dst, delta_y, _cnt * sizeof(double)); break;
case 11: memcpy(dst, delta_win, _cnt * sizeof(double)); break;
case 12: memcpy(dst, delta_w, _cnt * sizeof(double)); break;
case 13: memcpy(dst, delta_b, _cnt * sizeof(double)); break;
case 14: memcpy(dst, delta_u, _cnt * sizeof(double)); break;
}
}
void RecurrentLayerCudaD::SetFieldValue(int field_idx, void* src){
int _cnt = GetFieldElementCount(field_idx);
switch(field_idx){
case 0: memcpy(&T, src, _cnt * sizeof(int)); break;
case 1: memcpy(&X, src, _cnt * sizeof(int)); break;
case 2: memcpy(&Y, src, _cnt * sizeof(int)); break;
case 3: memcpy(x, src, _cnt * sizeof(double)); break;
case 4: memcpy(y, src, _cnt * sizeof(double)); break;
case 5: memcpy(win, src, _cnt * sizeof(double)); break;
case 6: memcpy(w, src, _cnt * sizeof(double)); break;
case 7: memcpy(b, src, _cnt * sizeof(double)); break;
case 8: memcpy(u, src, _cnt * sizeof(double)); break;
case 9: memcpy(delta_x, src, _cnt * sizeof(double)); break;
case 10: memcpy(delta_y, src, _cnt * sizeof(double)); break;
case 11: memcpy(delta_win, src, _cnt * sizeof(double)); break;
case 12: memcpy(delta_w, src, _cnt * sizeof(double)); break;
case 13: memcpy(delta_b, src, _cnt * sizeof(double)); break;
case 14: memcpy(delta_u, src, _cnt * sizeof(double)); break;
}
}
RecurrentLayerCudaD::RecurrentLayerCudaD(int t_size, int x_size, int y_size){
// T = t_size
T = t_size;
// X = x_size
X = x_size;
// Y = y_size
Y = y_size;
SetNormalRand(win, Y * X);
SetNormalRand(w, Y * Y);
SetNormalRand(b, Y);
_chk(cudaStreamCreate(&_stream_y));
_chk(cudaStreamCreate(&_stream_u));
_chk(cudaStreamCreate(&_stream_delta_x));
_chk(cudaStreamCreate(&_stream_delta_win));
_chk(cudaStreamCreate(&_stream_delta_w));
_chk(cudaStreamCreate(&_stream_delta_b));
_chk(cudaStreamCreate(&_stream_delta_u));
_chk(cudaEventCreate(&_event_y));
_chk(cudaEventCreate(&_event_u));
_chk(cudaEventCreate(&_event_delta_x));
_chk(cudaEventCreate(&_event_delta_win));
_chk(cudaEventCreate(&_event_delta_w));
_chk(cudaEventCreate(&_event_delta_b));
_chk(cudaEventCreate(&_event_delta_u));
}
RecurrentLayerCudaD::~RecurrentLayerCudaD(){
Free();
_chk(cudaStreamDestroy(_stream_y));
_chk(cudaStreamDestroy(_stream_u));
_chk(cudaStreamDestroy(_stream_delta_x));
_chk(cudaStreamDestroy(_stream_delta_win));
_chk(cudaStreamDestroy(_stream_delta_w));
_chk(cudaStreamDestroy(_stream_delta_b));
_chk(cudaStreamDestroy(_stream_delta_u));
_chk(cudaEventDestroy(_event_y));
_chk(cudaEventDestroy(_event_u));
_chk(cudaEventDestroy(_event_delta_x));
_chk(cudaEventDestroy(_event_delta_win));
_chk(cudaEventDestroy(_event_delta_w));
_chk(cudaEventDestroy(_event_delta_b));
_chk(cudaEventDestroy(_event_delta_u));
_Free(win);
_Free(w);
_Free(b);
}
void RecurrentLayerCudaD::Allocate(){
_chk(_Malloc(y, BatchSize * T * Y * sizeof(double)));
_chk(_Malloc(u, BatchSize * T * Y * sizeof(double)));
_chk(_Malloc(delta_x, BatchSize * T * X * sizeof(double)));
_chk(_Malloc(delta_win, BatchSize * Y * X * sizeof(double)));
_chk(_Malloc(delta_w, BatchSize * Y * Y * sizeof(double)));
_chk(_Malloc(delta_b, BatchSize * Y * sizeof(double)));
_chk(_Malloc(delta_u, BatchSize * T * Y * sizeof(double)));
}
void RecurrentLayerCudaD::Free(){
_chk(_Free(y));
_chk(_Free(u));
_chk(_Free(delta_x));
_chk(_Free(delta_win));
_chk(_Free(delta_w));
_chk(_Free(delta_b));
_chk(_Free(delta_u));
}
__global__ static void forward_u(double* u, int X, double* x, double* win, int Y, double* w, double* y, double* b, int T, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
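	// Buffers are batch-interleaved: element (t, j) of sample _batch_idx lives at index (Y * t + j) * _BatchSize + _batch_idx.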
// u[t, j] = (from i in Range(X) select x[t, i] * win[j, i]).Sum() + (from i in Range(Y) select w[j, i] * y[t -1, i]).Sum() + b[j]
double _wk1 = 0;
for(int i = 0; i < X; i++){
_wk1 += x[(X * t + i) * _BatchSize + _batch_idx] * win[X * j + i];
}
double _wk2 = 0;
for(int i = 0; i < Y; i++){
_wk2 += w[Y * j + i] * (0 <= t - 1 ? y[(Y * (t -1) + i) * _BatchSize + _batch_idx] : 0);
}
u[(Y * t + j) * _BatchSize + _batch_idx] = _wk1 + _wk2 + b[j];
}
void RecurrentLayerCudaD::Start_forward_u(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
forward_u<<<blocksPerGrid, threadsPerBlock, 0, _stream_u>>>(u, X, x, win, Y, w, y, b, T, t);
_chk(cudaEventRecord(_event_u, _stream_u));
}
__global__ static void forward_y(double* y, double* u, int T, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
// y[t, j] = sigmoid(u[t, j])
y[(Y * t + j) * _BatchSize + _batch_idx] = sigmoid(u[(Y * t + j) * _BatchSize + _batch_idx]);
}
void RecurrentLayerCudaD::Start_forward_y(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(cudaStreamWaitEvent(_stream_y, _event_u, 0));
forward_y<<<blocksPerGrid, threadsPerBlock, 0, _stream_y>>>(y, u, T, Y, t);
_chk(cudaEventRecord(_event_y, _stream_y));
}
void RecurrentLayerCudaD::Forward(){
_chk(_MemcpyToSymbol(_BatchSize, BatchSize, sizeof(BatchSize)));
Start_forward_u();
Start_forward_y();
}
__global__ static void backward_delta_y(double* delta_y, int Y, double* delta_u, double* w, int T, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
	// delta_y[t, j] = (from k in Range(Y) select delta_u[t + 1, k] * w[k, j]).Sum()
	double _wk3 = 0;
	for(int k = 0; k < Y; k++){
		_wk3 += (t + 1 < T ? delta_u[(Y * (t + 1) + k) * _BatchSize + _batch_idx] : 0) * w[Y * k + j];
	}
delta_y[(Y * t + j) * _BatchSize + _batch_idx] += _wk3;
}
void RecurrentLayerCudaD::Start_backward_delta_y(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
backward_delta_y<<<blocksPerGrid, threadsPerBlock, 0, _stream_delta_y>>>(delta_y, Y, delta_u, w, T, t);
_chk(cudaEventRecord(_event_delta_y, _stream_delta_y));
}
__global__ static void backward_delta_u(double* delta_u, double* delta_y, double* u, int T, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
// delta_u[t, j] = delta_y[t, j] * sigmoid_prime(u[t, j])
delta_u[(Y * t + j) * _BatchSize + _batch_idx] = delta_y[(Y * t + j) * _BatchSize + _batch_idx] * sigmoid_prime(u[(Y * t + j) * _BatchSize + _batch_idx]);
}
void RecurrentLayerCudaD::Start_backward_delta_u(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(cudaStreamWaitEvent(_stream_delta_u, _event_delta_y, 0));
backward_delta_u<<<blocksPerGrid, threadsPerBlock, 0, _stream_delta_u>>>(delta_u, delta_y, u, T, Y, t);
_chk(cudaEventRecord(_event_delta_u, _stream_delta_u));
}
__global__ static void backward_delta_x(double* delta_x, int Y, double* delta_u, double* win, int T, int X, int t){
int _batch_idx = threadIdx.x;
int i_i = blockIdx.x;
// delta_x[t, i_i] = (from j in Range(Y) select delta_u[t, j] * win[j, i_i]).Sum()
double _wk4 = 0;
for(int j = 0; j < Y; j++){
_wk4 += delta_u[(Y * t + j) * _BatchSize + _batch_idx] * win[X * j + i_i];
}
delta_x[(X * t + i_i) * _BatchSize + _batch_idx] = _wk4;
}
void RecurrentLayerCudaD::Start_backward_delta_x(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = X;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(cudaStreamWaitEvent(_stream_delta_x, _event_delta_u, 0));
backward_delta_x<<<blocksPerGrid, threadsPerBlock, 0, _stream_delta_x>>>(delta_x, Y, delta_u, win, T, X, t);
_chk(cudaEventRecord(_event_delta_x, _stream_delta_x));
}
__global__ static void backward_delta_win(double* delta_win, int T, double* delta_u, double* x, int Y, int X, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.y;
int i_i = blockIdx.x;
// delta_win[j, i_i] = (from t in Range(T) select delta_u[t, j] * x[t, i_i]).Sum()
double _wk5 = 0;
for(int t = 0; t < T; t++){
_wk5 += delta_u[(Y * t + j) * _BatchSize + _batch_idx] * x[(X * t + i_i) * _BatchSize + _batch_idx];
}
delta_win[(X * j + i_i) * _BatchSize + _batch_idx] = _wk5;
}
void RecurrentLayerCudaD::Start_backward_delta_win(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_y = Y;
blocks_x = X;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(cudaStreamWaitEvent(_stream_delta_win, _event_delta_u, 0));
backward_delta_win<<<blocksPerGrid, threadsPerBlock, 0, _stream_delta_win>>>(delta_win, T, delta_u, x, Y, X, t);
_chk(cudaEventRecord(_event_delta_win, _stream_delta_win));
}
__global__ static void backward_delta_w(double* delta_w, int T, double* delta_u, double* y, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.y;
int i_i = blockIdx.x;
// delta_w[j, i_i] = (from t in Range(T) select delta_u[t, j] * y[t -1, i_i]).Sum()
double _wk6 = 0;
for(int t = 0; t < T; t++){
_wk6 += delta_u[(Y * t + j) * _BatchSize + _batch_idx] * (0 <= t - 1 ? y[(Y * (t -1) + i_i) * _BatchSize + _batch_idx] : 0);
}
delta_w[(Y * j + i_i) * _BatchSize + _batch_idx] = _wk6;
}
void RecurrentLayerCudaD::Start_backward_delta_w(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_y = Y;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(cudaStreamWaitEvent(_stream_delta_w, _event_delta_u, 0));
backward_delta_w<<<blocksPerGrid, threadsPerBlock, 0, _stream_delta_w>>>(delta_w, T, delta_u, y, Y, t);
_chk(cudaEventRecord(_event_delta_w, _stream_delta_w));
}
__global__ static void backward_delta_b(double* delta_b, int T, double* delta_u, int Y, int t){
int _batch_idx = threadIdx.x;
int j = blockIdx.x;
// delta_b[j] = (from t in Range(T) select delta_u[t, j]).Sum()
double _wk7 = 0;
for(int t = 0; t < T; t++){
_wk7 += delta_u[(Y * t + j) * _BatchSize + _batch_idx];
}
delta_b[(j) * _BatchSize + _batch_idx] = _wk7;
}
void RecurrentLayerCudaD::Start_backward_delta_b(){
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
dim3 threadsPerBlock = dim3(BatchSize);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
_chk(cudaStreamWaitEvent(_stream_delta_b, _event_delta_u, 0));
backward_delta_b<<<blocksPerGrid, threadsPerBlock, 0, _stream_delta_b>>>(delta_b, T, delta_u, Y, t);
_chk(cudaEventRecord(_event_delta_b, _stream_delta_b));
}
void RecurrentLayerCudaD::Backward(){
_chk(_MemcpyToSymbol(_BatchSize, BatchSize, sizeof(BatchSize)));
Start_backward_delta_y();
Start_backward_delta_u();
Start_backward_delta_x();
Start_backward_delta_win();
Start_backward_delta_w();
Start_backward_delta_b();
}
__global__ static void UpdateParameterKernel_0(double* win, double* delta_win){
int _idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int offset = _idx * _BatchSize;
{
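		// Reduce this weight's gradient over the batch, then apply a plain SGD update scaled by _LearningRate.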
double sum = 0;
for (int i = 0; i < _BatchSize; i++) {
sum += delta_win[offset + i];
}
win[_idx] -= _LearningRate * sum;
}
}
void RecurrentLayerCudaD::UpdateParameter_0(){
int threads_x = 1;
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
threads_x = X;
dim3 threadsPerBlock = dim3(threads_x);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
UpdateParameterKernel_0<<<blocksPerGrid, threadsPerBlock>>>(win, delta_win);
}
__global__ static void UpdateParameterKernel_1(double* w, double* delta_w){
int _idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int offset = _idx * _BatchSize;
{
double sum = 0;
for (int i = 0; i < _BatchSize; i++) {
sum += delta_w[offset + i];
}
w[_idx] -= _LearningRate * sum;
}
}
void RecurrentLayerCudaD::UpdateParameter_1(){
int threads_x = 1;
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
blocks_x = Y;
threads_x = Y;
dim3 threadsPerBlock = dim3(threads_x);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
UpdateParameterKernel_1<<<blocksPerGrid, threadsPerBlock>>>(w, delta_w);
}
__global__ static void UpdateParameterKernel_2(double* b, double* delta_b){
int _idx = threadIdx.x;
int offset = _idx * _BatchSize;
{
double sum = 0;
for (int i = 0; i < _BatchSize; i++) {
sum += delta_b[offset + i];
}
b[_idx] -= _LearningRate * sum;
}
}
void RecurrentLayerCudaD::UpdateParameter_2(){
int threads_x = 1;
int blocks_x = 1;
int blocks_y = 1;
int blocks_z = 1;
threads_x = Y;
dim3 threadsPerBlock = dim3(threads_x);
dim3 blocksPerGrid = dim3(blocks_x, blocks_y, blocks_z);
UpdateParameterKernel_2<<<blocksPerGrid, threadsPerBlock>>>(b, delta_b);
}
void RecurrentLayerCudaD::UpdateParameter(){
_chk(_MemcpyToSymbol(_BatchSize, BatchSize, sizeof(BatchSize)));
_chk(_MemcpyToSymbol(_LearningRate, LearningRate, sizeof(LearningRate)));
_chk(cudaDeviceSynchronize());
UpdateParameter_0();
UpdateParameter_1();
UpdateParameter_2();
_chk(cudaDeviceSynchronize());
}
extern "C" DllExport Layer* MakeRecurrentLayerCudaD(int t_size, int x_size, int y_size){
return new RecurrentLayerCudaD(t_size, x_size, y_size);
}
|
c280e4b742498a5f6da51da44e916377c24f2ef8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 16
__global__ void addMatrix(float *A, float *B, float *C) {
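    // One thread per element: threadIdx.y gives the row i and threadIdx.x the column j of the N x N matrices.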
int i = threadIdx.y;
int j = threadIdx.x;
int index = i*N+j;
    C[index] = A[index] + B[index];
}
int main(int argc , char *argv[])
{
int i,j;
int size = N * N * sizeof(float);
    float a[N][N], b[N][N], c[N][N], *devA, *devB, *devC;
for(i=0; i<N;i++)
{
for(j=0;j< N ;j++)
{
a[i][j] = 1;b[i][j]=2;
}
}
hipMalloc( (void**)&devA,size);
hipMalloc( (void**)&devB,size);
hipMalloc( (void**)&devC,size);
hipMemcpy( devA, a, size, hipMemcpyHostToDevice);
hipMemcpy( devB, b, size, hipMemcpyHostToDevice);
dim3 dimBlock (N,N);
dim3 dimGrid (1,1);
// int nblocks = n/T;
// int nblocks = (n+T - 1) / T; // Efficient way to run
hipLaunchKernelGGL(( addMatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, devA, devB, devC);
hipMemcpy(c, devC, size, hipMemcpyDeviceToHost);
hipFree(devA);
hipFree(devB);
hipFree(devC);
for(i=0; i<N;i++)
{
for(j=0;j< N ;j++)
{
printf("%.2f " ,c[i][j]);
}
printf("\n");
}
} | c280e4b742498a5f6da51da44e916377c24f2ef8.cu | #include <stdio.h>
#define N 16
__global__ void addMatrix(float *A, float *B, float *C) {
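    // One thread per element: threadIdx.y gives the row i and threadIdx.x the column j of the N x N matrices.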
int i = threadIdx.y;
int j = threadIdx.x;
int index = i*N+j;
    C[index] = A[index] + B[index];
}
int main(int argc , char *argv[])
{
int i,j;
int size = N * N * sizeof(float);
    float a[N][N], b[N][N], c[N][N], *devA, *devB, *devC;
for(i=0; i<N;i++)
{
for(j=0;j< N ;j++)
{
a[i][j] = 1;b[i][j]=2;
}
}
cudaMalloc( (void**)&devA,size);
cudaMalloc( (void**)&devB,size);
cudaMalloc( (void**)&devC,size);
cudaMemcpy( devA, a, size, cudaMemcpyHostToDevice);
cudaMemcpy( devB, b, size, cudaMemcpyHostToDevice);
dim3 dimBlock (N,N);
dim3 dimGrid (1,1);
// int nblocks = n/T;
// int nblocks = (n+T - 1) / T; // Efficient way to run
addMatrix<<<dimGrid, dimBlock>>>(devA, devB, devC);
cudaMemcpy(c, devC, size, cudaMemcpyDeviceToHost);
cudaFree(devA);
cudaFree(devB);
cudaFree(devC);
for(i=0; i<N;i++)
{
for(j=0;j< N ;j++)
{
printf("%.2f " ,c[i][j]);
}
printf("\n");
}
} |
a7cd0cb365c72947e5e194ceafec30ca94dcf540.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford, Los Alamos National Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_helper.h"
#include "flexflow_dataloader.h"
void ImgDataLoader::load_label(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<int, 2> acc_full_label(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<int, 2> acc_batch_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
int batch_size = acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
const int* input_zc = acc_full_label.ptr + meta->idxs[0];
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
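  // Copy this batch's labels from the full dataset region into the per-batch region on the device.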
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_label.rect.volume())), dim3(CUDA_NUM_THREADS), 0, stream,
acc_batch_label.ptr, input_zc, acc_batch_label.rect.volume());
checkCUDA(hipDeviceSynchronize());
}
void ImgDataLoader4D::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<float, 4> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
coord_t batch_size = acc_batch_input.rect.hi[3] - acc_batch_input.rect.lo[3] + 1;
coord_t channels = acc_batch_input.rect.hi[2] - acc_batch_input.rect.lo[2] + 1;
coord_t height = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
coord_t width = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
coord_t start_idx = meta->idxs[0];
const float* input_zc = acc_full_input.ptr + start_idx * channels * height * width;
//printf("load input %d %d %d %d\n", meta->idxs[0], channels, height, width);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_input.rect.volume())), dim3(CUDA_NUM_THREADS), 0, stream,
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(hipDeviceSynchronize());
}
void ImgDataLoader2D::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<float, 2> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
coord_t batch_size = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
coord_t width = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
coord_t start_idx = meta->idxs[0];
const float* input_zc = acc_full_input.ptr + start_idx * width;
//printf("load input %d %d %d %d\n", meta->idxs[0], channels, height, width);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(acc_batch_input.rect.volume())), dim3(CUDA_NUM_THREADS), 0, stream,
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(hipDeviceSynchronize());
}
template<typename DT>
void SingleDataLoader::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
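  // Dispatch on the region's runtime dimensionality to the matching load_input_with_dim<DT, DIM> instantiation.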
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return load_input_with_dim<DT, DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<typename DT, int NDIM>
void SingleDataLoader::load_input_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<DT, NDIM> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<DT, NDIM> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
coord_t batch_size = acc_batch_input.rect.hi[NDIM-1] - acc_batch_input.rect.lo[NDIM-1] + 1;
coord_t num_elements_per_batch = acc_batch_input.rect.volume() / batch_size;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
coord_t start_idx = meta->idxs[0];
const DT* input_zc = acc_full_input.ptr + start_idx * num_elements_per_batch;
// const int point = task->index_point.point_data[0];
// printf("Load batch point %d, start_idx %ld, ptr %p\n", point, start_idx, input_zc);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
//printf("ptr(%p, %p), idx0 %d nb_elements_per_batch %d, batch_size %d, %d\n", acc_full_input.ptr, input_zc, start_idx, num_elements_per_batch, batch_size, start_idx * num_elements_per_batch);
hipLaunchKernelGGL(( copy_kernel<DT>), dim3(GET_BLOCKS(acc_batch_input.rect.volume())), dim3(CUDA_NUM_THREADS), 0, stream,
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(hipDeviceSynchronize());
}
template void SingleDataLoader::load_input<float>(const Task *task, const std::vector<PhysicalRegion> ®ions, Context ctx, Runtime* runtime);
template void SingleDataLoader::load_input<int32_t>(const Task *task, const std::vector<PhysicalRegion> ®ions, Context ctx, Runtime* runtime);
template void SingleDataLoader::load_input<int64_t>(const Task *task, const std::vector<PhysicalRegion> ®ions, Context ctx, Runtime* runtime);
| a7cd0cb365c72947e5e194ceafec30ca94dcf540.cu | /* Copyright 2020 Stanford, Los Alamos National Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_helper.h"
#include "flexflow_dataloader.h"
void ImgDataLoader::load_label(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<int, 2> acc_full_label(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<int, 2> acc_batch_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
int batch_size = acc_batch_label.rect.hi[1] - acc_batch_label.rect.lo[1] + 1;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
const int* input_zc = acc_full_label.ptr + meta->idxs[0];
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
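  // Copy this batch's labels from the full dataset region into the per-batch region on the device.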
copy_kernel<<<GET_BLOCKS(acc_batch_label.rect.volume()), CUDA_NUM_THREADS, 0, stream>>>(
acc_batch_label.ptr, input_zc, acc_batch_label.rect.volume());
checkCUDA(cudaDeviceSynchronize());
}
void ImgDataLoader4D::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<float, 4> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
coord_t batch_size = acc_batch_input.rect.hi[3] - acc_batch_input.rect.lo[3] + 1;
coord_t channels = acc_batch_input.rect.hi[2] - acc_batch_input.rect.lo[2] + 1;
coord_t height = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
coord_t width = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
coord_t start_idx = meta->idxs[0];
const float* input_zc = acc_full_input.ptr + start_idx * channels * height * width;
//printf("load input %d %d %d %d\n", meta->idxs[0], channels, height, width);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
copy_kernel<<<GET_BLOCKS(acc_batch_input.rect.volume()), CUDA_NUM_THREADS, 0, stream>>>(
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(cudaDeviceSynchronize());
}
void ImgDataLoader2D::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<float, 2> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
coord_t batch_size = acc_batch_input.rect.hi[1] - acc_batch_input.rect.lo[1] + 1;
coord_t width = acc_batch_input.rect.hi[0] - acc_batch_input.rect.lo[0] + 1;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
coord_t start_idx = meta->idxs[0];
const float* input_zc = acc_full_input.ptr + start_idx * width;
//printf("load input %d %d %d %d\n", meta->idxs[0], channels, height, width);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
copy_kernel<<<GET_BLOCKS(acc_batch_input.rect.volume()), CUDA_NUM_THREADS, 0, stream>>>(
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(cudaDeviceSynchronize());
}
template<typename DT>
void SingleDataLoader::load_input(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
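  // Dispatch on the region's runtime dimensionality to the matching load_input_with_dim<DT, DIM> instantiation.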
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return load_input_with_dim<DT, DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
template<typename DT, int NDIM>
void SingleDataLoader::load_input_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx,
Runtime* runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
SampleIdxs* meta = (SampleIdxs*) task->local_args;
TensorAccessorR<DT, NDIM> acc_full_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<DT, NDIM> acc_batch_input(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
coord_t batch_size = acc_batch_input.rect.hi[NDIM-1] - acc_batch_input.rect.lo[NDIM-1] + 1;
coord_t num_elements_per_batch = acc_batch_input.rect.volume() / batch_size;
  //FIXME: currently assume continuous indices
assert(batch_size == meta->num_samples);
for (int i = 1; i < batch_size; i++)
assert(meta->idxs[i] == meta->idxs[0] + i);
coord_t start_idx = meta->idxs[0];
const DT* input_zc = acc_full_input.ptr + start_idx * num_elements_per_batch;
// const int point = task->index_point.point_data[0];
// printf("Load batch point %d, start_idx %ld, ptr %p\n", point, start_idx, input_zc);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
//printf("ptr(%p, %p), idx0 %d nb_elements_per_batch %d, batch_size %d, %d\n", acc_full_input.ptr, input_zc, start_idx, num_elements_per_batch, batch_size, start_idx * num_elements_per_batch);
copy_kernel<DT><<<GET_BLOCKS(acc_batch_input.rect.volume()), CUDA_NUM_THREADS, 0, stream>>>(
acc_batch_input.ptr, input_zc, acc_batch_input.rect.volume());
checkCUDA(cudaDeviceSynchronize());
}
template void SingleDataLoader::load_input<float>(const Task *task, const std::vector<PhysicalRegion> ®ions, Context ctx, Runtime* runtime);
template void SingleDataLoader::load_input<int32_t>(const Task *task, const std::vector<PhysicalRegion> ®ions, Context ctx, Runtime* runtime);
template void SingleDataLoader::load_input<int64_t>(const Task *task, const std::vector<PhysicalRegion> ®ions, Context ctx, Runtime* runtime);
|
ce6e07fa8d27e994c0c3932b4416c5e25cd9bd01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by wei on 17-10-22.
//
#include "core/directional_tsdf.h"
#include "core/functions.h"
#include "engine/main_engine.h"
#include "mapping/allocate.h"
#include "mapping/block_traversal.hpp"
#include "util/timer.h"
#include "allocate.h"
/**
* @param hash_table
* @param sensor_data
* @param sensor_params
* @param wTc
* @param geometry_helper
* @param candidate_entries
 * @param runtime_params raycasting_mode selects whether blocks are allocated along the view ray, along the surface normal, or a combination of both
*/
__global__
void AllocBlockArrayKernel(
EntryArray candidate_entries,
SensorData sensor_data,
SensorParams sensor_params,
float4x4 wTc,
HashTable hash_table,
GeometryHelper geometry_helper,
RuntimeParams runtime_params)
{
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= sensor_params.width || y >= sensor_params.height)
return;
/// 1. Get observed data
float depth = tex2D<float>(sensor_data.depth_texture, x, y);
float4 normal_camera = tex2D<float4>(sensor_data.normal_texture, x, y);
normal_camera.w = 0;
if (not IsValidDepth(depth) or depth >= geometry_helper.sdf_upper_bound or not IsValidNormal(normal_camera))
return;
float truncation = geometry_helper.truncate_distance(depth);
float3 point_camera_pos = geometry_helper.ImageReprojectToCamera(x, y, depth,
sensor_params.fx, sensor_params.fy,
sensor_params.cx, sensor_params.cy);
float3 point_world_pos = wTc * point_camera_pos;
float3 normal_world = make_float3(wTc * normal_camera);
/// 2. Set range where blocks are allocated
float3 ray_direction_before;
float3 ray_direction_behind;
if (runtime_params.raycasting_mode == RAY_DIRECTION_CAMERA)
{
float3 camera_world_pos = make_float3(wTc * make_float4(0, 0, 0, 1));
ray_direction_before = ray_direction_behind = normalize(point_world_pos - camera_world_pos);
}
  else if (runtime_params.raycasting_mode == RAY_DIRECTION_POS_CAMERA_NEG_NORMAL)
{
float3 camera_world_pos = make_float3(wTc * make_float4(0, 0, 0, 1));
ray_direction_before = normalize(point_world_pos - camera_world_pos);
ray_direction_behind = -normal_world;
} else // (runtime_params.raycasting_mode == RAY_DIRECTION_NORMAL)
{
ray_direction_behind = ray_direction_before = -normal_world;
}
/// 3. Traverse all blocks in truncation range and allocate VoxelArray, if necessary
float directional_weights[N_DIRECTIONS];
ComputeDirectionWeights(normal_world, directional_weights);
BlockTraversal voxel_traversal_before(
point_world_pos - truncation * ray_direction_before,
ray_direction_before,
truncation,
geometry_helper.voxel_size);
BlockTraversal voxel_traversal_behind(
point_world_pos,
ray_direction_behind,
truncation,
geometry_helper.voxel_size);
if (voxel_traversal_behind.HasNextBlock()) voxel_traversal_behind.GetNextBlock(); // Skip first voxel to prevent duplicate fusion
while (voxel_traversal_before.HasNextBlock() or voxel_traversal_behind.HasNextBlock())
{
int3 voxel_idx;
if (voxel_traversal_before.HasNextBlock())
voxel_idx = voxel_traversal_before.GetNextBlock();
else
voxel_idx = voxel_traversal_behind.GetNextBlock();
int3 block_idx = geometry_helper.VoxelToBlock(voxel_idx);
hash_table.AllocEntry(block_idx);
// Flag the corresponding hash entry
int entry_idx = hash_table.GetEntryIndex(block_idx);
if (entry_idx >= 0)
{
// set flag to binary mask indicating which directions are affected (for allocating VoxelArrays in the next step)
for (size_t direction = 0; direction < N_DIRECTIONS; direction++)
{
if (directional_weights[direction] > 0)
{
candidate_entries.flag(entry_idx) |= (1 << direction);
}
}
}
}
}
/**
*
* @param candidate_entries
* @param num_entries
* @param blocks
* @param enable_directional Whether to perform directional allocation. Otherwise voxel array 0 is allocated for all blocks.
*/
__global__
void AllocateVoxelArrayKernel(
EntryArray candidate_entries,
uint num_entries,
BlockArray blocks,
bool enable_directional = false
)
{
size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= num_entries)
{
return;
}
const HashEntry &entry = candidate_entries[idx];
if (enable_directional)
{
for (size_t direction = 0; direction < N_DIRECTIONS; direction++)
{
if (entry.direction_flags & (1 << direction))
{
blocks.AllocateVoxelArrayWithMutex(entry.ptr, direction);
}
}
} else
{
blocks.AllocateVoxelArrayWithMutex(entry.ptr, 0);
}
}
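/**
 * Per-voxel directional allocation: projects every voxel of each candidate block
 * into the camera image and allocates the block's VoxelArrays for all directions
 * whose weight is non-zero for the observed surface normal. Used for the
 * voxel-projection update when directional SDF is enabled.
 */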
__global__
void AllocateVoxelArrayKernelDirectional(
EntryArray candidate_entries,
uint num_entries,
SensorData sensor_data,
SensorParams sensor_params,
float4x4 cTw,
float4x4 wTc,
BlockArray blocks,
GeometryHelper geometry_helper
)
{
size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= num_entries)
{
return;
}
const HashEntry &entry = candidate_entries[idx];
int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos);
int allocate_directions[6] = {0};
// For each voxel check which direction normal is pointing -> allocate VoxelArrays accordingly
for (uint voxel_idx = 0; voxel_idx < BLOCK_SIZE; voxel_idx++)
{
int3 voxel_pos = voxel_base_pos + make_int3(geometry_helper.DevectorizeIndex(voxel_idx));
float3 world_pos = geometry_helper.VoxelToWorld(voxel_pos);
float3 camera_pos = cTw * world_pos;
uint2 image_pos = make_uint2(
geometry_helper.CameraProjectToImagei(camera_pos,
sensor_params.fx, sensor_params.fy,
sensor_params.cx, sensor_params.cy));
if (image_pos.x >= sensor_params.width or image_pos.y >= sensor_params.height)
continue;
float4 normal = tex2D<float4>(sensor_data.normal_texture, image_pos.x, image_pos.y);
if (not IsValidNormal(normal))
{ // No normal value for this coordinate (NaN or only 0s)
continue;
}
normal.w = 1;
float4x4 wTcRotOnly = wTc;
wTcRotOnly.m14 = 0;
wTcRotOnly.m24 = 0;
wTcRotOnly.m34 = 0;
float3 normal_world = make_float3(wTcRotOnly * normal);
float weights[N_DIRECTIONS];
ComputeDirectionWeights(normal_world, weights);
for (size_t direction = 0; direction < N_DIRECTIONS; direction++)
{
allocate_directions[direction] |= (weights[direction] > 0);
}
}
for (uint i = 0; i < 6; i++)
{
if (allocate_directions[i])
{
blocks.AllocateVoxelArrayWithMutex(entry.ptr, i);
}
}
}
double AllocBlockArray(
EntryArray candidate_entries,
Sensor &sensor,
MainEngine &main_engine
)
{
Timer timer;
timer.Tick();
main_engine.hash_table().ResetMutexes();
const uint threads_per_block = 8;
const dim3 grid_size((sensor.sensor_params().width + threads_per_block - 1)
/ threads_per_block,
(sensor.sensor_params().height + threads_per_block - 1)
/ threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
AllocBlockArrayKernel << < grid_size, block_size >> > (
candidate_entries,
sensor.data(),
sensor.sensor_params(), sensor.wTc(),
main_engine.hash_table(),
main_engine.geometry_helper(),
main_engine.runtime_params()
);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
return timer.Tock();
}
double AllocVoxelArray(
EntryArray candidate_entries,
Sensor &sensor,
MainEngine &main_engine
)
{
Timer timer;
timer.Tick();
const dim3 grid_size(static_cast<unsigned int>(
::ceil(candidate_entries.count() / static_cast<double>(CUDA_THREADS_PER_BLOCK))));
const dim3 block_size(CUDA_THREADS_PER_BLOCK);
if (main_engine.runtime_params().enable_directional_sdf and
main_engine.runtime_params().update_type == UPDATE_TYPE_VOXEL_PROJECTION)
{
AllocateVoxelArrayKernelDirectional << < grid_size, block_size >> > (
candidate_entries,
candidate_entries.count(),
sensor.data(),
sensor.sensor_params(),
sensor.cTw(),
sensor.wTc(),
main_engine.blocks(),
main_engine.geometry_helper()
);
} else
{
AllocateVoxelArrayKernel << < grid_size, block_size >> > (
candidate_entries,
candidate_entries.count(),
main_engine.blocks(),
main_engine.runtime_params().enable_directional_sdf
);
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
return timer.Tock();
}
| ce6e07fa8d27e994c0c3932b4416c5e25cd9bd01.cu | //
// Created by wei on 17-10-22.
//
#include "core/directional_tsdf.h"
#include "core/functions.h"
#include "engine/main_engine.h"
#include "mapping/allocate.h"
#include "mapping/block_traversal.hpp"
#include "util/timer.h"
#include "allocate.h"
/**
* @param hash_table
* @param sensor_data
* @param sensor_params
* @param wTc
* @param geometry_helper
* @param candidate_entries
 * @param runtime_params Determines (via raycasting_mode) whether allocation is done along the view ray or in the normal direction
*/
__global__
void AllocBlockArrayKernel(
EntryArray candidate_entries,
SensorData sensor_data,
SensorParams sensor_params,
float4x4 wTc,
HashTable hash_table,
GeometryHelper geometry_helper,
RuntimeParams runtime_params)
{
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= sensor_params.width || y >= sensor_params.height)
return;
/// 1. Get observed data
float depth = tex2D<float>(sensor_data.depth_texture, x, y);
float4 normal_camera = tex2D<float4>(sensor_data.normal_texture, x, y);
normal_camera.w = 0;
if (not IsValidDepth(depth) or depth >= geometry_helper.sdf_upper_bound or not IsValidNormal(normal_camera))
return;
float truncation = geometry_helper.truncate_distance(depth);
float3 point_camera_pos = geometry_helper.ImageReprojectToCamera(x, y, depth,
sensor_params.fx, sensor_params.fy,
sensor_params.cx, sensor_params.cy);
float3 point_world_pos = wTc * point_camera_pos;
float3 normal_world = make_float3(wTc * normal_camera);
/// 2. Set range where blocks are allocated
float3 ray_direction_before;
float3 ray_direction_behind;
if (runtime_params.raycasting_mode == RAY_DIRECTION_CAMERA)
{
float3 camera_world_pos = make_float3(wTc * make_float4(0, 0, 0, 1));
ray_direction_before = ray_direction_behind = normalize(point_world_pos - camera_world_pos);
}
if (runtime_params.raycasting_mode == RAY_DIRECTION_POS_CAMERA_NEG_NORMAL)
{
float3 camera_world_pos = make_float3(wTc * make_float4(0, 0, 0, 1));
ray_direction_before = normalize(point_world_pos - camera_world_pos);
ray_direction_behind = -normal_world;
} else // (runtime_params.raycasting_mode == RAY_DIRECTION_NORMAL)
{
ray_direction_behind = ray_direction_before = -normal_world;
}
/// 3. Traverse all blocks in truncation range and allocate VoxelArray, if necessary
float directional_weights[N_DIRECTIONS];
ComputeDirectionWeights(normal_world, directional_weights);
BlockTraversal voxel_traversal_before(
point_world_pos - truncation * ray_direction_before,
ray_direction_before,
truncation,
geometry_helper.voxel_size);
BlockTraversal voxel_traversal_behind(
point_world_pos,
ray_direction_behind,
truncation,
geometry_helper.voxel_size);
if (voxel_traversal_behind.HasNextBlock()) voxel_traversal_behind.GetNextBlock(); // Skip first voxel to prevent duplicate fusion
while (voxel_traversal_before.HasNextBlock() or voxel_traversal_behind.HasNextBlock())
{
int3 voxel_idx;
if (voxel_traversal_before.HasNextBlock())
voxel_idx = voxel_traversal_before.GetNextBlock();
else
voxel_idx = voxel_traversal_behind.GetNextBlock();
int3 block_idx = geometry_helper.VoxelToBlock(voxel_idx);
hash_table.AllocEntry(block_idx);
// Flag the corresponding hash entry
int entry_idx = hash_table.GetEntryIndex(block_idx);
if (entry_idx >= 0)
{
// set flag to binary mask indicating which directions are affected (for allocating VoxelArrays in the next step)
for (size_t direction = 0; direction < N_DIRECTIONS; direction++)
{
if (directional_weights[direction] > 0)
{
candidate_entries.flag(entry_idx) |= (1 << direction);
}
}
}
}
}
/**
*
* @param candidate_entries
* @param num_entries
* @param blocks
* @param enable_directional Whether to perform directional allocation. Otherwise voxel array 0 is allocated for all blocks.
*/
__global__
void AllocateVoxelArrayKernel(
EntryArray candidate_entries,
uint num_entries,
BlockArray blocks,
bool enable_directional = false
)
{
size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= num_entries)
{
return;
}
const HashEntry &entry = candidate_entries[idx];
if (enable_directional)
{
for (size_t direction = 0; direction < N_DIRECTIONS; direction++)
{
if (entry.direction_flags & (1 << direction))
{
blocks.AllocateVoxelArrayWithMutex(entry.ptr, direction);
}
}
} else
{
blocks.AllocateVoxelArrayWithMutex(entry.ptr, 0);
}
}
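/**
 * Per-voxel directional allocation: projects every voxel of each candidate block
 * into the camera image and allocates the block's VoxelArrays for all directions
 * whose weight is non-zero for the observed surface normal. Used for the
 * voxel-projection update when directional SDF is enabled.
 */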
__global__
void AllocateVoxelArrayKernelDirectional(
EntryArray candidate_entries,
uint num_entries,
SensorData sensor_data,
SensorParams sensor_params,
float4x4 cTw,
float4x4 wTc,
BlockArray blocks,
GeometryHelper geometry_helper
)
{
size_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= num_entries)
{
return;
}
const HashEntry &entry = candidate_entries[idx];
int3 voxel_base_pos = geometry_helper.BlockToVoxel(entry.pos);
int allocate_directions[6] = {0};
// For each voxel check which direction normal is pointing -> allocate VoxelArrays accordingly
for (uint voxel_idx = 0; voxel_idx < BLOCK_SIZE; voxel_idx++)
{
int3 voxel_pos = voxel_base_pos + make_int3(geometry_helper.DevectorizeIndex(voxel_idx));
float3 world_pos = geometry_helper.VoxelToWorld(voxel_pos);
float3 camera_pos = cTw * world_pos;
uint2 image_pos = make_uint2(
geometry_helper.CameraProjectToImagei(camera_pos,
sensor_params.fx, sensor_params.fy,
sensor_params.cx, sensor_params.cy));
if (image_pos.x >= sensor_params.width or image_pos.y >= sensor_params.height)
continue;
float4 normal = tex2D<float4>(sensor_data.normal_texture, image_pos.x, image_pos.y);
if (not IsValidNormal(normal))
{ // No normal value for this coordinate (NaN or only 0s)
continue;
}
normal.w = 1;
float4x4 wTcRotOnly = wTc;
wTcRotOnly.m14 = 0;
wTcRotOnly.m24 = 0;
wTcRotOnly.m34 = 0;
float3 normal_world = make_float3(wTcRotOnly * normal);
float weights[N_DIRECTIONS];
ComputeDirectionWeights(normal_world, weights);
for (size_t direction = 0; direction < N_DIRECTIONS; direction++)
{
allocate_directions[direction] |= (weights[direction] > 0);
}
}
for (uint i = 0; i < 6; i++)
{
if (allocate_directions[i])
{
blocks.AllocateVoxelArrayWithMutex(entry.ptr, i);
}
}
}
double AllocBlockArray(
EntryArray candidate_entries,
Sensor &sensor,
MainEngine &main_engine
)
{
Timer timer;
timer.Tick();
main_engine.hash_table().ResetMutexes();
const uint threads_per_block = 8;
const dim3 grid_size((sensor.sensor_params().width + threads_per_block - 1)
/ threads_per_block,
(sensor.sensor_params().height + threads_per_block - 1)
/ threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
AllocBlockArrayKernel << < grid_size, block_size >> > (
candidate_entries,
sensor.data(),
sensor.sensor_params(), sensor.wTc(),
main_engine.hash_table(),
main_engine.geometry_helper(),
main_engine.runtime_params()
);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
return timer.Tock();
}
double AllocVoxelArray(
EntryArray candidate_entries,
Sensor &sensor,
MainEngine &main_engine
)
{
Timer timer;
timer.Tick();
const dim3 grid_size(static_cast<unsigned int>(
std::ceil(candidate_entries.count() / static_cast<double>(CUDA_THREADS_PER_BLOCK))));
const dim3 block_size(CUDA_THREADS_PER_BLOCK);
if (main_engine.runtime_params().enable_directional_sdf and
main_engine.runtime_params().update_type == UPDATE_TYPE_VOXEL_PROJECTION)
{
AllocateVoxelArrayKernelDirectional << < grid_size, block_size >> > (
candidate_entries,
candidate_entries.count(),
sensor.data(),
sensor.sensor_params(),
sensor.cTw(),
sensor.wTc(),
main_engine.blocks(),
main_engine.geometry_helper()
);
} else
{
AllocateVoxelArrayKernel << < grid_size, block_size >> > (
candidate_entries,
candidate_entries.count(),
main_engine.blocks(),
main_engine.runtime_params().enable_directional_sdf
);
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
return timer.Tock();
}
|
d68f4146bdf10d8104458e5197682af9e5e82f40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NDEBUG
#include <chrono>
#include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "wa-tor/aos/wator.h"
//#include "wa-tor/aos/halloc_allocator.h"
//#include "wa-tor/aos/scatteralloc_allocator.h"
//#include "wa-tor/aos/aos_allocator.h"
#include "wa-tor/aos/cuda_allocator.h"
//#include "wa-tor/aos/mallocmc_allocator.h"
#define SPAWN_THRESHOLD 4
#define ENERGY_BOOST 4
#define ENERGY_START 2
#define GRID_SIZE_X 2048
#define GRID_SIZE_Y 1024
#define THREADS_PER_BLOCK 256
#define NUM_BLOCKS 1024
#define OPTION_SHARK_DIE true
#define OPTION_SHARK_SPAWN true
#define OPTION_FISH_SPAWN true
namespace wa_tor {
__device__ uint32_t random_number(uint32_t* state, uint32_t max) {
// Advance and return random state.
// Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator
assert(*state != 0);
*state = static_cast<uint32_t>(
static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u;
return ((*state) >> 7) % max;
}
__device__ uint32_t random_number(uint32_t* state) {
// Advance and return random state.
// Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator
assert(*state != 0);
*state = static_cast<uint32_t>(
static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u;
return ((*state) >> 7);
}
__device__ Cell::Cell(uint32_t random_state) : random_state_(random_state),
agent_(nullptr) {
assert(random_state != 0);
prepare();
}
__device__ Agent* Cell::agent() const {
return agent_;
}
__device__ void Cell::decide() {
if (neighbor_request_[4]) {
// This cell has priority.
agent_->set_new_position(this);
} else {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if (neighbor_request_[i]) {
candidates[num_candidates++] = i;
}
}
if (num_candidates > 0) {
uint32_t selected_index = random_number(&random_state_, num_candidates);
neighbors_[candidates[selected_index]]->agent()->set_new_position(this);
}
}
}
__device__ void Cell::enter(Agent* agent) {
assert(agent_ == nullptr);
#ifndef NDEBUG
// Ensure that no two agents are trying to enter this cell at the same time.
uint64_t old_val = atomicExch(reinterpret_cast<unsigned long long int*>(&agent_),
reinterpret_cast<unsigned long long int>(agent));
assert(old_val == 0);
#else
agent_ = agent;
#endif
agent->set_position(this);
}
__device__ bool Cell::has_fish() const {
return agent_ != nullptr && agent_->type_identifier() == Fish::kTypeId;
}
__device__ bool Cell::has_shark() const {
return agent_ != nullptr && agent_->type_identifier() == Shark::kTypeId;
}
__device__ bool Cell::is_free() const {
return agent_ == nullptr;
}
__device__ void Cell::leave() {
assert(agent_ != nullptr);
agent_ = nullptr;
}
__device__ void Cell::prepare() {
for (int i = 0; i < 5; ++i) {
neighbor_request_[i] = false;
}
}
__device__ uint32_t* Cell::random_state() {
return &random_state_;
}
__device__ void Cell::request_random_fish_neighbor() {
if (!request_random_neighbor<&Cell::has_fish>(agent_->random_state())) {
// No fish found. Look for free cell.
if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) {
neighbor_request_[4] = true;
}
}
}
__device__ void Cell::request_random_free_neighbor() {
if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) {
neighbor_request_[4] = true;
}
}
template<bool(Cell::*predicate)() const>
__device__ bool Cell::request_random_neighbor(uint32_t* random_state) {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if ((neighbors_[i]->*predicate)()) {
candidates[num_candidates++] = i;
}
}
if (num_candidates == 0) {
return false;
} else {
uint32_t selected_index = random_number(random_state, num_candidates);
uint8_t selected = candidates[selected_index];
uint8_t neighbor_index = (selected + 2) % 4;
neighbors_[selected]->neighbor_request_[neighbor_index] = true;
// Check correctness of neighbor calculation.
assert(neighbors_[selected]->neighbors_[neighbor_index] == this);
return true;
}
}
__device__ void Cell::set_neighbors(Cell* left, Cell* top,
Cell* right, Cell* bottom) {
neighbors_[0] = left;
neighbors_[1] = top;
neighbors_[2] = right;
neighbors_[3] = bottom;
}
__device__ Agent::Agent(uint32_t random_state, uint8_t type_identifier)
: random_state_(random_state), type_identifier_(type_identifier) {
assert(random_state != 0);
}
__device__ uint32_t* Agent::random_state() {
return &random_state_;
}
__device__ void Agent::set_new_position(Cell* new_pos) {
// Check for race condition. (This is not bullet proof.)
assert(new_position_ == position_);
new_position_ = new_pos;
}
__device__ Cell* Agent::position() const {
return position_;
}
__device__ void Agent::set_position(Cell* cell) {
position_ = cell;
}
// TODO: Verify that RTTI (dynamic_cast) does not work in device code.
__device__ uint8_t Agent::type_identifier() const {
return type_identifier_;
}
__device__ Fish::Fish(uint32_t random_state)
: Agent(random_state, kTypeId),
egg_timer_(random_state % SPAWN_THRESHOLD) {
assert(random_state != 0);
}
__device__ void Fish::prepare() {
assert(type_identifier() == kTypeId);
egg_timer_++;
// Fallback: Stay on current cell.
new_position_ = position_;
assert(position_ != nullptr);
position_->request_random_free_neighbor();
}
__device__ void Fish::update() {
assert(type_identifier() == kTypeId);
Cell* old_position = position_;
if (old_position != new_position_) {
old_position->leave();
new_position_->enter(this);
if (OPTION_FISH_SPAWN && egg_timer_ > SPAWN_THRESHOLD) {
uint32_t new_random_state = random_number(&random_state_) + 401;
new_random_state = new_random_state != 0 ? new_random_state
: random_state_;
auto* new_fish = allocate<Fish>(new_random_state);
assert(new_fish != nullptr);
old_position->enter(new_fish);
egg_timer_ = 0;
}
}
}
__device__ Shark::Shark(uint32_t random_state)
: Agent(random_state, kTypeId), energy_(ENERGY_START),
egg_timer_(random_state % SPAWN_THRESHOLD) {
assert(random_state_ != 0);
}
__device__ void Shark::prepare() {
assert(type_identifier() == kTypeId);
egg_timer_++;
energy_--;
assert(position_ != nullptr);
if (OPTION_SHARK_DIE && energy_ == 0) {
// Do nothing. Shark will die.
} else {
// Fallback: Stay on current cell.
new_position_ = position_;
position_->request_random_fish_neighbor();
}
}
__device__ void Shark::update() {
assert(type_identifier() == kTypeId);
if (OPTION_SHARK_DIE && energy_ == 0) {
position_->kill();
} else {
Cell* old_position = position_;
if (old_position != new_position_) {
if (new_position_->has_fish()) {
energy_ += ENERGY_BOOST;
new_position_->kill();
}
old_position->leave();
new_position_->enter(this);
if (OPTION_SHARK_SPAWN && egg_timer_ > SPAWN_THRESHOLD) {
assert(random_state_ != 0);
uint32_t new_random_state = random_number(&random_state_) + 601;
new_random_state = new_random_state != 0 ? new_random_state
: random_state_;
auto* new_shark = allocate<Shark>(new_random_state);
assert(new_shark != nullptr);
old_position->enter(new_shark);
egg_timer_ = 0;
}
}
}
}
__device__ void Cell::kill() {
assert(agent_ != nullptr);
if (agent_->type_identifier() == 1) {
deallocate_untyped<1>(agent_);
} else if (agent_->type_identifier() == 2) {
deallocate_untyped<2>(agent_);
} else {
// Unknown type.
assert(false);
}
agent_ = nullptr;
}
// ----- KERNELS -----
__device__ Cell* cells[GRID_SIZE_X * GRID_SIZE_Y];
__global__ void create_cells() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
int x = tid % GRID_SIZE_X;
int y = tid / GRID_SIZE_X;
float init_state = __logf(tid + 401);
uint32_t init_state_int = *reinterpret_cast<uint32_t*>(&init_state);
// Cell* new_cell = new Cell(init_state_int);
Cell* new_cell = allocate<Cell>(601*x*x*y + init_state_int);
assert(new_cell != nullptr);
cells[tid] = new_cell;
}
}
__global__ void setup_cells() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
int x = tid % GRID_SIZE_X;
int y = tid / GRID_SIZE_X;
Cell* left = x > 0 ? cells[y*GRID_SIZE_X + x - 1]
: cells[y*GRID_SIZE_X + GRID_SIZE_X - 1];
Cell* right = x < GRID_SIZE_X - 1 ? cells[y*GRID_SIZE_X + x + 1]
: cells[y*GRID_SIZE_X];
Cell* top = y > 0 ? cells[(y - 1)*GRID_SIZE_X + x]
: cells[(GRID_SIZE_Y - 1)*GRID_SIZE_X + x];
Cell* bottom = y < GRID_SIZE_Y - 1 ? cells[(y + 1)*GRID_SIZE_X + x]
: cells[x];
// left, top, right, bottom
cells[tid]->set_neighbors(left, top, right, bottom);
// Initialize with random agent.
uint32_t agent_type = random_number(cells[tid]->random_state(), 4);
if (agent_type == 0) {
auto* agent = allocate<Fish>(*(cells[tid]->random_state()));
assert(agent != nullptr);
cells[tid]->enter(agent);
} else if (agent_type == 1) {
auto* agent = allocate<Shark>(*(cells[tid]->random_state()));
assert(agent != nullptr);
cells[tid]->enter(agent);
} else {
// Free cell.
}
}
}
// Problem: It is not easy to keep track of all objects of a class if they are
// dynamically allocated. But we want to benchmark the performance of new/
// delete in CUDA.
// Solution: Fill these arrays in a separate kernel by iterating over all
// cells, storing agents in the respective array slots, and compacting the
// arrays. We do not measure the performance of these steps.
__device__ uint32_t num_sharks = 0;
__device__ Shark* sharks[GRID_SIZE_Y * GRID_SIZE_X];
__device__ uint32_t num_fish = 0;
__device__ Fish* fish[GRID_SIZE_Y * GRID_SIZE_X];
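// Sort the fish/shark/cell pointer arrays by address before each step; this makes
// the traversal order independent of the order in which the find_* kernels
// happened to fill the arrays.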
void sort_arrays() {
uintptr_t* dev_sharks;
uintptr_t* dev_fish;
uintptr_t* dev_cells;
hipGetSymbolAddress((void**) &dev_sharks, sharks);
hipGetSymbolAddress((void**) &dev_fish, fish);
hipGetSymbolAddress((void**) &dev_cells, cells);
uint32_t h_num_sharks, h_num_fish;
hipMemcpyFromSymbol(&h_num_sharks, num_sharks, sizeof(uint32_t), 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&h_num_fish, num_fish, sizeof(uint32_t), 0, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
thrust::device_ptr<uintptr_t> t_sharks(dev_sharks);
thrust::device_ptr<uintptr_t> t_fish(dev_fish);
thrust::device_ptr<uintptr_t> t_cells(dev_cells);
thrust::sort(t_sharks, t_sharks+h_num_sharks);
thrust::sort(t_fish, t_fish+h_num_fish);
thrust::sort(t_cells, t_cells+GRID_SIZE_X*GRID_SIZE_Y);
gpuErrchk(hipDeviceSynchronize());
}
__global__ void print_checksum() {
uint64_t chksum = 0;
// Sorting of the array does not matter in the calculation here.
for (int i = 0; i < num_sharks; ++i) {
chksum += *(sharks[i]->position()->random_state()) % 601;
}
for (int i = 0; i < num_fish; ++i) {
chksum += *(fish[i]->position()->random_state()) % 601;
}
printf("%" PRIu64 "\n", chksum);
}
__global__ void reset_fish_array() {
num_fish = 0;
}
__global__ void reset_shark_array() {
num_sharks = 0;
}
// One thread per cell.
__global__ void find_fish() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < GRID_SIZE_Y*GRID_SIZE_X) {
if (cells[tid]->has_fish()) {
uint32_t idx = atomicAdd(&num_fish, 1);
fish[idx] = reinterpret_cast<Fish*>(cells[tid]->agent());
}
}
}
// One thread per cell.
__global__ void find_sharks() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < GRID_SIZE_Y*GRID_SIZE_X) {
if (cells[tid]->has_shark()) {
uint32_t idx = atomicAdd(&num_sharks, 1);
sharks[idx] = reinterpret_cast<Shark*>(cells[tid]->agent());
}
}
}
void generate_fish_array() {
hipLaunchKernelGGL(( reset_fish_array), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( find_fish), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
void generate_shark_array() {
hipLaunchKernelGGL(( reset_shark_array), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( find_sharks), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__global__ void cell_prepare() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
cells[tid]->prepare();
}
}
__global__ void cell_decide() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
cells[tid]->decide();
}
}
__global__ void fish_prepare() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_fish;
tid += blockDim.x*gridDim.x) {
assert(fish[tid] != nullptr);
fish[tid]->prepare();
}
}
__global__ void fish_update() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_fish;
tid += blockDim.x*gridDim.x) {
assert(fish[tid] != nullptr);
fish[tid]->update();
}
}
__global__ void shark_prepare() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_sharks;
tid += blockDim.x*gridDim.x) {
assert(sharks[tid] != nullptr);
sharks[tid]->prepare();
}
}
__global__ void shark_update() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_sharks;
tid += blockDim.x*gridDim.x) {
assert(sharks[tid] != nullptr);
sharks[tid]->update();
}
}
void generate_shark_fish_arrays() {
generate_fish_array();
generate_shark_array();
sort_arrays();
}
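// One simulation step. Each species moves in two phases: prepare() lets every
// agent request a target cell, cell_decide() resolves conflicting requests per
// cell, and update() performs the move (including eating, spawning and dying).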
void step() {
hipLaunchKernelGGL(( cell_prepare), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( fish_prepare), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( cell_decide), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( fish_update), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( cell_prepare), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( shark_prepare), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( cell_decide), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( shark_update), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__global__ void init_memory_system() {
initialize_allocator();
}
void initialize() {
//init the heap
initHeap(512*1024U*1024U);
hipLaunchKernelGGL(( init_memory_system), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( create_cells), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( setup_cells), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__device__ uint32_t d_gui_map[GRID_SIZE_Y * GRID_SIZE_X];
uint32_t gui_map[GRID_SIZE_Y * GRID_SIZE_X];
__global__ void fill_gui_map() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < GRID_SIZE_Y*GRID_SIZE_X) {
if (cells[tid]->agent() != nullptr) {
d_gui_map[tid] = cells[tid]->agent()->type_identifier();
} else {
d_gui_map[tid] = 0;
}
}
}
void update_gui_map() {
hipLaunchKernelGGL(( fill_gui_map), dim3(GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(gui_map, d_gui_map, sizeof(uint32_t)*GRID_SIZE_X*GRID_SIZE_Y,
0, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
int h_num_fish = 0;
int h_num_sharks = 0;
void print_stats() {
generate_fish_array();
generate_shark_array();
//printf("\n Fish: %i, Sharks: %i CHKSUM: ", h_num_fish, h_num_sharks);
hipLaunchKernelGGL(( print_checksum), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
int main(int argc, char* argv[]) {
//hipDeviceSetLimit(hipLimitMallocHeapSize, 256*1024*1024);
initialize();
size_t heap_size;
hipDeviceGetLimit(&heap_size, hipLimitMallocHeapSize);
printf("CUDA heap size: %lu\n", heap_size);
//printf("Computing...\n");
//int time_running = 0;
for (int i = 0; i<500; ++i) {
if (i%50==0) {
//print_stats();
//render();
//printf(" Time: %i usec", time_running);
//time_running = 0;
}
generate_shark_fish_arrays();
// Printing: RUNNING TIME, NUM_FISH, NUM_SHARKS, CHKSUM, FISH_USE, FISH_ALLOC, SHARK_USE, SHARK_ALLOC
auto time_before = std::chrono::system_clock::now();
step();
auto time_after = std::chrono::system_clock::now();
int time_running = std::chrono::duration_cast<std::chrono::microseconds>(
time_after - time_before).count();
printf("%i,", time_running);
print_stats();
//printf("\n");
}
return 0;
}
} // namespace wa_tor
int main(int argc, char* argv[]) {
return wa_tor::main(0, nullptr);
}
| d68f4146bdf10d8104458e5197682af9e5e82f40.cu | #define NDEBUG
#include <chrono>
#include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "wa-tor/aos/wator.h"
//#include "wa-tor/aos/halloc_allocator.h"
//#include "wa-tor/aos/scatteralloc_allocator.h"
//#include "wa-tor/aos/aos_allocator.h"
#include "wa-tor/aos/cuda_allocator.h"
//#include "wa-tor/aos/mallocmc_allocator.h"
#define SPAWN_THRESHOLD 4
#define ENERGY_BOOST 4
#define ENERGY_START 2
#define GRID_SIZE_X 2048
#define GRID_SIZE_Y 1024
#define THREADS_PER_BLOCK 256
#define NUM_BLOCKS 1024
#define OPTION_SHARK_DIE true
#define OPTION_SHARK_SPAWN true
#define OPTION_FISH_SPAWN true
namespace wa_tor {
__device__ uint32_t random_number(uint32_t* state, uint32_t max) {
// Advance and return random state.
// Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator
assert(*state != 0);
*state = static_cast<uint32_t>(
static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u;
return ((*state) >> 7) % max;
}
__device__ uint32_t random_number(uint32_t* state) {
// Advance and return random state.
// Source: https://en.wikipedia.org/wiki/Lehmer_random_number_generator
assert(*state != 0);
*state = static_cast<uint32_t>(
static_cast<uint64_t>(*state) * 1103515245u + 12345) % 2147483648u;
return ((*state) >> 7);
}
__device__ Cell::Cell(uint32_t random_state) : random_state_(random_state),
agent_(nullptr) {
assert(random_state != 0);
prepare();
}
__device__ Agent* Cell::agent() const {
return agent_;
}
__device__ void Cell::decide() {
if (neighbor_request_[4]) {
// This cell has priority.
agent_->set_new_position(this);
} else {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if (neighbor_request_[i]) {
candidates[num_candidates++] = i;
}
}
if (num_candidates > 0) {
uint32_t selected_index = random_number(&random_state_, num_candidates);
neighbors_[candidates[selected_index]]->agent()->set_new_position(this);
}
}
}
__device__ void Cell::enter(Agent* agent) {
assert(agent_ == nullptr);
#ifndef NDEBUG
// Ensure that no two agents are trying to enter this cell at the same time.
uint64_t old_val = atomicExch(reinterpret_cast<unsigned long long int*>(&agent_),
reinterpret_cast<unsigned long long int>(agent));
assert(old_val == 0);
#else
agent_ = agent;
#endif
agent->set_position(this);
}
__device__ bool Cell::has_fish() const {
return agent_ != nullptr && agent_->type_identifier() == Fish::kTypeId;
}
__device__ bool Cell::has_shark() const {
return agent_ != nullptr && agent_->type_identifier() == Shark::kTypeId;
}
__device__ bool Cell::is_free() const {
return agent_ == nullptr;
}
__device__ void Cell::leave() {
assert(agent_ != nullptr);
agent_ = nullptr;
}
__device__ void Cell::prepare() {
for (int i = 0; i < 5; ++i) {
neighbor_request_[i] = false;
}
}
__device__ uint32_t* Cell::random_state() {
return &random_state_;
}
__device__ void Cell::request_random_fish_neighbor() {
if (!request_random_neighbor<&Cell::has_fish>(agent_->random_state())) {
// No fish found. Look for free cell.
if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) {
neighbor_request_[4] = true;
}
}
}
__device__ void Cell::request_random_free_neighbor() {
if (!request_random_neighbor<&Cell::is_free>(agent_->random_state())) {
neighbor_request_[4] = true;
}
}
template<bool(Cell::*predicate)() const>
__device__ bool Cell::request_random_neighbor(uint32_t* random_state) {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if ((neighbors_[i]->*predicate)()) {
candidates[num_candidates++] = i;
}
}
if (num_candidates == 0) {
return false;
} else {
uint32_t selected_index = random_number(random_state, num_candidates);
uint8_t selected = candidates[selected_index];
uint8_t neighbor_index = (selected + 2) % 4;
neighbors_[selected]->neighbor_request_[neighbor_index] = true;
// Check correctness of neighbor calculation.
assert(neighbors_[selected]->neighbors_[neighbor_index] == this);
return true;
}
}
__device__ void Cell::set_neighbors(Cell* left, Cell* top,
Cell* right, Cell* bottom) {
neighbors_[0] = left;
neighbors_[1] = top;
neighbors_[2] = right;
neighbors_[3] = bottom;
}
__device__ Agent::Agent(uint32_t random_state, uint8_t type_identifier)
: random_state_(random_state), type_identifier_(type_identifier) {
assert(random_state != 0);
}
__device__ uint32_t* Agent::random_state() {
return &random_state_;
}
__device__ void Agent::set_new_position(Cell* new_pos) {
// Check for race condition. (This is not bullet proof.)
assert(new_position_ == position_);
new_position_ = new_pos;
}
__device__ Cell* Agent::position() const {
return position_;
}
__device__ void Agent::set_position(Cell* cell) {
position_ = cell;
}
// TODO: Verify that RTTI (dynamic_cast) does not work in device code.
__device__ uint8_t Agent::type_identifier() const {
return type_identifier_;
}
__device__ Fish::Fish(uint32_t random_state)
: Agent(random_state, kTypeId),
egg_timer_(random_state % SPAWN_THRESHOLD) {
assert(random_state != 0);
}
__device__ void Fish::prepare() {
assert(type_identifier() == kTypeId);
egg_timer_++;
// Fallback: Stay on current cell.
new_position_ = position_;
assert(position_ != nullptr);
position_->request_random_free_neighbor();
}
__device__ void Fish::update() {
assert(type_identifier() == kTypeId);
Cell* old_position = position_;
if (old_position != new_position_) {
old_position->leave();
new_position_->enter(this);
if (OPTION_FISH_SPAWN && egg_timer_ > SPAWN_THRESHOLD) {
uint32_t new_random_state = random_number(&random_state_) + 401;
new_random_state = new_random_state != 0 ? new_random_state
: random_state_;
auto* new_fish = allocate<Fish>(new_random_state);
assert(new_fish != nullptr);
old_position->enter(new_fish);
egg_timer_ = 0;
}
}
}
__device__ Shark::Shark(uint32_t random_state)
: Agent(random_state, kTypeId), energy_(ENERGY_START),
egg_timer_(random_state % SPAWN_THRESHOLD) {
assert(random_state_ != 0);
}
__device__ void Shark::prepare() {
assert(type_identifier() == kTypeId);
egg_timer_++;
energy_--;
assert(position_ != nullptr);
if (OPTION_SHARK_DIE && energy_ == 0) {
// Do nothing. Shark will die.
} else {
// Fallback: Stay on current cell.
new_position_ = position_;
position_->request_random_fish_neighbor();
}
}
__device__ void Shark::update() {
assert(type_identifier() == kTypeId);
if (OPTION_SHARK_DIE && energy_ == 0) {
position_->kill();
} else {
Cell* old_position = position_;
if (old_position != new_position_) {
if (new_position_->has_fish()) {
energy_ += ENERGY_BOOST;
new_position_->kill();
}
old_position->leave();
new_position_->enter(this);
if (OPTION_SHARK_SPAWN && egg_timer_ > SPAWN_THRESHOLD) {
assert(random_state_ != 0);
uint32_t new_random_state = random_number(&random_state_) + 601;
new_random_state = new_random_state != 0 ? new_random_state
: random_state_;
auto* new_shark = allocate<Shark>(new_random_state);
assert(new_shark != nullptr);
old_position->enter(new_shark);
egg_timer_ = 0;
}
}
}
}
__device__ void Cell::kill() {
assert(agent_ != nullptr);
if (agent_->type_identifier() == 1) {
deallocate_untyped<1>(agent_);
} else if (agent_->type_identifier() == 2) {
deallocate_untyped<2>(agent_);
} else {
// Unknown type.
assert(false);
}
agent_ = nullptr;
}
// ----- KERNELS -----
__device__ Cell* cells[GRID_SIZE_X * GRID_SIZE_Y];
__global__ void create_cells() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
int x = tid % GRID_SIZE_X;
int y = tid / GRID_SIZE_X;
float init_state = __logf(tid + 401);
uint32_t init_state_int = *reinterpret_cast<uint32_t*>(&init_state);
// Cell* new_cell = new Cell(init_state_int);
Cell* new_cell = allocate<Cell>(601*x*x*y + init_state_int);
assert(new_cell != nullptr);
cells[tid] = new_cell;
}
}
__global__ void setup_cells() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
int x = tid % GRID_SIZE_X;
int y = tid / GRID_SIZE_X;
Cell* left = x > 0 ? cells[y*GRID_SIZE_X + x - 1]
: cells[y*GRID_SIZE_X + GRID_SIZE_X - 1];
Cell* right = x < GRID_SIZE_X - 1 ? cells[y*GRID_SIZE_X + x + 1]
: cells[y*GRID_SIZE_X];
Cell* top = y > 0 ? cells[(y - 1)*GRID_SIZE_X + x]
: cells[(GRID_SIZE_Y - 1)*GRID_SIZE_X + x];
Cell* bottom = y < GRID_SIZE_Y - 1 ? cells[(y + 1)*GRID_SIZE_X + x]
: cells[x];
// left, top, right, bottom
cells[tid]->set_neighbors(left, top, right, bottom);
// Initialize with random agent.
uint32_t agent_type = random_number(cells[tid]->random_state(), 4);
if (agent_type == 0) {
auto* agent = allocate<Fish>(*(cells[tid]->random_state()));
assert(agent != nullptr);
cells[tid]->enter(agent);
} else if (agent_type == 1) {
auto* agent = allocate<Shark>(*(cells[tid]->random_state()));
assert(agent != nullptr);
cells[tid]->enter(agent);
} else {
// Free cell.
}
}
}
// Problem: It is not easy to keep track of all objects of a class if they are
// dynamically allocated. But we want to benchmark the performance of new/
// delete in CUDA.
// Solution: Fill these arrays in a separate kernel by iterating over all
// cells, storing agents in the respective array slots, and compacting the
// arrays. We do not measure the performance of these steps.
__device__ uint32_t num_sharks = 0;
__device__ Shark* sharks[GRID_SIZE_Y * GRID_SIZE_X];
__device__ uint32_t num_fish = 0;
__device__ Fish* fish[GRID_SIZE_Y * GRID_SIZE_X];
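// Sort the fish/shark/cell pointer arrays by address before each step; this makes
// the traversal order independent of the order in which the find_* kernels
// happened to fill the arrays.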
void sort_arrays() {
uintptr_t* dev_sharks;
uintptr_t* dev_fish;
uintptr_t* dev_cells;
cudaGetSymbolAddress((void**) &dev_sharks, sharks);
cudaGetSymbolAddress((void**) &dev_fish, fish);
cudaGetSymbolAddress((void**) &dev_cells, cells);
uint32_t h_num_sharks, h_num_fish;
cudaMemcpyFromSymbol(&h_num_sharks, num_sharks, sizeof(uint32_t), 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&h_num_fish, num_fish, sizeof(uint32_t), 0, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
thrust::device_ptr<uintptr_t> t_sharks(dev_sharks);
thrust::device_ptr<uintptr_t> t_fish(dev_fish);
thrust::device_ptr<uintptr_t> t_cells(dev_cells);
thrust::sort(t_sharks, t_sharks+h_num_sharks);
thrust::sort(t_fish, t_fish+h_num_fish);
thrust::sort(t_cells, t_cells+GRID_SIZE_X*GRID_SIZE_Y);
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void print_checksum() {
uint64_t chksum = 0;
// Sorting of the array does not matter in the calculation here.
for (int i = 0; i < num_sharks; ++i) {
chksum += *(sharks[i]->position()->random_state()) % 601;
}
for (int i = 0; i < num_fish; ++i) {
chksum += *(fish[i]->position()->random_state()) % 601;
}
printf("%" PRIu64 "\n", chksum);
}
__global__ void reset_fish_array() {
num_fish = 0;
}
__global__ void reset_shark_array() {
num_sharks = 0;
}
// One thread per cell.
__global__ void find_fish() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < GRID_SIZE_Y*GRID_SIZE_X) {
if (cells[tid]->has_fish()) {
uint32_t idx = atomicAdd(&num_fish, 1);
fish[idx] = reinterpret_cast<Fish*>(cells[tid]->agent());
}
}
}
// One thread per cell.
__global__ void find_sharks() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < GRID_SIZE_Y*GRID_SIZE_X) {
if (cells[tid]->has_shark()) {
uint32_t idx = atomicAdd(&num_sharks, 1);
sharks[idx] = reinterpret_cast<Shark*>(cells[tid]->agent());
}
}
}
void generate_fish_array() {
reset_fish_array<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
find_fish<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void generate_shark_array() {
reset_shark_array<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
find_sharks<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void cell_prepare() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
cells[tid]->prepare();
}
}
__global__ void cell_decide() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < GRID_SIZE_Y*GRID_SIZE_X;
tid += blockDim.x*gridDim.x) {
cells[tid]->decide();
}
}
__global__ void fish_prepare() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_fish;
tid += blockDim.x*gridDim.x) {
assert(fish[tid] != nullptr);
fish[tid]->prepare();
}
}
__global__ void fish_update() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_fish;
tid += blockDim.x*gridDim.x) {
assert(fish[tid] != nullptr);
fish[tid]->update();
}
}
__global__ void shark_prepare() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_sharks;
tid += blockDim.x*gridDim.x) {
assert(sharks[tid] != nullptr);
sharks[tid]->prepare();
}
}
__global__ void shark_update() {
for (int tid = threadIdx.x + blockDim.x*blockIdx.x;
tid < num_sharks;
tid += blockDim.x*gridDim.x) {
assert(sharks[tid] != nullptr);
sharks[tid]->update();
}
}
void generate_shark_fish_arrays() {
generate_fish_array();
generate_shark_array();
sort_arrays();
}
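// One simulation step. Each species moves in two phases: prepare() lets every
// agent request a target cell, cell_decide() resolves conflicting requests per
// cell, and update() performs the move (including eating, spawning and dying).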
void step() {
cell_prepare<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
fish_prepare<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
cell_decide<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
fish_update<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
cell_prepare<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
shark_prepare<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
cell_decide<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
shark_update<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void init_memory_system() {
initialize_allocator();
}
void initialize() {
//init the heap
initHeap(512*1024U*1024U);
init_memory_system<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
create_cells<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
setup_cells<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__device__ uint32_t d_gui_map[GRID_SIZE_Y * GRID_SIZE_X];
uint32_t gui_map[GRID_SIZE_Y * GRID_SIZE_X];
__global__ void fill_gui_map() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < GRID_SIZE_Y*GRID_SIZE_X) {
if (cells[tid]->agent() != nullptr) {
d_gui_map[tid] = cells[tid]->agent()->type_identifier();
} else {
d_gui_map[tid] = 0;
}
}
}
void update_gui_map() {
fill_gui_map<<<GRID_SIZE_X*GRID_SIZE_Y/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(gui_map, d_gui_map, sizeof(uint32_t)*GRID_SIZE_X*GRID_SIZE_Y,
0, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
int h_num_fish = 0;
int h_num_sharks = 0;
void print_stats() {
generate_fish_array();
generate_shark_array();
//printf("\n Fish: %i, Sharks: %i CHKSUM: ", h_num_fish, h_num_sharks);
print_checksum<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
}
int main(int argc, char* argv[]) {
//cudaDeviceSetLimit(cudaLimitMallocHeapSize, 256*1024*1024);
initialize();
size_t heap_size;
cudaDeviceGetLimit(&heap_size, cudaLimitMallocHeapSize);
printf("CUDA heap size: %lu\n", heap_size);
//printf("Computing...\n");
//int time_running = 0;
for (int i = 0; i<500; ++i) {
if (i%50==0) {
//print_stats();
//render();
//printf(" Time: %i usec", time_running);
//time_running = 0;
}
generate_shark_fish_arrays();
// Printing: RUNNING TIME, NUM_FISH, NUM_SHARKS, CHKSUM, FISH_USE, FISH_ALLOC, SHARK_USE, SHARK_ALLOC
auto time_before = std::chrono::system_clock::now();
step();
auto time_after = std::chrono::system_clock::now();
int time_running = std::chrono::duration_cast<std::chrono::microseconds>(
time_after - time_before).count();
printf("%i,", time_running);
print_stats();
//printf("\n");
}
return 0;
}
} // namespace wa_tor
int main(int argc, char* argv[]) {
return wa_tor::main(0, nullptr);
}
|
6cb6593dbea1b186d4c3a44137bebd0465504491.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* ragged_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <memory>
#include <vector>
#include "hipcub/hipcub.hpp"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "moderngpu/kernel_mergesort.hxx"
namespace {
/*
A helper function used in RaggedShape3;
if both first and second are non-NULL, it will check if the context of them
is compatible or not and return that context if compatible;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeDim> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
// The reason for "|| RandInt(0, 2) == 0)" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// just do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back());
}
}
std::vector<RaggedShapeDim> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
// we need to work out row_splits as we always require row_splits is not
// empty for RaggedShape. Note here we suppose the last element in row_ids
// is num_rows - 1, i.e. there's no empty rows after row `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note below line will check if row_splits and row_ids are valid and agree
// with each other.
return RaggedShape(axes);
}
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
const auto &a_axes = a.Axes();
const auto &b_axes = b.Axes();
std::vector<RaggedShapeDim> axes(a_axes.size() + b_axes.size());
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i];
for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i];
return RaggedShape(axes);
}
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
K2_CHECK(row_splits1 != nullptr || row_ids1 != nullptr)
<< "At least one of row_splits1 and row_ids1 must be defined";
K2_CHECK(row_splits2 != nullptr || row_ids2 != nullptr)
<< "At least one of row_splits2 and row_ids2 must be defined";
// check context
ContextPtr ctx1 = ::GetContext(row_splits1, row_ids1);
ContextPtr ctx2 = ::GetContext(row_splits2, row_ids2);
K2_CHECK(ctx1->IsCompatible(*ctx2));
// check row_splits and row_ids of axis-1
if (cached_tot_size1 != -1) {
if (row_ids1 != nullptr) K2_CHECK_EQ(cached_tot_size1, row_ids1->Dim());
if (row_splits1 != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size1, row_splits1->Back());
}
}
// check row_splits and row_ids of axis-2
if (cached_tot_size2 != -1) {
if (row_ids2 != nullptr) K2_CHECK_EQ(cached_tot_size2, row_ids2->Dim());
if (row_splits2 != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size2, row_splits2->Back());
}
}
std::vector<RaggedShapeDim> axes(2);
// set row_splits and row_ids for axis 1
if (row_splits1 != nullptr) {
axes[0].row_splits = *row_splits1;
} else {
// work out row_splits1, see code in RaggedShape2 above for the reason
int32_t num_rows = row_ids1->Dim() == 0 ? 0 : row_ids1->Back() + 1;
Array1<int32_t> row_splits_array(ctx1, num_rows + 1);
RowIdsToRowSplits(*row_ids1, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids1 != nullptr) axes[0].row_ids = *row_ids1;
if (cached_tot_size1 == -1) {
cached_tot_size1 =
row_ids1 != nullptr ? row_ids1->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size1;
// set row_splits and row_ids for axis 2
if (row_splits2 != nullptr) {
axes[1].row_splits = *row_splits2;
} else {
    // work out row_splits2, see code in RaggedShape2 above for the reason
int32_t num_rows = row_ids2->Dim() == 0 ? 0 : row_ids2->Back() + 1;
Array1<int32_t> row_splits_array(ctx1, num_rows + 1);
RowIdsToRowSplits(*row_ids2, &row_splits_array);
axes[1].row_splits = row_splits_array;
}
if (row_ids2 != nullptr) axes[1].row_ids = *row_ids2;
if (cached_tot_size2 == -1) {
cached_tot_size2 =
row_ids2 != nullptr ? row_ids2->Dim() : axes[1].row_splits.Back();
}
axes[1].cached_tot_size = cached_tot_size2;
// we don't check here if
// row_splits1[row_splits1.Dim() - 1] == row_ids1.Dim()
// == (row_splits2.Dim() - 1)
// >= (row_ids2[row_ids2.Dim() - 1] + 1)
// but RaggedShape(axes) below will check this.
return RaggedShape(axes);
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr &c, int32_t num_axes,
int32_t *tot_sizes) {
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
// In future we might choose to allocate everything in one big array, to avoid
// multiple allocations, but for now just do it the simple way.
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1);
axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]);
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// Not check here as we did not set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
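// Returns an array, on src's context, of pointers to the row_splits data of
// axes 1 .. src.NumAxes() - 1, so a single kernel can access all row_splits
// arrays of `src` (see e.g. GetOldAndNewOffsets() below).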
Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) {
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
std::vector<int32_t *> row_splits_start(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
Array1<int32_t> &cur_splits = src.RowSplits(i);
row_splits_start[i - 1] = cur_splits.Data();
}
return Array1<int32_t *>(src.Context(), row_splits_start);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
// The reason why the code is different for axis == 0, is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
ContextPtr c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeDim is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeDim> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
auto lambda_set_mem = [=] __host__ __device__(int32_t i) -> void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
};
Eval(c, mem.Dim(), lambda_set_mem);
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
auto lambda_set_mem2 = [=] __host__ __device__(int32_t i) -> void {
mem_data[i] = i % (tot_size + 1);
};
Eval(c, mem.Dim(), lambda_set_mem2);
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeDim is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
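// Parallel version of Unsqueeze() restricted to axis == 0: wraps each src[i] in a
// new leading axis of size 1, sharing one row_splits array and one row_ids array
// across all the returned shapes.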
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(__func__);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0)
return ans;
ans.reserve(num_srcs);
ContextPtr c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; i++) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim)
max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; i++) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeDim> axes;
axes.reserve(num_axes); // note, the size of the `axes` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Axes().begin(), src[i]->Axes().end());
ans.emplace_back(axes);
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] src_row_splits_ptrs Result of calling GetRowSplitsPtr(src)
@param [in] new2old Array of indexes into axis 0 of src
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t *> &src_row_splits_ptrs,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
K2_CHECK(src.NumAxes() > 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
int32_t *const *src_row_splits_ptrs_data = src_row_splits_ptrs.Data();
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
auto lambda_set_offsets = [=] __host__ __device__(int32_t i) {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = old_offset_next - old_offset;
if (axis + 1 == num_axes) return;
old_offset = src_row_splits_ptrs_data[axis][old_offset];
old_offset_next = src_row_splits_ptrs_data[axis][old_offset_next];
}
};
Eval(c, ans_dim0, lambda_set_offsets);
ExclusiveSum(*new_offsets, new_offsets);
}
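// Worked example for GetOldAndNewOffsets (hypothetical values, and assuming
// the ExclusiveSum call above sums along each row): if `src` has 2 axes with
// row_splits1 == [ 0 2 3 5 ] and new2old == [ 2 0 ], then old_offsets is
// [ [ 2 0 ] [ 3 0 ] ] (rows correspond to axes 0 and 1) and new_offsets is
// [ [ 0 1 2 ] [ 0 2 4 ] ]: output slice j starts at old_offsets(axis, j) in
// `src` and at new_offsets(axis, j) in `ans`.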
RaggedShape Index(RaggedShape &src, const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
ContextPtr c = src.Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array1<int32_t *> src_row_splits_ptrs = GetRowSplitsPtr(src);
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, src_row_splits_ptrs, new2old, &old_offsets,
&new_offsets);
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out.Back());
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.Data());
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
ParallelRunner pr(c);
std::vector<hipStream_t> streams(num_axes);
int32_t num_jobs = ans_dim0 * 2; // note: this formula is not a heuristic;
// it's how TaskRedirect works..
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis);
TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis);
GetTaskRedirect(c, ans_dim0, new_offsets_ptr, task_redirect_ptr);
}
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
{
int32_t *this_new_row_splits = ans.RowSplits(axis + 1).Data();
const int32_t *this_old_row_splits = src.RowSplits(axis + 1).Data();
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// 0 <= ans_idx0 < ans_dim0; and 0 <= thread_idx < num_threads,
// num_threads may have any value > 0 as far as this code is concerned.
//
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_splits array are as given by
// the `axis`'th row of `offsets`; the values in the array
// are related to those in the `axis+1`'th row.
int32_t this_new_offset = new_offsets_acc(axis, ans_idx0),
next_new_offset = new_offsets_acc(axis, ans_idx0 + 1),
num_rows = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis, ans_idx0),
value_offset = new_offsets_acc(axis + 1, ans_idx0) -
old_offsets_acc(axis + 1, ans_idx0);
// Using <= instead of < below causes threads for different ans_idx0 to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_new_row_splits[this_new_offset + thread_idx] =
value_offset + this_old_row_splits[this_old_offset + thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
}
{
int32_t *this_new_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *this_old_row_ids = src.RowIds(axis + 1).Data();
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
if (elem_indexes == nullptr || axis != num_axes - 2) {
// If we don't need to write to `elem_indexes`... [caution: the next
// code block differs from this only by a statement that sets
// `elem_indexes` and they should be kept in sync.]
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_ids array are as given by
// the `axis+1`'th row of `offsets`; the values in the array
// are related to those in the `axis`'th row.
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
} else {
int32_t *elem_indexes_data = elem_indexes->Data();
// We need to write to `elem_indexes`. Note: this code block only
// differs from the above by an extra statement regarding
// `elem_indexes`. Comments have been removed.
auto lambda_set_row_ids_and_elem_indexes =
[=] __host__ __device__(int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
elem_indexes_data[this_new_offset + thread_idx] =
this_old_offset + thread_idx;
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops,
lambda_set_row_ids_and_elem_indexes);
}
}
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
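// Usage sketch for Index (hypothetical values): with the same 2-axis `src`
// as in the sketch above (row_splits1 == [ 0 2 3 5 ]) and new2old == [ 2 0 ],
//   Array1<int32_t> elem_indexes;
//   RaggedShape ans = Index(src, new2old, &elem_indexes);
// gives ans.RowSplits(1) == [ 0 2 4 ] and elem_indexes == [ 3 4 0 1 ],
// i.e. the elements of row 2 of `src` followed by those of row 0.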
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check that they have the same num-axes and compatible contexts
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
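// Worked example for GetOffsets (hypothetical values): for two 2-axis
// sources with Dim0() == { 2, 3 } and TotSize(1) == { 5, 4 } the returned
// 3 x 3 array is
//   [ 0 1 2 ]   (one implicit super-row per source)
//   [ 0 2 5 ]   (cumulative Dim0())
//   [ 0 5 9 ]   (cumulative TotSize(1))
// i.e. row (axis + 1), column i gives the offset of source i on axis `axis`
// of the appended result.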
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check that they have the same num-axes and compatible contexts
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE("Append(RaggedShape)");
if (num_srcs == 1) return **src;
K2_CHECK_GT(num_srcs, 1);
if (axis == 1) {
RaggedShape temp = Stack(axis, num_srcs, src);
return RemoveAxis(temp, axis);
}
K2_CHECK_EQ(axis, 0) << "Append() with axis > 1 not yet supported";
int32_t num_axes = src[0]->NumAxes();
ContextPtr c = src[0]->Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
// Check that they have the same num-axes and compatible contexts
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
std::vector<int32_t> tot_sizes_out(num_axes);
for (int32_t axis = 0; axis < num_axes; ++axis)
tot_sizes_out[axis] = offsets_acc(axis + 1, num_srcs);
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data());
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor(); // on GPU now (if we're using one)
ParallelRunner pr(c);
std::vector<hipStream_t> streams(num_axes);
int32_t num_jobs = num_srcs * 2;
// task_redirects is a device array (if using GPU).
// We have `num_axes - 1` different sets of row_splits/row_ids to
// populate but they have different sizes; the total number of distinct
// sizes is `num_axes`.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
// populate task_redirects (these allocate blocks of threads roughly
// proportionally to the amount of data to process from each source).
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *offsets = offsets_acc.Row(axis + 1);
// c->GetCudaStream() == stream[axis] as it has been overridden by With
GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis));
}
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
// first set the row-splits.
int32_t **this_src_row_splits = src_row_splits_acc.Row(axis),
**this_src_row_ids = src_row_ids_acc.Row(axis);
int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(),
*this_dest_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *offsets_this_axis = offsets_acc.Row(axis + 1),
*offsets_next_axis = offsets_acc.Row(axis + 2);
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_this_axis[src_idx],
next_offset = offsets_this_axis[src_idx + 1],
this_value_offset = offsets_next_axis[src_idx],
num_rows = next_offset - this_offset;
int32_t *src_row_splits_ptr = this_src_row_splits[src_idx];
// Using <= instead of < below causes threads for different src_idx to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_dest_row_splits[this_offset + thread_idx] =
this_value_offset + src_row_splits_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
{ // set the row-ids
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_next_axis[src_idx],
next_offset = offsets_next_axis[src_idx + 1],
this_value_offset = offsets_this_axis[src_idx],
num_elems = next_offset - this_offset;
int32_t *src_row_ids_ptr = this_src_row_ids[src_idx];
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_dest_row_ids[this_offset + thread_idx] =
this_value_offset + src_row_ids_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1],
target_num_loops = (tot_work > 1000000 ? 4 : 2);
// TODO(haowen): maybe we should launch kernels for row_splits and row_ids
// in different streams
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
}
}
return ans;
}
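// Usage sketch for Append (hypothetical shapes): appending two 2-axis shapes
// a (row_splits [ 0 2 5 ]) and b (row_splits [ 0 1 4 ]) along axis 0,
//   RaggedShape *srcs[] = { &a, &b };
//   RaggedShape ans = Append(0, 2, srcs);
// gives ans.RowSplits(1) == [ 0 2 5 6 9 ]: the rows of `a` followed by the
// rows of `b`, with the offsets of `b` shifted by a.NumElements().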
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
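// Illustration of RemoveAxis (hypothetical values): for a 3-axis shape with
// row_splits1 == [ 0 2 3 ] and row_splits2 == [ 0 1 3 6 ],
//   RaggedShape ans = RemoveAxis(src, 1);
// gives a 2-axis shape with row_splits [ 0 3 6 ]: each top-level row now
// holds its elements directly, the middle grouping having been removed.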
RaggedShape MakeTransposable(RaggedShape &src) {
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeDim &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
auto lambda_set_row_ids1 = [=] __host__ __device__(int32_t i) {
row_ids1_data[i] = i / max_size;
};
Eval(c, ans_tot_size1, lambda_set_row_ids1);
}
if (num_axes > 2) {
RaggedShapeDim &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
auto lambda_set_row_splits2 = [=] __host__ __device__(int32_t idx01) {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] = src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] = src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
};
Eval(c, ans_tot_size1 + 1, lambda_set_row_splits2);
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
auto lambda_set_row_ids2 = [=] __host__ __device__(int32_t idx012) {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
};
Eval(c, tot_size2, lambda_set_row_ids2);
}
}
}
// copy the remaining row_splits and row_ids (for the higher axes, if any);
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
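// Illustration of MakeTransposable (hypothetical values): for a 3-axis shape
// with row_splits1 == [ 0 2 3 ] (so MaxSize(1) == 2) and
// row_splits2 == [ 0 1 3 6 ], the second top-level row is padded with one
// empty sublist: the result has row_splits1 == [ 0 2 4 ] and
// row_splits2 == [ 0 1 3 6 6 ], with the elements unchanged.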
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) return src;
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map that maps from the first index into
// src_no_axis0_renumbered to the first index into src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
auto lambda_set_renumbering = [=] __host__ __device__(int32_t i) {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
};
Eval(c, src_tot_size1, lambda_set_renumbering);
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeDim> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
auto lambda_set_row_info = [=] __host__ __device__(int32_t i) {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
};
Eval(c, row_splits_dim + row_ids_dim, lambda_set_row_info);
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
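// Illustration of Transpose (hypothetical values): transposing a 3-axis
// shape whose top structure is a regular 2 x 3 grid (row_splits1 ==
// [ 0 3 6 ]) yields a 3 x 2 grid (row_splits1 == [ 0 2 4 6 ]); sub-list
// (i, j) of `src` becomes sub-list (j, i) of the result, and
// `value_indexes`, if supplied, maps each element of the result back to its
// position in `src`.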
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE("Stack(RaggedShape)");
K2_CHECK_GT(num_srcs, 0);
K2_CHECK(axis >= 0 && axis <= 1);
ContextPtr c = src[0]->Context();
int32_t num_axes = src[0]->NumAxes();
// Check if they have the same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(c->IsCompatible(*src[i]->Context()));
}
std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(
num_srcs, src, 0);
std::vector<RaggedShape *> unsqueezed_ptrs(num_srcs);
for (int32_t i = 0; i < num_srcs; i++)
unsqueezed_ptrs[i] = &(unsqueezed[i]);
RaggedShape ans = Append(0, num_srcs, unsqueezed_ptrs.data());
// Transpose will check that all src->Dim0() have the same value.
if (axis == 1) ans = Transpose(ans);
return ans;
}
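// Usage sketch for Stack (hypothetical shapes): stacking two 2-axis shapes
// along axis 0,
//   RaggedShape *srcs[] = { &a, &b };
//   RaggedShape ans = Stack(0, 2, srcs);
// gives a 3-axis shape with ans.Dim0() == 2 whose sub-shape 0 has the
// structure of `a` and whose sub-shape 1 has that of `b`; with axis == 1 all
// sources must additionally share the same Dim0().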
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
// row_splits = [ 0, num_elems ]; row_ids = [ 0, 0, .., 0 ] (num_elems zeros).
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
auto lambda_set_row_ids = [=] __host__ __device__(int32_t i, int32_t j) {
row_ids_data[i * dim1 + j] = i;
};
Eval2(c, dim0, dim1, lambda_set_row_ids);
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
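// Illustration: RegularRaggedShape(c, 2, 3) describes a regular 2 x 3 array,
// with row_splits == [ 0 3 6 ] and row_ids == [ 0 0 0 1 1 1 ].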
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
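// Sketch for GetCountsPartitioned (hypothetical values, and assuming
// GetCounts(values, n) returns the number of occurrences of each value in
// [0, n)): with src == [ [ 0 1 1 ] [ 3 ] ] and ans_ragged_shape describing
// [ [ x x ] [ x x ] ] (4 elements), the result is [ [ 1 2 ] [ 0 1 ] ]:
// the counts of the values 0..3, laid out with ans_ragged_shape.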
static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
int32_t num_cols) {
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by their row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context->GetDeviceId());
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim(), // num_segments
lambda_comp, *mgpu_context));
return ans;
}
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
ContextPtr &context = src.Context();
if (src.NumAxes() < 2) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
if (src.NumAxes() == 3)
return GetTransposeReorderingThreeAxesCuda(src, num_cols);
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context->GetDeviceId());
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
return ans;
}
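// Worked example for GetTransposeReordering (hypothetical values):
// interpreting a 2-axis Ragged<int32_t> as a sparse matrix whose sublists
// are rows and whose values are column indexes, e.g.
// src == [ [ 2 0 ] [ 1 2 ] ] with num_cols == 3, the returned order is
// [ 1 2 0 3 ]: element 1 (column 0) first, then element 2 (column 1), then
// elements 0 and 3 (column 2) in row order.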
RaggedShape ChangeSublistSize(RaggedShape &src, int32_t size_delta) {
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeDim axes).
std::vector<RaggedShapeDim> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Axes()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
auto lambda_set_row_splits =
[=] __host__ __device__(int32_t idx0) -> void {
row_splits_data[idx0] = src_row_splits_data[idx0] + size_delta * idx0;
};
Eval(c, num_rows + 1, lambda_set_row_splits);
}
{
With w(pr.NewStream());
auto lambda_set_row_ids1 =
[=] __host__ __device__(int32_t src_idx01) -> void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
// it's only necessary to guard the next statement with an 'if' because
// size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
};
Eval(c, src_num_elems, lambda_set_row_ids1);
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
auto lambda_set_row_ids2 = [=] __host__ __device__(int32_t i) -> void {
int32_t idx0 = i / size_delta, n = i % size_delta, next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
};
Eval(c, num_rows * size_delta, lambda_set_row_ids2);
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
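// Illustration of ChangeSublistSize (hypothetical values): with last-axis
// row_splits == [ 0 2 3 ] and size_delta == 1 the result has
// row_splits == [ 0 3 5 ], the extra row-ids being filled in by
// lambda_set_row_ids2; a negative size_delta shrinks every sublist instead
// (the caller must ensure no sublist size becomes negative).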
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) {
K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements());
// Make sure final row-ids are populated.
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()];
axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits];
axes.back().cached_tot_size = axes.back().row_ids.Dim();
return RaggedShape(axes);
}
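// Illustration of SubsampleRaggedShape (hypothetical values, assuming
// Old2New() is the exclusive-sum style old-index -> new-index map): if `src`
// has last-axis row_splits [ 0 2 4 ] and `renumbering` keeps elements 0 and
// 3 (New2Old() == [ 0 3 ]), the result has row_splits [ 0 1 2 ], one
// surviving element in each sublist.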
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeDim &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
// The variable names below use this 3-axis assumption but the
// code will work for a greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
auto lambda_set_row_ids1_and_row_splits2 =
[=] __host__ __device__(int32_t new_idx01) -> void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
// row_splits2 maps from idx01 -> idx012; both the idx01 and the idx012
// need to be mapped to the new numbering.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
};
Eval(c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2);
}
{
With w(pr.NewStream());
auto lambda_set_row_ids2 =
[=] __host__ __device__(int32_t new_idx012) -> void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
};
Eval(c, new_tot_size2, lambda_set_row_ids2);
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; a++) axes[a] = axes[0];
return RaggedShape(axes);
}
} // namespace k2
| 6cb6593dbea1b186d4c3a44137bebd0465504491.cu | /**
* @brief
* ragged_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <memory>
#include <vector>
#include "cub/cub.cuh"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "moderngpu/kernel_mergesort.hxx"
namespace {
/*
A helper function used in RaggedShape3;
if both first and second are non-NULL, it will check whether their contexts
are compatible and return that context if so;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeDim> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
// The reason for "|| RandInt(0, 2) == 0)" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back());
}
}
std::vector<RaggedShapeDim> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
// we need to work out row_splits as we always require that row_splits is
// not empty for RaggedShape. Note here we assume the last element in
// row_ids is num_rows - 1, i.e. there are no empty rows after row
// `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note below line will check if row_splits and row_ids are valid and agree
// with each other.
return RaggedShape(axes);
}
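// Usage sketch for RaggedShape2 (hypothetical values): building a 2-axis
// shape from row_splits only, given some ContextPtr c (e.g. GetCpuContext()):
//   Array1<int32_t> row_splits(c, std::vector<int32_t>{0, 2, 3});
//   RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1);
// Here shape.Dim0() == 2 and shape.NumElements() == 3; passing row_ids
// and/or cached_tot_size as well just saves recomputing them later.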
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
const auto &a_axes = a.Axes();
const auto &b_axes = b.Axes();
std::vector<RaggedShapeDim> axes(a_axes.size() + b_axes.size());
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i];
for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i];
return RaggedShape(axes);
}
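// Illustration of ComposeRaggedShapes: if `a` is a 2-axis shape with
// a.NumElements() == 4 and `b` is a 2-axis shape with b.Dim0() == 4, the
// result is the 3-axis shape whose axes 0/1 come from `a` and whose
// axes 1/2 come from `b`.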
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
K2_CHECK(row_splits1 != nullptr || row_ids1 != nullptr)
<< "At least one of row_splits1 and row_ids1 must be defined";
K2_CHECK(row_splits2 != nullptr || row_ids2 != nullptr)
<< "At least one of row_splits2 and row_ids2 must be defined";
// check context
ContextPtr ctx1 = ::GetContext(row_splits1, row_ids1);
ContextPtr ctx2 = ::GetContext(row_splits2, row_ids2);
K2_CHECK(ctx1->IsCompatible(*ctx2));
// check row_splits and row_ids of axis-1
if (cached_tot_size1 != -1) {
if (row_ids1 != nullptr) K2_CHECK_EQ(cached_tot_size1, row_ids1->Dim());
if (row_splits1 != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size1, row_splits1->Back());
}
}
// check row_splits and row_ids of axis-2
if (cached_tot_size2 != -1) {
if (row_ids2 != nullptr) K2_CHECK_EQ(cached_tot_size2, row_ids2->Dim());
if (row_splits2 != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size2, row_splits2->Back());
}
}
std::vector<RaggedShapeDim> axes(2);
// set row_splits and row_ids for axis 1
if (row_splits1 != nullptr) {
axes[0].row_splits = *row_splits1;
} else {
// work out row_splits1, see code in RaggedShape2 above for the reason
int32_t num_rows = row_ids1->Dim() == 0 ? 0 : row_ids1->Back() + 1;
Array1<int32_t> row_splits_array(ctx1, num_rows + 1);
RowIdsToRowSplits(*row_ids1, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids1 != nullptr) axes[0].row_ids = *row_ids1;
if (cached_tot_size1 == -1) {
cached_tot_size1 =
row_ids1 != nullptr ? row_ids1->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size1;
// set row_splits and row_ids for axis 2
if (row_splits2 != nullptr) {
axes[1].row_splits = *row_splits2;
} else {
// work out row_splits2, see code in RaggedShape2 above for the reason
int32_t num_rows = row_ids2->Dim() == 0 ? 0 : row_ids2->Back() + 1;
Array1<int32_t> row_splits_array(ctx1, num_rows + 1);
RowIdsToRowSplits(*row_ids2, &row_splits_array);
axes[1].row_splits = row_splits_array;
}
if (row_ids2 != nullptr) axes[1].row_ids = *row_ids2;
if (cached_tot_size2 == -1) {
cached_tot_size2 =
row_ids2 != nullptr ? row_ids2->Dim() : axes[1].row_splits.Back();
}
axes[1].cached_tot_size = cached_tot_size2;
// we don't check here if
// row_splits1[row_splits1.Dim() - 1] == row_ids1.Dim()
// == (row_splits2.Dim() - 1)
// >= (row_ids2[row_ids2.Dim() - 1] + 1)
// but RaggedShape(axes) below will check this.
return RaggedShape(axes);
}
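// Usage sketch for RaggedShape3 (hypothetical values):
//   Array1<int32_t> row_splits1(c, std::vector<int32_t>{0, 2, 3}),
//       row_splits2(c, std::vector<int32_t>{0, 1, 3, 6});
//   RaggedShape shape =
//       RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1);
// The result has 3 axes, with Dim0() == 2, TotSize(1) == 3 and
// NumElements() == 6.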
RaggedShape RaggedShapeFromTotSizes(ContextPtr &c, int32_t num_axes,
int32_t *tot_sizes) {
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
// In future we might choose to allocate everything in one big array, to avoid
// multiple allocations, but for now just do it the simple way.
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1);
axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]);
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// No validity check here, as we have not yet set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) {
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
std::vector<int32_t *> row_splits_start(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
Array1<int32_t> &cur_splits = src.RowSplits(i);
row_splits_start[i - 1] = cur_splits.Data();
}
return Array1<int32_t *>(src.Context(), row_splits_start);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
// The reason the code is different for axis == 0 is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
ContextPtr c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeDim is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeDim> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
auto lambda_set_mem = [=] __host__ __device__(int32_t i) -> void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
};
Eval(c, mem.Dim(), lambda_set_mem);
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
auto lambda_set_mem2 = [=] __host__ __device__(int32_t i) -> void {
mem_data[i] = i % (tot_size + 1);
};
Eval(c, mem.Dim(), lambda_set_mem2);
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeDim is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(__func__);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0)
return ans;
ans.reserve(num_srcs);
ContextPtr c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; i++) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim)
max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; i++) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeDim> axes;
axes.reserve(num_axes); // note, the size of the `axes` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Axes().begin(), src[i]->Axes().end());
ans.emplace_back(axes);
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] src_row_splits_ptrs Result of calling GetRowSplitsPtr(src)
@param [in] new2old Array of indexes into axis 0 of src
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t *> &src_row_splits_ptrs,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
K2_CHECK(src.NumAxes() > 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
int32_t *const *src_row_splits_ptrs_data = src_row_splits_ptrs.Data();
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
auto lambda_set_offsets = [=] __host__ __device__(int32_t i) {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = old_offset_next - old_offset;
if (axis + 1 == num_axes) return;
old_offset = src_row_splits_ptrs_data[axis][old_offset];
old_offset_next = src_row_splits_ptrs_data[axis][old_offset_next];
}
};
Eval(c, ans_dim0, lambda_set_offsets);
ExclusiveSum(*new_offsets, new_offsets);
}
RaggedShape Index(RaggedShape &src, const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
ContextPtr c = src.Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array1<int32_t *> src_row_splits_ptrs = GetRowSplitsPtr(src);
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, src_row_splits_ptrs, new2old, &old_offsets,
&new_offsets);
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out.Back());
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.Data());
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
ParallelRunner pr(c);
std::vector<cudaStream_t> streams(num_axes);
int32_t num_jobs = ans_dim0 * 2; // note: this formula is not a heuristic;
// it's how TaskRedirect works..
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis);
TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis);
GetTaskRedirect(c, ans_dim0, new_offsets_ptr, task_redirect_ptr);
}
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
{
int32_t *this_new_row_splits = ans.RowSplits(axis + 1).Data();
const int32_t *this_old_row_splits = src.RowSplits(axis + 1).Data();
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// 0 <= ans_idx0 < ans_dim0; and 0 <= thread_idx < num_threads,
// num_threads may have any value > 0 as far as this code is concerned.
//
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_splits array are as given by
// the `axis`'th row of `offsets`; the values in the array
// are related to those in the `axis+1`'th row.
int32_t this_new_offset = new_offsets_acc(axis, ans_idx0),
next_new_offset = new_offsets_acc(axis, ans_idx0 + 1),
num_rows = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis, ans_idx0),
value_offset = new_offsets_acc(axis + 1, ans_idx0) -
old_offsets_acc(axis + 1, ans_idx0);
// Using <= instead of < below causes threads for different ans_idx0 to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_new_row_splits[this_new_offset + thread_idx] =
value_offset + this_old_row_splits[this_old_offset + thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
}
{
int32_t *this_new_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *this_old_row_ids = src.RowIds(axis + 1).Data();
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
if (elem_indexes == nullptr || axis != num_axes - 2) {
// If we don't need to write to `elem_indexes`... [caution: the next
// code block differs from this only by a statement that sets
// `elem_indexes` and they should be kept in sync.]
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_ids array are as given by
// the `axis+1`'th row of `offsets`; the values in the array
// are related to those in the `axis`'th row.
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
} else {
int32_t *elem_indexes_data = elem_indexes->Data();
// We need to write to `elem_indexes`. Note: this code block only
// differs from the above by an extra statement regarding
// `elem_indexes`. Comments have been removed.
auto lambda_set_row_ids_and_elem_indexes =
[=] __host__ __device__(int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
elem_indexes_data[this_new_offset + thread_idx] =
this_old_offset + thread_idx;
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops,
lambda_set_row_ids_and_elem_indexes);
}
}
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check that they have the same num-axes and compatible contexts
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
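// Illustrative example (not from the original source): with num_srcs == 2,
// src[0] having TotSize(0) == 2 and TotSize(1) == 5, and src[1] having
// TotSize(0) == 3 and TotSize(1) == 4, the returned array is
//   row 0: [ 0 1 2 ]   (each source counts as 1)
//   row 1: [ 0 2 5 ]   (cumulative TotSize(0))
//   row 2: [ 0 5 9 ]   (cumulative TotSize(1))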
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE("Append(RaggedShape)");
if (num_srcs == 1) return **src;
K2_CHECK_GT(num_srcs, 1);
if (axis == 1) {
RaggedShape temp = Stack(axis, num_srcs, src);
return RemoveAxis(temp, axis);
}
K2_CHECK_EQ(axis, 0) << "Append() with axis > 1 not yet supported";
int32_t num_axes = src[0]->NumAxes();
ContextPtr c = src[0]->Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
std::vector<int32_t> tot_sizes_out(num_axes);
for (int32_t axis = 0; axis < num_axes; ++axis)
tot_sizes_out[axis] = offsets_acc(axis + 1, num_srcs);
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data());
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor(); // on GPU now (if we're using one)
ParallelRunner pr(c);
std::vector<cudaStream_t> streams(num_axes);
int32_t num_jobs = num_srcs * 2;
// task_redirects is a device array (if using GPU).
// We have `num_axes - 1` different sets of row_splits/row_ids to
// populate but they have different sizes; the total number of distinct
// sizes is `num_axes`.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
  // populate task_redirects (these allocate blocks of threads roughly
  // in proportion to the amount of data to process from each source).
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *offsets = offsets_acc.Row(axis + 1);
// c->GetCudaStream() == stream[axis] as it has been overridden by With
GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis));
}
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
// first set the row-splits.
int32_t **this_src_row_splits = src_row_splits_acc.Row(axis),
**this_src_row_ids = src_row_ids_acc.Row(axis);
int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(),
*this_dest_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *offsets_this_axis = offsets_acc.Row(axis + 1),
*offsets_next_axis = offsets_acc.Row(axis + 2);
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_this_axis[src_idx],
next_offset = offsets_this_axis[src_idx + 1],
this_value_offset = offsets_next_axis[src_idx],
num_rows = next_offset - this_offset;
int32_t *src_row_splits_ptr = this_src_row_splits[src_idx];
// Using <= instead of < below causes threads for different src_idx to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_dest_row_splits[this_offset + thread_idx] =
this_value_offset + src_row_splits_ptr[thread_idx];
}
};
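    // Illustrative example (not from the original source): appending two sources
    // whose row_splits on this axis are [0, 2, 5] and [0, 3], the second source has
    // this_value_offset == 5, so the destination row_splits become [0, 2, 5, 8].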
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
{ // set the row-ids
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_next_axis[src_idx],
next_offset = offsets_next_axis[src_idx + 1],
this_value_offset = offsets_this_axis[src_idx],
num_elems = next_offset - this_offset;
int32_t *src_row_ids_ptr = this_src_row_ids[src_idx];
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_dest_row_ids[this_offset + thread_idx] =
this_value_offset + src_row_ids_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1],
target_num_loops = (tot_work > 1000000 ? 4 : 2);
// TODO(haowen): maybe we should launch kernels for row_splits and row_ids
// in different streams
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
}
}
return ans;
}
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
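// Illustrative example (not from the original source): for a 3-axis shape with
// row_splits1 == [0, 2, 3] and row_splits2 == [0, 1, 3, 4], i.e. [ [[x] [x x]] [[x]] ],
// RemoveAxis(src, 1) composes the two levels and gives [ [x x x] [x] ]
// (row_splits == [0, 3, 4]).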
RaggedShape MakeTransposable(RaggedShape &src) {
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeDim> &axes_in = src.Axes();
std::vector<RaggedShapeDim> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeDim &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
auto lambda_set_row_ids1 = [=] __host__ __device__(int32_t i) {
row_ids1_data[i] = i / max_size;
};
Eval(c, ans_tot_size1, lambda_set_row_ids1);
}
if (num_axes > 2) {
RaggedShapeDim &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
auto lambda_set_row_splits2 = [=] __host__ __device__(int32_t idx01) {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] = src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] = src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
};
Eval(c, ans_tot_size1 + 1, lambda_set_row_splits2);
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
auto lambda_set_row_ids2 = [=] __host__ __device__(int32_t idx012) {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
};
Eval(c, tot_size2, lambda_set_row_ids2);
}
}
}
  // copy the remaining row_splits and row_ids;
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) return src;
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map, that maps from the first index in
// src_no_axis0_renumbered
// to the first index into src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
auto lambda_set_renumbering = [=] __host__ __device__(int32_t i) {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
};
Eval(c, src_tot_size1, lambda_set_renumbering);
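  // Illustrative example (not from the original source): with src_dim0 == 2 and
  // src_dim1 == 3 the new2old map computed above is [0, 3, 1, 4, 2, 5].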
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeDim> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
auto lambda_set_row_info = [=] __host__ __device__(int32_t i) {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
};
Eval(c, row_splits_dim + row_ids_dim, lambda_set_row_info);
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src) {
NVTX_RANGE("Stack(RaggedShape)");
K2_CHECK_GT(num_srcs, 0);
K2_CHECK(axis >= 0 && axis <= 1);
ContextPtr c = src[0]->Context();
int32_t num_axes = src[0]->NumAxes();
// Check if they have the same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(c->IsCompatible(*src[i]->Context()));
}
std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(
num_srcs, src, 0);
std::vector<RaggedShape *> unsqueezed_ptrs(num_srcs);
for (int32_t i = 0; i < num_srcs; i++)
unsqueezed_ptrs[i] = &(unsqueezed[i]);
RaggedShape ans = Append(0, num_srcs, unsqueezed_ptrs.data());
// Transpose will check if all src->Dim0() has the same value.
if (axis == 1) ans = Transpose(ans);
return ans;
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
  // row_splits = [0, num_elems]
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
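// Illustrative example (not from the original source): TrivialShape(c, 3) has
// row_splits == [0, 3] and row_ids == [0, 0, 0], i.e. the shape [ [x x x] ].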
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
auto lambda_set_row_ids = [=] __host__ __device__(int32_t i, int32_t j) {
row_ids_data[i * dim1 + j] = i;
};
Eval2(c, dim0, dim1, lambda_set_row_ids);
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
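// Illustrative example (not from the original source): RegularRaggedShape(c, 2, 3)
// has row_splits == [0, 3, 6] and row_ids == [0, 0, 0, 1, 1, 1].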
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
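// Illustrative example (not from the original source): with src.values == [2, 0, 1, 0]
// and num_cols == 3 the buckets are column 0 -> {1, 3}, column 1 -> {2},
// column 2 -> {0}, so the returned ordering is [1, 3, 2, 0].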
static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
int32_t num_cols) {
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by its row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context->GetDeviceId());
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim(), // num_segments
lambda_comp, *mgpu_context));
return ans;
}
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
ContextPtr &context = src.Context();
if (src.NumAxes() < 2) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
if (src.NumAxes() == 3)
return GetTransposeReorderingThreeAxesCuda(src, num_cols);
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
std::unique_ptr<mgpu::context_t> mgpu_context =
GetModernGpuAllocator(context->GetDeviceId());
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
return ans;
}
RaggedShape ChangeSublistSize(RaggedShape &src, int32_t size_delta) {
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeDim axes).
std::vector<RaggedShapeDim> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Axes()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
auto lambda_set_row_splits =
[=] __host__ __device__(int32_t idx0) -> void {
row_splits_data[idx0] = src_row_splits_data[idx0] + size_delta * idx0;
};
Eval(c, num_rows + 1, lambda_set_row_splits);
}
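    // Illustrative example (not from the original source): with size_delta == 1 and
    // source row_splits == [0, 2, 3] on the last axis, the new row_splits above
    // become [0, 3, 5] (each sublist grows by one element).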
{
With w(pr.NewStream());
auto lambda_set_row_ids1 =
[=] __host__ __device__(int32_t src_idx01) -> void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
        // it's only necessary to guard the next statement with an 'if' because
// size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
};
Eval(c, src_num_elems, lambda_set_row_ids1);
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
auto lambda_set_row_ids2 = [=] __host__ __device__(int32_t i) -> void {
int32_t idx0 = i / size_delta, n = i % size_delta, next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
};
Eval(c, num_rows * size_delta, lambda_set_row_ids2);
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) {
K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements());
// Make sure final row-ids are populated.
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()];
axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits];
axes.back().cached_tot_size = axes.back().row_ids.Dim();
return RaggedShape(axes);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeDim> axes = src.Axes();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeDim &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
// The variable names below use this 3-axis assumption but the
  // code will work for a greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
auto lambda_set_row_ids1_and_row_splits2 =
[=] __host__ __device__(int32_t new_idx01) -> void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
      // row_splits2 maps from idx01 -> idx012. Both the positions (idx01's) and
      // the values (idx012's) must be mapped.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
};
Eval(c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2);
}
{
With w(pr.NewStream());
auto lambda_set_row_ids2 =
[=] __host__ __device__(int32_t new_idx012) -> void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
};
Eval(c, new_tot_size2, lambda_set_row_ids2);
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeDim> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; a++) axes[a] = axes[0];
return RaggedShape(axes);
}
} // namespace k2
|
bf2d089c19ee92f6170c491e0cf88c490750a9cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
using namespace std;
#define hist_size 256
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
/**
* Calculates the histogram in parallel where each thread does one pixel of the image.
* A block creates a local histogram in shared memory.
*/
__global__ void histogramKernel(unsigned char* __restrict__ image, long img_size, unsigned int* __restrict__ histos) {
__shared__ unsigned int shared_histo[hist_size];
unsigned int tid = threadIdx.x;
unsigned int i = tid + blockDim.x * blockIdx.x;
    // initialize shared memory to 0 in parallel (first 256 threads in each block)
if(tid < hist_size) {
shared_histo[tid] = 0;
}
    // make sure that all writes to shared memory are finished
__syncthreads();
if(i < img_size) {
atomicAdd(&shared_histo[image[i]], 1);
}
    // make sure that all writes to shared memory are finished
__syncthreads();
// write histogram of block back to global memory
if(tid < hist_size) {
        // advance the histogram pointer to this block's histogram
histos += blockIdx.x * hist_size;
histos[tid] = shared_histo[tid];
}
}
/**
* Reduces two histograms to one.
*/
__global__ void reduceKernel(unsigned int* __restrict__ histos, const int reduce_blocks, const int last_reduction_blocks) {
unsigned int tid = threadIdx.x;
if((blockIdx.x + reduce_blocks) < last_reduction_blocks) {
// get current position
int thread = blockIdx.x * hist_size + tid;
// get position from block to reduce
int thread_next = (blockIdx.x + reduce_blocks) * hist_size + tid;
histos[thread] += histos[thread_next];
}
}
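/**
 * Illustrative reduction sequence (not from the original source): starting from 10
 * per-block histograms, the host loop in histogramCuda below launches reduceKernel
 * with reduce_blocks = 6, 4, 2, 1 (last_reduction = 10, 6, 4, 2), after which
 * block 0 holds the final histogram.
 */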
/**
* Prepares the GPU for kernel execution of the optimized histogram kernel and the reduce kernel.
*/
void histogramCuda(unsigned char* image, long img_size, unsigned int* histogram) {
int threadBlockSize = 1024;
int blocks;
// calculate number of blocks based on img_size
blocks = img_size / threadBlockSize;
if(img_size % threadBlockSize != 0) {
blocks++;
}
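    // e.g. (illustrative): img_size == 655360 with threadBlockSize == 1024 gives blocks == 640.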
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(hipMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHistos = NULL;
checkCudaCall(hipMalloc((void **) &deviceHistos, blocks * hist_size * sizeof(unsigned int)));
if (deviceHistos == NULL) {
checkCudaCall(hipFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
timer kernelTime1 = timer("kernelTime1");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceImage, image, img_size*sizeof(unsigned char), hipMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
hipLaunchKernelGGL(( histogramKernel), dim3(blocks), dim3(threadBlockSize), 0, 0, deviceImage, img_size, deviceHistos);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
    // reduce the local histograms of the blocks to one final histogram
int reduce_blocks = (int) ceil(blocks / 2.0);
if(reduce_blocks % 2 != 0) {
reduce_blocks++;
}
int last_reduction = blocks;
while(reduce_blocks >= 1) {
// execute reduce kernel
kernelTime1.start();
hipLaunchKernelGGL(( reduceKernel), dim3(reduce_blocks), dim3(hist_size), 0, 0, deviceHistos, reduce_blocks, last_reduction);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
if(floor(reduce_blocks / 2.0) == 0) {
break;
}
last_reduction = reduce_blocks;
reduce_blocks = (int) ceil(reduce_blocks / 2.0);
if(reduce_blocks % 2 != 0 && reduce_blocks != 1) {
reduce_blocks++;
}
}
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(histogram, deviceHistos, hist_size * sizeof(unsigned int), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceImage));
checkCudaCall(hipFree(deviceHistos));
cout << "histogram (kernel): \t\t" << kernelTime1 << endl;
cout << "histogram (memory): \t\t" << memoryTime << endl;
cout << "histogram total: \t\t = " << (kernelTime1.getTimeInSeconds() + memoryTime.getTimeInSeconds()) << " seconds" << endl;
}
/**
* Calculates the histogram in parallel where each thread does one pixel of the image.
*/
__global__ void histogramKernelSimple(unsigned char* __restrict__ image, long img_size, unsigned int* __restrict__ histogram) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < img_size) {
atomicAdd(&histogram[image[i]], 1);
}
}
/**
* Prepares the GPU for kernel execution of the simple histogram kernel.
*/
void histogramCudaSimple(unsigned char* image, long img_size, unsigned int* histogram) {
int threadBlockSize = 1024;
int blocks;
// calculate number of blocks based on img_size
blocks = img_size / threadBlockSize;
if(img_size % threadBlockSize != 0) {
blocks++;
}
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(hipMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHisto = NULL;
checkCudaCall(hipMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int)));
if (deviceHisto == NULL) {
checkCudaCall(hipFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceImage, image, img_size*sizeof(unsigned char), hipMemcpyHostToDevice));
checkCudaCall(hipMemset(deviceHisto, 0, hist_size * sizeof(unsigned int)));
memoryTime.stop();
// execute kernel
kernelTime1.start();
hipLaunchKernelGGL(( histogramKernelSimple), dim3(blocks), dim3(threadBlockSize), 0, 0, deviceImage, img_size, deviceHisto);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceImage));
checkCudaCall(hipFree(deviceHisto));
cout << "histogram simple (kernel): \t" << kernelTime1 << endl;
cout << "histogram simple (memory): \t" << memoryTime << endl;
cout << "histogram simple total: \t = " << (kernelTime1.getTimeInSeconds() + memoryTime.getTimeInSeconds()) << " seconds" << endl;
}
/**
* Calculates the histogram sequentially.
*/
void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram) {
int i;
timer sequentialTime = timer("Sequential");
for (i=0; i<hist_size; i++) histogram[i]=0;
sequentialTime.start();
for (i=0; i<img_size; i++) {
histogram[image[i]]++;
}
sequentialTime.stop();
cout << "histogram (seq): \t\t" << sequentialTime << endl;
}
/**
* usage: ./myhistogram
*
* arguments:
* -l {number of elements} image size in 1D. default: 655360
* -s execute kernel using simple version. default: optimized version
* -b creates a black image. default: random
*/
int main(int argc, char* argv[]) {
int c;
long img_size = 655360;
int simple = 0;
int black = 0;
while((c = getopt(argc, argv, "l:sb")) != -1) {
switch(c) {
case 'l':
img_size = atoi(optarg);
break;
case 's':
simple = 1;
break;
case 'b':
black = 1;
break;
case '?':
if(optopt == 'l') {
fprintf(stderr, "Option -%c requires an argument.\n", optopt);
}
else if(isprint(optopt)) {
fprintf(stderr, "Unknown option '-%c'.\n", optopt);
}
else {
fprintf(stderr, "Unknown option character '\\x%x'.\n", optopt);
}
return -1;
default:
return -1;
}
}
unsigned char *image = (unsigned char *)malloc(img_size * sizeof(unsigned char));
unsigned int *histogramS = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
unsigned int *histogram = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
// initialize the vectors.
for(long i=0; i<img_size; i++) {
if(black) {
image[i] = 0;
} else {
image[i] = (unsigned char) (rand() % hist_size);
}
}
cout << "Compute the histogram of a gray image with " << img_size << " pixels." << endl;
histogramSeq(image, img_size, histogramS);
if(simple == 1) {
// call simple implementation
histogramCudaSimple(image, img_size, histogram);
} else {
// call optimized
histogramCuda(image, img_size, histogram);
}
// verify the results
for(int i=0; i<hist_size; i++) {
if (histogram[i]!=histogramS[i]) {
cout << "error in results! Bin " << i << " is "<< histogram[i] << ", but should be " << histogramS[i] << endl;
exit(1);
}
}
cout << "results OK!" << endl;
free(image);
free(histogram);
free(histogramS);
return 0;
}
| bf2d089c19ee92f6170c491e0cf88c490750a9cc.cu | #include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
using namespace std;
#define hist_size 256
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
/**
* Calculates the histogram in parallel where each thread does one pixel of the image.
* A block creates a local histogram in shared memory.
*/
__global__ void histogramKernel(unsigned char* __restrict__ image, long img_size, unsigned int* __restrict__ histos) {
__shared__ unsigned int shared_histo[hist_size];
unsigned int tid = threadIdx.x;
unsigned int i = tid + blockDim.x * blockIdx.x;
    // initialize shared memory to 0 in parallel (first 256 threads in each block)
if(tid < hist_size) {
shared_histo[tid] = 0;
}
    // make sure that all writes to shared memory are finished
__syncthreads();
if(i < img_size) {
atomicAdd(&shared_histo[image[i]], 1);
}
    // make sure that all writes to shared memory are finished
__syncthreads();
// write histogram of block back to global memory
if(tid < hist_size) {
        // advance the histogram pointer to this block's histogram
histos += blockIdx.x * hist_size;
histos[tid] = shared_histo[tid];
}
}
/**
* Reduces two histograms to one.
*/
__global__ void reduceKernel(unsigned int* __restrict__ histos, const int reduce_blocks, const int last_reduction_blocks) {
unsigned int tid = threadIdx.x;
if((blockIdx.x + reduce_blocks) < last_reduction_blocks) {
// get current position
int thread = blockIdx.x * hist_size + tid;
// get position from block to reduce
int thread_next = (blockIdx.x + reduce_blocks) * hist_size + tid;
histos[thread] += histos[thread_next];
}
}
/**
* Prepares the GPU for kernel execution of the optimized histogram kernel and the reduce kernel.
*/
void histogramCuda(unsigned char* image, long img_size, unsigned int* histogram) {
int threadBlockSize = 1024;
int blocks;
// calculate number of blocks based on img_size
blocks = img_size / threadBlockSize;
if(img_size % threadBlockSize != 0) {
blocks++;
}
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(cudaMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHistos = NULL;
checkCudaCall(cudaMalloc((void **) &deviceHistos, blocks * hist_size * sizeof(unsigned int)));
if (deviceHistos == NULL) {
checkCudaCall(cudaFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
timer kernelTime1 = timer("kernelTime1");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceImage, image, img_size*sizeof(unsigned char), cudaMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
histogramKernel<<<blocks, threadBlockSize>>>(deviceImage, img_size, deviceHistos);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
    // reduce the local histograms of the blocks to one final histogram
int reduce_blocks = (int) ceil(blocks / 2.0);
if(reduce_blocks % 2 != 0) {
reduce_blocks++;
}
int last_reduction = blocks;
while(reduce_blocks >= 1) {
// execute reduce kernel
kernelTime1.start();
reduceKernel<<<reduce_blocks, hist_size>>>(deviceHistos, reduce_blocks, last_reduction);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
if(floor(reduce_blocks / 2.0) == 0) {
break;
}
last_reduction = reduce_blocks;
reduce_blocks = (int) ceil(reduce_blocks / 2.0);
if(reduce_blocks % 2 != 0 && reduce_blocks != 1) {
reduce_blocks++;
}
}
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(histogram, deviceHistos, hist_size * sizeof(unsigned int), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceImage));
checkCudaCall(cudaFree(deviceHistos));
cout << "histogram (kernel): \t\t" << kernelTime1 << endl;
cout << "histogram (memory): \t\t" << memoryTime << endl;
cout << "histogram total: \t\t = " << (kernelTime1.getTimeInSeconds() + memoryTime.getTimeInSeconds()) << " seconds" << endl;
}
/**
* Calculates the histogram in parallel where each thread does one pixel of the image.
*/
__global__ void histogramKernelSimple(unsigned char* __restrict__ image, long img_size, unsigned int* __restrict__ histogram) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < img_size) {
atomicAdd(&histogram[image[i]], 1);
}
}
/**
* Prepares the GPU for kernel execution of the simple histogram kernel.
*/
void histogramCudaSimple(unsigned char* image, long img_size, unsigned int* histogram) {
int threadBlockSize = 1024;
int blocks;
// calculate number of blocks based on img_size
blocks = img_size / threadBlockSize;
if(img_size % threadBlockSize != 0) {
blocks++;
}
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(cudaMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHisto = NULL;
checkCudaCall(cudaMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int)));
if (deviceHisto == NULL) {
checkCudaCall(cudaFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceImage, image, img_size*sizeof(unsigned char), cudaMemcpyHostToDevice));
checkCudaCall(cudaMemset(deviceHisto, 0, hist_size * sizeof(unsigned int)));
memoryTime.stop();
// execute kernel
kernelTime1.start();
histogramKernelSimple<<<blocks, threadBlockSize>>>(deviceImage, img_size, deviceHisto);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceImage));
checkCudaCall(cudaFree(deviceHisto));
cout << "histogram simple (kernel): \t" << kernelTime1 << endl;
cout << "histogram simple (memory): \t" << memoryTime << endl;
cout << "histogram simple total: \t = " << (kernelTime1.getTimeInSeconds() + memoryTime.getTimeInSeconds()) << " seconds" << endl;
}
/**
* Calculates the histogram sequentially.
*/
void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram) {
int i;
timer sequentialTime = timer("Sequential");
for (i=0; i<hist_size; i++) histogram[i]=0;
sequentialTime.start();
for (i=0; i<img_size; i++) {
histogram[image[i]]++;
}
sequentialTime.stop();
cout << "histogram (seq): \t\t" << sequentialTime << endl;
}
/**
* usage: ./myhistogram
*
* arguments:
* -l {number of elements} image size in 1D. default: 655360
* -s execute kernel using simple version. default: optimized version
* -b creates a black image. default: random
*/
int main(int argc, char* argv[]) {
int c;
long img_size = 655360;
int simple = 0;
int black = 0;
while((c = getopt(argc, argv, "l:sb")) != -1) {
switch(c) {
case 'l':
img_size = atoi(optarg);
break;
case 's':
simple = 1;
break;
case 'b':
black = 1;
break;
case '?':
if(optopt == 'l') {
fprintf(stderr, "Option -%c requires an argument.\n", optopt);
}
else if(isprint(optopt)) {
fprintf(stderr, "Unknown option '-%c'.\n", optopt);
}
else {
fprintf(stderr, "Unknown option character '\\x%x'.\n", optopt);
}
return -1;
default:
return -1;
}
}
unsigned char *image = (unsigned char *)malloc(img_size * sizeof(unsigned char));
unsigned int *histogramS = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
unsigned int *histogram = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
// initialize the vectors.
for(long i=0; i<img_size; i++) {
if(black) {
image[i] = 0;
} else {
image[i] = (unsigned char) (rand() % hist_size);
}
}
cout << "Compute the histogram of a gray image with " << img_size << " pixels." << endl;
histogramSeq(image, img_size, histogramS);
if(simple == 1) {
// call simple implementation
histogramCudaSimple(image, img_size, histogram);
} else {
// call optimized
histogramCuda(image, img_size, histogram);
}
// verify the results
for(int i=0; i<hist_size; i++) {
if (histogram[i]!=histogramS[i]) {
cout << "error in results! Bin " << i << " is "<< histogram[i] << ", but should be " << histogramS[i] << endl;
exit(1);
}
}
cout << "results OK!" << endl;
free(image);
free(histogram);
free(histogramS);
return 0;
}
|
5ae230071563ff1001b08111ca114b6946b65e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm> // std::min std::max
using namespace std;
#define TILE_WIDTH 32 // Width of block size (32 threads)
#define TILE_HEIGHT 32 // Height of block size (32 threads)
#define MASK_RADIUS 5 // Mask radius
#define MASK_DIAMETER (MASK_RADIUS*2 + 1) // Mask diameter
#define MASK_SIZE (MASK_DIAMETER*MASK_DIAMETER) // Mask size
#define BLOCK_WIDTH (TILE_WIDTH + (2*MASK_RADIUS)) // Width of shared memory block
#define BLOCK_HEIGHT (TILE_HEIGHT + (2*MASK_RADIUS)) // Height of shared memory block
__constant__ float D_MASK[121];
// CUDA Thread Indexing Cheatsheet https://cs.calvin.edu/courses/cs/374/CUDA/CUDA-Thread-Indexing-Cheatsheet.pdf
// Filter example https://www.nvidia.com/content/nvision2008/tech_presentations/Game_Developer_Track/NVISION08-Image_Processing_and_Video_with_CUDA.pdf
// Matrix multiplication example http://selkie.macalester.edu/csinparallel/modules/GPUProgramming/build/html/CUDA2D/CUDA2D.html
// Ex 2a) Kernel that applies the Gaussian filter on the GPU using shared memory (compare times and nvprof against the practical-3 blur without the mask as const float* __restrict__ d_msk)
// Ex 2b-1) Add the mask as const float* __restrict__ d_msk (and compare times with 2a)
// These qualifiers say that the data is read-only (const) and that this is the only pointer that aliases it (__restrict__)
// This lets the compiler perform optimizations and use the constant cache
// Ex 2b-2) Copy the mask with __constant__ and hipMemcpyToSymbol (so that it resides in constant memory) (and compare times with 2b-1)
// Here we are taking advantage of constant memory.
// Constant memory is 64KB and is optimized so that if every access in a warp touches the same element the access is optimal
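// Host-side sketch (assumed, not part of this file): before launching blur_kernel_c the
// mask would be copied into constant memory, e.g.
//   CUDA_CHK(hipMemcpyToSymbol(HIP_SYMBOL(D_MASK), msk, MASK_SIZE * sizeof(float)));
// where `msk` is the host mask (e.g. the msk argument of blur_gpu below).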
__global__ void blur_kernel_a(float* d_input, int width, int height, float* d_output, float* d_msk) {
__shared__ float block_memory[BLOCK_WIDTH][BLOCK_HEIGHT];
    // Coordinates of the pixel this thread is responsible for writing
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
    // Load shared memory while maximizing parallelism across the threads.
    // To fill the BLOCK_WIDTH x BLOCK_HEIGHT shared-memory tile with 32x32 thread blocks, as the assignment requires,
    // each thread reads and writes a pixel shifted by (-MASK_RADIUS, -MASK_RADIUS) (MASK_RADIUS up and to the left)
// d_input auxiliary indexes
int shifted_imgx = imgx - MASK_RADIUS;
int shifted_imgy = imgy - MASK_RADIUS;
int right_shifted_imgx = shifted_imgx + blockDim.x;
int under_shifted_imgy = shifted_imgy + blockDim.y;
int shifted_image_position_y = shifted_imgy*width;
int under_shifted_image_position_y = under_shifted_imgy*width;
// block_memory auxiliary indexes
int memory_index_x = threadIdx.x;
int memory_index_y = threadIdx.y;
int right_shifted_memory_index_x = memory_index_x + blockDim.x;
int under_shifted_memory_index_y = memory_index_y + blockDim.y;
    // Each thread loads its position shifted MASK_RADIUS to the left and MASK_RADIUS up (-MASK_RADIUS, -MASK_RADIUS)
if (shifted_imgx >= 0 && shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[memory_index_x][memory_index_y] = d_input[shifted_image_position_y + shifted_imgx];
}
    // Each thread loads its position shifted (blockDim.x - MASK_RADIUS) to the right and MASK_RADIUS up
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && memory_index_y >= 0 && memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][memory_index_y] = d_input[shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][memory_index_y] = 0;
}
}
    // Each thread loads its position shifted MASK_RADIUS to the left and (blockDim.y - MASK_RADIUS) down
if (memory_index_x >= 0 && memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(shifted_imgx >= 0 && shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + shifted_imgx];
} else {
block_memory[memory_index_x][under_shifted_memory_index_y] = 0;
}
}
    // Each thread loads its position shifted (blockDim.x - MASK_RADIUS) to the right and (blockDim.y - MASK_RADIUS) down
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = 0;
}
}
__syncthreads();
    // Apply the mask (blur)
float val_pixel = 0;
int ix, iy, full_image_ix, full_image_iy;
    // Take the indexes of the central (32x32) region by shifting +MASK_RADIUS,+MASK_RADIUS (down and to the right)
int memory_imgx = threadIdx.x + MASK_RADIUS;
int memory_imgy = threadIdx.y + MASK_RADIUS;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
full_image_ix = imgx + i;
if (full_image_ix >= 0 && full_image_ix < width) {
ix = memory_imgx + i;
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
full_image_iy = imgy + j;
if (full_image_iy >= 0 && full_image_iy < height) {
iy = memory_imgy + j;
                    // Adjust the pixel value according to its neighbors.
val_pixel += block_memory[ix][iy] * d_msk[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
    // Write the output
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
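// Launch sketch (illustrative, assumed host code; d_input, d_output and d_msk are hypothetical device buffers):
//   dim3 block(TILE_WIDTH, TILE_HEIGHT);
//   dim3 grid((width + TILE_WIDTH - 1) / TILE_WIDTH, (height + TILE_HEIGHT - 1) / TILE_HEIGHT);
//   hipLaunchKernelGGL(blur_kernel_a, grid, block, 0, 0, d_input, width, height, d_output, d_msk);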
// Ex 2b-1) Add the mask as const float* __restrict__ d_msk (and compare times with 2a)
// These qualifiers say that the data is read-only (const) and that no other pointer aliases its address (__restrict__)
// This lets the compiler perform optimizations and use the constant cache
__global__ void blur_kernel_b(float* d_input, int width, int height, float* d_output, const float* __restrict__ d_msk) {
__shared__ float block_memory[BLOCK_WIDTH][BLOCK_HEIGHT];
    // Coordinates of the pixel this thread is responsible for writing
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
    // Load shared memory while maximizing parallelism across the threads.
    // To fill the BLOCK_WIDTH x BLOCK_HEIGHT shared-memory tile with 32x32 thread blocks, as the assignment requires,
    // each thread reads and writes a pixel shifted by (-MASK_RADIUS, -MASK_RADIUS) (MASK_RADIUS up and to the left)
// d_input auxiliary indexes
int shifted_imgx = imgx - MASK_RADIUS;
int shifted_imgy = imgy - MASK_RADIUS;
int right_shifted_imgx = shifted_imgx + blockDim.x;
int under_shifted_imgy = shifted_imgy + blockDim.y;
int shifted_image_position_y = shifted_imgy*width;
int under_shifted_image_position_y = under_shifted_imgy*width;
// block_memory auxiliary indexes
int memory_index_x = threadIdx.x;
int memory_index_y = threadIdx.y;
int right_shifted_memory_index_x = memory_index_x + blockDim.x;
int under_shifted_memory_index_y = memory_index_y + blockDim.y;
    // Each thread loads its position shifted MASK_RADIUS to the left and MASK_RADIUS up (-MASK_RADIUS, -MASK_RADIUS)
if (shifted_imgx >= 0 && shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[memory_index_x][memory_index_y] = d_input[shifted_image_position_y + shifted_imgx];
}
    // Each thread loads its position shifted (blockDim.x - MASK_RADIUS) to the right and MASK_RADIUS up
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && memory_index_y >= 0 && memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][memory_index_y] = d_input[shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][memory_index_y] = 0;
}
}
    // Each thread loads its position shifted MASK_RADIUS to the left and (blockDim.y - MASK_RADIUS) down
if (memory_index_x >= 0 && memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(shifted_imgx >= 0 && shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + shifted_imgx];
} else {
block_memory[memory_index_x][under_shifted_memory_index_y] = 0;
}
}
    // Each thread loads its position shifted (blockDim.x - MASK_RADIUS) to the right and (blockDim.y - MASK_RADIUS) down
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = 0;
}
}
__syncthreads();
    // Apply the mask (blur)
float val_pixel = 0;
int ix, iy, full_image_ix, full_image_iy;
    // Take the indexes of the central (32x32) region by shifting +MASK_RADIUS,+MASK_RADIUS (down and to the right)
int memory_imgx = threadIdx.x + MASK_RADIUS;
int memory_imgy = threadIdx.y + MASK_RADIUS;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
full_image_ix = imgx + i;
if (full_image_ix >= 0 && full_image_ix < width) {
ix = memory_imgx + i;
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
full_image_iy = imgy + j;
if (full_image_iy >= 0 && full_image_iy < height) {
iy = memory_imgy + j;
                    // Adjust the pixel value according to its neighbors.
val_pixel += block_memory[ix][iy] * d_msk[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
    // Write the output
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Ex 2b-2) Copy the mask with __constant__ and hipMemcpyToSymbol (so that it resides in constant memory) (and compare times with 2b-1)
// Each thread reads the mask from the GPU's constant memory at register-like speed.
// Constant memory is 64KB and is optimized so that if every access in a warp touches the same element the access is optimal
__global__ void blur_kernel_c(float* d_input, int width, int height, float* d_output) {
__shared__ float block_memory[BLOCK_WIDTH][BLOCK_HEIGHT];
    // Coordinates of the pixel this thread is responsible for writing
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
    // Load shared memory while maximizing parallelism across the threads.
    // To fill the BLOCK_WIDTH x BLOCK_HEIGHT shared-memory tile with 32x32 thread blocks, as the assignment requires,
    // each thread reads and writes a pixel shifted by (-MASK_RADIUS, -MASK_RADIUS) (MASK_RADIUS up and to the left)
// d_input auxiliary indexes
int shifted_imgx = imgx - MASK_RADIUS;
int shifted_imgy = imgy - MASK_RADIUS;
int right_shifted_imgx = shifted_imgx + blockDim.x;
int under_shifted_imgy = shifted_imgy + blockDim.y;
int shifted_image_position_y = shifted_imgy*width;
int under_shifted_image_position_y = under_shifted_imgy*width;
// block_memory auxiliary indexes
int memory_index_x = threadIdx.x;
int memory_index_y = threadIdx.y;
int right_shifted_memory_index_x = memory_index_x + blockDim.x;
int under_shifted_memory_index_y = memory_index_y + blockDim.y;
    // Each thread loads its position shifted MASK_RADIUS to the left and MASK_RADIUS up (-MASK_RADIUS, -MASK_RADIUS)
if (shifted_imgx >= 0 && shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[memory_index_x][memory_index_y] = d_input[shifted_image_position_y + shifted_imgx];
}
    // Each thread loads its position shifted (blockDim.x - MASK_RADIUS) to the right and MASK_RADIUS up
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && memory_index_y >= 0 && memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][memory_index_y] = d_input[shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][memory_index_y] = 0;
}
}
    // Each thread loads its position shifted MASK_RADIUS to the left and (blockDim.y - MASK_RADIUS) down
if (memory_index_x >= 0 && memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(shifted_imgx >= 0 && shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + shifted_imgx];
} else {
block_memory[memory_index_x][under_shifted_memory_index_y] = 0;
}
}
    // Each thread loads its position shifted (blockDim.x - MASK_RADIUS) to the right and (blockDim.y - MASK_RADIUS) down
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = 0;
}
}
__syncthreads();
// Aplicación de la máscara (blur)
float val_pixel = 0;
int ix, iy, full_image_ix, full_image_iy;
// Tomamos los indices del centro (32x32) de la imagen shifteando +2,+2 (hacia abajo y derecha)
int memory_imgx = threadIdx.x + MASK_RADIUS;
int memory_imgy = threadIdx.y + MASK_RADIUS;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
full_image_ix = imgx + i;
if (full_image_ix >= 0 && full_image_ix < width) {
ix = memory_imgx + i;
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
full_image_iy = imgy + j;
if (full_image_iy >= 0 && full_image_iy < height) {
iy = memory_imgy + j;
// Altera el valor de un pixel, según sus vecinos.
val_pixel += block_memory[ix][iy] * D_MASK[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
// Escribimos la salida
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Blur kernel mem global
__global__ void blur_kernel_global(float* d_input, int width, int height, float* d_output, float* d_msk) {
// Coordenada del pixel que al hilo le corresponde escribir
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
float val_pixel = 0;
int ix, iy;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
ix = imgx + i;
if (ix >= 0 && ix < width) {
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
iy = imgy + j;
if (iy >= 0 && iy < height) {
// Altera el valor de un pixel, según sus vecinos.
val_pixel += d_input[(iy * width) + ix] * d_msk[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
// Escribimos la salida
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Ej 2) Aplica un filtro Gaussiano que reduce el ruido de una imagen en escala de grises.
// El filtro sustituye el valor de intensidad de cada pixel por un promedio ponderado de los pixeles vecinos.
// Los pesos por los cuales se pondera cada vecino en el promedio se almacenan en una matriz cuadrada (máscara)
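// Illustrative sketch of the square mask described above (hypothetical helper, not called in this file;
// assumes expf from <math.h> is available): a normalized Gaussian of diameter m_size and width sigma.
static void make_gaussian_mask_demo(float* msk, int m_size, float sigma) {
    float sum = 0.0f;
    for (int i = 0; i < m_size; i++) {
        for (int j = 0; j < m_size; j++) {
            float dx = (float)(i - m_size / 2), dy = (float)(j - m_size / 2);
            msk[i * m_size + j] = expf(-(dx * dx + dy * dy) / (2.0f * sigma * sigma));
            sum += msk[i * m_size + j];
        }
    }
    for (int k = 0; k < m_size * m_size; k++) msk[k] /= sum; // the weights add up to 1
}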
void blur_gpu(float * img_in, int width, int height, float * img_out, float msk[], int algorithm){
switch(algorithm) {
// Práctico 3) Kernel con memoria global
case 1:
printf("\n");
printf("-> Kernel con memoria global\n");
break;
// Ej 2a) Kernel con memoria compartida
case 2:
printf("\n");
printf("-> Kernel con memoria compartida\n");
break;
// Ej 2b1) Kernel con memoria compartida y optimizando la máscara cómo read_only y restricted pointer.
case 3:
printf("\n");
printf("-> Kernel con memoria compartida y optimizando la mscara cmo read_only y restricted pointer\n");
break;
// Ej 2b2) Kernel con con memoria compartida y almacenando la máscara en la memoria constante de la GPU
case 4:
printf("\n");
printf("-> Kernel con memoria compartida y almacenando la mscara en memoria constante\n");
break;
default:
printf("Invocar como: './ej2.x nombre_archivo, algoritmo'\n");
printf("-> Algoritmo:\n");
printf("\t 1 - Kernel con memoria global\n");
printf("\t 2 - Kernel con memoria compartida\n");
printf("\t 3 - Kernel con memoria compartida y mascara read_only con restricted pointer\n");
printf("\t 4 - Kernel con memoria compartida y mascara en memoria constante\n");
printf("\t 0 - Todos los algoritmos\n");
}
// Auxiliar para contar tiempo total
// float t_total = 0;
// Etapa 1: Reserva de Memoria
// CLK_CUEVTS_INIT;
// CLK_CUEVTS_START;
// Reserva en CPU
unsigned int size = width * height * sizeof(float);
unsigned int size_msk = MASK_SIZE * sizeof(float);
float * device_img_in = (float *)malloc(size);
float * device_img_out = (float *)malloc(size);
float * device_msk;
// Reserva en GPU
CUDA_CHK(hipMalloc((void**)& device_img_in, size));
CUDA_CHK(hipMalloc((void**)& device_img_out, size));
if(algorithm != 4) {
device_msk = (float *)malloc(size_msk);
CUDA_CHK(hipMalloc((void**)& device_msk, size_msk));
}
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Reserva de memoria): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// Etapa 2: Transferencia de datos (Host -> Device)
// CLK_CUEVTS_START;
CUDA_CHK(hipMemcpy(device_img_in, img_in, size, hipMemcpyHostToDevice)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia
if(algorithm != 4) {
// Transfiero la mascara a la memoria de la GPU
CUDA_CHK(hipMemcpy(device_msk, msk, size_msk, hipMemcpyHostToDevice)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia
} else {
// Guardo la mascara en la memoria constante de la GPU
CUDA_CHK(hipMemcpyToSymbol(D_MASK, msk, size_msk, 0, hipMemcpyHostToDevice));
}
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Transferencia de datos (Host -> Device)): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// Etapa 3: Definir grilla
int amount_of_blocks_x = width / TILE_WIDTH + (width % TILE_WIDTH != 0); // Division with ceiling
int amount_of_blocks_y = height / TILE_HEIGHT + (height % TILE_HEIGHT != 0); // Division with ceiling
dim3 tamGrid(amount_of_blocks_x, amount_of_blocks_y); // Grid dimension
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
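// Worked example of the ceiling division above: width = 1000, TILE_WIDTH = 32
// -> 1000 / 32 = 31 full blocks plus (1000 % 32 = 8) leftover pixels, so 31 + 1 = 32 blocks in x.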
// Etapa 4 : Lanzar Kernel
// CLK_CUEVTS_START;
switch(algorithm) {
// Práctico 3) Kernel con memoria global
case 1:
hipLaunchKernelGGL(( blur_kernel_global), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out, device_msk);
break;
// Ej 2a) Kernel con memoria compartida
case 2:
hipLaunchKernelGGL(( blur_kernel_a), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out, device_msk);
break;
// Ej 2b1) Kernel con memoria compartida y optimizando la máscara cómo read_only y restricted pointer.
case 3:
hipLaunchKernelGGL(( blur_kernel_b), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out, device_msk);
break;
// Ej 2b2) Kernel con con memoria compartida y almacenando la máscara en la memoria constante de la GPU
case 4:
hipLaunchKernelGGL(( blur_kernel_c), dim3(tamGrid), dim3(tamBlock), 0, 0, device_img_in, width, height, device_img_out);
break;
}
// Sincronizar threads antes de parar timers
hipDeviceSynchronize();
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Kernel): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// Etapa 5: Transferencia de Datos (Device -> Host)
// CLK_CUEVTS_START;
CUDA_CHK(hipMemcpy(img_out, device_img_out, size, hipMemcpyDeviceToHost)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Transferencia de datos (Host <- Device)): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// printf("Tiempo filtro gaussiano GPU: %f ms\n", t_total);
// printf("\n");
// Etapa 6: Liberación de Memoria
CUDA_CHK(hipFree(device_img_in));
CUDA_CHK(hipFree(device_img_out));
}
// Recorre la imagen aplicando secuencialmente un filtro Gaussiano que reduce el ruido de una imagen en escala de grises.
void blur_cpu(float * img_in, int width, int height, float * img_out, float msk[], int m_size) {
CLK_POSIX_INIT;
CLK_POSIX_START;
float val_pixel=0;
// Para cada pixel aplicamos el filtro
for(int imgx=0; imgx < width ; imgx++) {
for(int imgy=0; imgy < height; imgy++) {
val_pixel = 0;
// Aca aplicamos la mascara
for (int i = 0; i < m_size ; i++) {
for (int j = 0; j < m_size ; j++) {
int ix =imgx + i - m_size/2;
int iy =imgy + j - m_size/2;
// Altera el valor de un pixel, según sus vecinos.
if(ix >= 0 && ix < width && iy>= 0 && iy < height)
val_pixel = val_pixel + img_in[iy * width +ix] * msk[i*m_size+j];
}
}
// Guardo valor resultado
img_out[imgy*width+imgx]= val_pixel;
}
}
CLK_POSIX_STOP;
CLK_POSIX_ELAPSED;
printf("Tiempo filtro Gaussiano CPU: %f ms\n", t_elap);
printf("\n");
} | 5ae230071563ff1001b08111ca114b6946b65e3d.cu | #include "util.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm> // std::min std::max
using namespace std;
#define TILE_WIDTH 32 // Width of block size (32 threads)
#define TILE_HEIGHT 32 // Height of block size (32 threads)
#define MASK_RADIUS 5 // Mask radius
#define MASK_DIAMETER (MASK_RADIUS*2 + 1) // Mask diameter
#define MASK_SIZE (MASK_DIAMETER*MASK_DIAMETER) // Mask size
#define BLOCK_WIDTH (TILE_WIDTH + (2*MASK_RADIUS)) // Width of shared memory block
#define BLOCK_HEIGHT (TILE_HEIGHT + (2*MASK_RADIUS)) // Height of shared memory block
__constant__ float D_MASK[121];
// CUDA Thread Indexing Cheatsheet https://cs.calvin.edu/courses/cs/374/CUDA/CUDA-Thread-Indexing-Cheatsheet.pdf
// Ejemplo filtro https://www.nvidia.com/content/nvision2008/tech_presentations/Game_Developer_Track/NVISION08-Image_Processing_and_Video_with_CUDA.pdf
// Ejemplo multiplicacion de matrices http://selkie.macalester.edu/csinparallel/modules/GPUProgramming/build/html/CUDA2D/CUDA2D.html
// Ej 2a) Kernel que aplica el filtro Gaussiano en la GPU empleando memoria compartida (comprarar tiempos y nvprof con practico3 blur sin mascara const float* __restrict__ d_msk)
// Ej 2b-1) Agregar máscara const float* __restrict__ d_msk (y comparar tiempos con 2a)
// Estas flags dicen que: el dato es de solo lectura (const) y es la unica versión de ese puntero (__restrict__)
// Permite al compilador hacer optimizaciones y usar la cache constante
// Ej 2b-2) Copiar máscara con __constant__ y cudaMemcpyToSymbol (para que resida en mem constante) (y comparar tiempos con 2b-1)
// Acá estamos optimizando la memoria constante.
// La memoria constante es de 64KB, está optimizada para que si todo acceso del warp accede al mismo elem el acceso es óptimo
__global__ void blur_kernel_a(float* d_input, int width, int height, float* d_output, float* d_msk) {
__shared__ float block_memory[BLOCK_WIDTH][BLOCK_HEIGHT];
// Coordenada del pixel que al hilo le corresponde escribir
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
// Carga de la memoria compartida maximizando paralelismo entre hilos
// Para cargar la mascara de 36x36 con los bloques de 32x32 como indica la letra es necesario que
// el pixel que cada hilo lea y escriba en memoria esté shifteado -2,-2 (2 hacia arriba y hacia la izquierda)
// d_input auxiliary indexes
int shifted_imgx = imgx - MASK_RADIUS;
int shifted_imgy = imgy - MASK_RADIUS;
int right_shifted_imgx = shifted_imgx + blockDim.x;
int under_shifted_imgy = shifted_imgy + blockDim.y;
int shifted_image_position_y = shifted_imgy*width;
int under_shifted_image_position_y = under_shifted_imgy*width;
// block_memory auxiliary indexes
int memory_index_x = threadIdx.x;
int memory_index_y = threadIdx.y;
int right_shifted_memory_index_x = memory_index_x + blockDim.x;
int under_shifted_memory_index_y = memory_index_y + blockDim.y;
// Cada hilo carga su lugar shifteado 2 posiciones hacia la izquierda y 2 hacia arriba (-2, -2)
if (shifted_imgx >= 0 && shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[memory_index_x][memory_index_y] = d_input[shifted_image_position_y + shifted_imgx];
}
// Cada hilo carga su lugar shifteado (blockDim.x - 2) posiciones hacia la derecha y 2 hacia arriba (+29, -2)
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && memory_index_y >= 0 && memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][memory_index_y] = d_input[shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][memory_index_y] = 0;
}
}
// Cada hilo carga su lugar shifteado 2 posiciones hacia la izquierda y (blockDim.y - 2) hacia abajo (-2, +29)
if (memory_index_x >= 0 && memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(shifted_imgx >= 0 && shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + shifted_imgx];
} else {
block_memory[memory_index_x][under_shifted_memory_index_y] = 0;
}
}
// Cada hilo carga su lugar shifteado (blockDim.x - 2) posiciones hacia la derecha y (blockDim.y - 2) hacia abajo (+29, +29)
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = 0;
}
}
__syncthreads();
// Aplicación de la máscara (blur)
float val_pixel = 0;
int ix, iy, full_image_ix, full_image_iy;
// Tomamos los indices del centro (32x32) de la imagen shifteando +2,+2 (hacia abajo y derecha)
int memory_imgx = threadIdx.x + MASK_RADIUS;
int memory_imgy = threadIdx.y + MASK_RADIUS;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
full_image_ix = imgx + i;
if (full_image_ix >= 0 && full_image_ix < width) {
ix = memory_imgx + i;
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
full_image_iy = imgy + j;
if (full_image_iy >= 0 && full_image_iy < height) {
iy = memory_imgy + j;
// Altera el valor de un pixel, según sus vecinos.
val_pixel += block_memory[ix][iy] * d_msk[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
// Escribimos la salida
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Ej 2b-1) Agregar máscara const float* __restrict__ d_msk (y comparar tiempos con 2a)
// Estas flags dicen que: el dato es de solo lectura (const) y que no otro puntero apunta a su dirección (__restrict__)
// Permite al compilador hacer optimizaciones y usar la cache constante
__global__ void blur_kernel_b(float* d_input, int width, int height, float* d_output, const float* __restrict__ d_msk) {
__shared__ float block_memory[BLOCK_WIDTH][BLOCK_HEIGHT];
// Coordenada del pixel que al hilo le corresponde escribir
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
// Carga de la memoria compartida maximizando paralelismo entre hilos
// Para cargar la mascara de 36x36 con los bloques de 32x32 como indica la letra es necesario que
// el pixel que cada hilo lea y escriba en memoria esté shifteado -2,-2 (2 hacia arriba y hacia la izquierda)
// d_input auxiliary indexes
int shifted_imgx = imgx - MASK_RADIUS;
int shifted_imgy = imgy - MASK_RADIUS;
int right_shifted_imgx = shifted_imgx + blockDim.x;
int under_shifted_imgy = shifted_imgy + blockDim.y;
int shifted_image_position_y = shifted_imgy*width;
int under_shifted_image_position_y = under_shifted_imgy*width;
// block_memory auxiliary indexes
int memory_index_x = threadIdx.x;
int memory_index_y = threadIdx.y;
int right_shifted_memory_index_x = memory_index_x + blockDim.x;
int under_shifted_memory_index_y = memory_index_y + blockDim.y;
// Cada hilo carga su lugar shifteado 2 posiciones hacia la izquierda y 2 hacia arriba (-2, -2)
if (shifted_imgx >= 0 && shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[memory_index_x][memory_index_y] = d_input[shifted_image_position_y + shifted_imgx];
}
// Cada hilo carga su lugar shifteado (blockDim.x - 2) posiciones hacia la derecha y 2 hacia arriba (+29, -2)
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && memory_index_y >= 0 && memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][memory_index_y] = d_input[shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][memory_index_y] = 0;
}
}
// Cada hilo carga su lugar shifteado 2 posiciones hacia la izquierda y (blockDim.y - 2) hacia abajo (-2, +29)
if (memory_index_x >= 0 && memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(shifted_imgx >= 0 && shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + shifted_imgx];
} else {
block_memory[memory_index_x][under_shifted_memory_index_y] = 0;
}
}
// Cada hilo carga su lugar shifteado (blockDim.x - 2) posiciones hacia la derecha y (blockDim.y - 2) hacia abajo (+29, +29)
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = 0;
}
}
__syncthreads();
// Aplicación de la máscara (blur)
float val_pixel = 0;
int ix, iy, full_image_ix, full_image_iy;
// Tomamos los indices del centro (32x32) de la imagen shifteando +2,+2 (hacia abajo y derecha)
int memory_imgx = threadIdx.x + MASK_RADIUS;
int memory_imgy = threadIdx.y + MASK_RADIUS;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
full_image_ix = imgx + i;
if (full_image_ix >= 0 && full_image_ix < width) {
ix = memory_imgx + i;
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
full_image_iy = imgy + j;
if (full_image_iy >= 0 && full_image_iy < height) {
iy = memory_imgy + j;
// Altera el valor de un pixel, según sus vecinos.
val_pixel += block_memory[ix][iy] * d_msk[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
// Escribimos la salida
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Ej 2b-2) Copiar máscara con __constant__ y cudaMemcpyToSymbol (para que resida en mem constante) (y comparar tiempos con 2b-1)
// Cada hilo accede a la mascara desde la memoria constante de la GPU a velocidad de registro.
// La memoria constante es de 64KB, está optimizada para que si todo acceso del warp accede al mismo elem el acceso es óptimo
__global__ void blur_kernel_c(float* d_input, int width, int height, float* d_output) {
__shared__ float block_memory[BLOCK_WIDTH][BLOCK_HEIGHT];
// Coordenada del pixel que al hilo le corresponde escribir
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
// Carga de la memoria compartida maximizando paralelismo entre hilos
// Para cargar la mascara de 36x36 con los bloques de 32x32 como indica la letra es necesario que
// el pixel que cada hilo lea y escriba en memoria esté shifteado -2,-2 (2 hacia arriba y hacia la izquierda)
// d_input auxiliary indexes
int shifted_imgx = imgx - MASK_RADIUS;
int shifted_imgy = imgy - MASK_RADIUS;
int right_shifted_imgx = shifted_imgx + blockDim.x;
int under_shifted_imgy = shifted_imgy + blockDim.y;
int shifted_image_position_y = shifted_imgy*width;
int under_shifted_image_position_y = under_shifted_imgy*width;
// block_memory auxiliary indexes
int memory_index_x = threadIdx.x;
int memory_index_y = threadIdx.y;
int right_shifted_memory_index_x = memory_index_x + blockDim.x;
int under_shifted_memory_index_y = memory_index_y + blockDim.y;
// Cada hilo carga su lugar shifteado 2 posiciones hacia la izquierda y 2 hacia arriba (-2, -2)
if (shifted_imgx >= 0 && shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[memory_index_x][memory_index_y] = d_input[shifted_image_position_y + shifted_imgx];
}
// Cada hilo carga su lugar shifteado (blockDim.x - 2) posiciones hacia la derecha y 2 hacia arriba (+29, -2)
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && memory_index_y >= 0 && memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && shifted_imgy >= 0 && shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][memory_index_y] = d_input[shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][memory_index_y] = 0;
}
}
// Cada hilo carga su lugar shifteado 2 posiciones hacia la izquierda y (blockDim.y - 2) hacia abajo (-2, +29)
if (memory_index_x >= 0 && memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(shifted_imgx >= 0 && shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + shifted_imgx];
} else {
block_memory[memory_index_x][under_shifted_memory_index_y] = 0;
}
}
// Cada hilo carga su lugar shifteado (blockDim.x - 2) posiciones hacia la derecha y (blockDim.y - 2) hacia abajo (+29, +29)
if (right_shifted_memory_index_x >= 0 && right_shifted_memory_index_x < BLOCK_WIDTH && under_shifted_memory_index_y >= 0 && under_shifted_memory_index_y < BLOCK_HEIGHT) {
if(right_shifted_imgx >= 0 && right_shifted_imgx < width && under_shifted_imgy >= 0 && under_shifted_imgy < height) {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = d_input[under_shifted_image_position_y + right_shifted_imgx];
} else {
block_memory[right_shifted_memory_index_x][under_shifted_memory_index_y] = 0;
}
}
__syncthreads();
// Aplicación de la máscara (blur)
float val_pixel = 0;
int ix, iy, full_image_ix, full_image_iy;
// Tomamos los indices del centro (32x32) de la imagen shifteando +2,+2 (hacia abajo y derecha)
int memory_imgx = threadIdx.x + MASK_RADIUS;
int memory_imgy = threadIdx.y + MASK_RADIUS;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
full_image_ix = imgx + i;
if (full_image_ix >= 0 && full_image_ix < width) {
ix = memory_imgx + i;
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
full_image_iy = imgy + j;
if (full_image_iy >= 0 && full_image_iy < height) {
iy = memory_imgy + j;
// Altera el valor de un pixel, según sus vecinos.
val_pixel += block_memory[ix][iy] * D_MASK[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
// Escribimos la salida
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Blur kernel mem global
__global__ void blur_kernel_global(float* d_input, int width, int height, float* d_output, float* d_msk) {
// Coordenada del pixel que al hilo le corresponde escribir
int imgx = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgy = (blockIdx.y * blockDim.y) + threadIdx.y;
float val_pixel = 0;
int ix, iy;
for (int i = -MASK_RADIUS; i <= MASK_RADIUS; ++i) {
ix = imgx + i;
if (ix >= 0 && ix < width) {
for (int j = -MASK_RADIUS; j <= MASK_RADIUS; ++j) {
iy = imgy + j;
if (iy >= 0 && iy < height) {
// Altera el valor de un pixel, según sus vecinos.
val_pixel += d_input[(iy * width) + ix] * d_msk[(i + MASK_RADIUS)*MASK_DIAMETER + j + MASK_RADIUS];
}
}
}
}
// Escribimos la salida
if (imgx < width && imgy < height) {
d_output[(imgy*width) + imgx] = val_pixel;
}
}
// Ej 2) Aplica un filtro Gaussiano que reduce el ruido de una imagen en escala de grises.
// El filtro sustituye el valor de intensidad de cada pixel por un promedio ponderado de los pixeles vecinos.
// Los pesos por los cuales se pondera cada vecino en el promedio se almacenan en una matriz cuadrada (máscara)
void blur_gpu(float * img_in, int width, int height, float * img_out, float msk[], int algorithm){
switch(algorithm) {
// Práctico 3) Kernel con memoria global
case 1:
printf("\n");
printf("-> Kernel con memoria global\n");
break;
// Ej 2a) Kernel con memoria compartida
case 2:
printf("\n");
printf("-> Kernel con memoria compartida\n");
break;
// Ej 2b1) Kernel con memoria compartida y optimizando la máscara cómo read_only y restricted pointer.
case 3:
printf("\n");
printf("-> Kernel con memoria compartida y optimizando la máscara cómo read_only y restricted pointer\n");
break;
// Ej 2b2) Kernel con con memoria compartida y almacenando la máscara en la memoria constante de la GPU
case 4:
printf("\n");
printf("-> Kernel con memoria compartida y almacenando la máscara en memoria constante\n");
break;
default:
printf("Invocar como: './ej2.x nombre_archivo, algoritmo'\n");
printf("-> Algoritmo:\n");
printf("\t 1 - Kernel con memoria global\n");
printf("\t 2 - Kernel con memoria compartida\n");
printf("\t 3 - Kernel con memoria compartida y mascara read_only con restricted pointer\n");
printf("\t 4 - Kernel con memoria compartida y mascara en memoria constante\n");
printf("\t 0 - Todos los algoritmos\n");
}
// Auxiliar para contar tiempo total
// float t_total = 0;
// Etapa 1: Reserva de Memoria
// CLK_CUEVTS_INIT;
// CLK_CUEVTS_START;
// Reserva en CPU
unsigned int size = width * height * sizeof(float);
unsigned int size_msk = MASK_SIZE * sizeof(float);
float * device_img_in = (float *)malloc(size);
float * device_img_out = (float *)malloc(size);
float * device_msk;
// Reserva en GPU
CUDA_CHK(cudaMalloc((void**)& device_img_in, size));
CUDA_CHK(cudaMalloc((void**)& device_img_out, size));
if(algorithm != 4) {
device_msk = (float *)malloc(size_msk);
CUDA_CHK(cudaMalloc((void**)& device_msk, size_msk));
}
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Reserva de memoria): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// Etapa 2: Transferencia de datos (Host -> Device)
// CLK_CUEVTS_START;
CUDA_CHK(cudaMemcpy(device_img_in, img_in, size, cudaMemcpyHostToDevice)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia
if(algorithm != 4) {
// Transfiero la mascara a la memoria de la GPU
CUDA_CHK(cudaMemcpy(device_msk, msk, size_msk, cudaMemcpyHostToDevice)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia
} else {
// Guardo la mascara en la memoria constante de la GPU
CUDA_CHK(cudaMemcpyToSymbol(D_MASK, msk, size_msk, 0, cudaMemcpyHostToDevice));
}
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Transferencia de datos (Host -> Device)): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// Etapa 3: Definir grilla
int amount_of_blocks_x = width / TILE_WIDTH + (width % TILE_WIDTH != 0); // Division with ceiling
int amount_of_blocks_y = height / TILE_HEIGHT + (height % TILE_HEIGHT != 0); // Division with ceiling
dim3 tamGrid(amount_of_blocks_x, amount_of_blocks_y); // Grid dimension
dim3 tamBlock(TILE_WIDTH, TILE_HEIGHT); // Block dimension
// Etapa 4 : Lanzar Kernel
// CLK_CUEVTS_START;
switch(algorithm) {
// Práctico 3) Kernel con memoria global
case 1:
blur_kernel_global<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out, device_msk);
break;
// Ej 2a) Kernel con memoria compartida
case 2:
blur_kernel_a<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out, device_msk);
break;
// Ej 2b1) Kernel con memoria compartida y optimizando la máscara cómo read_only y restricted pointer.
case 3:
blur_kernel_b<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out, device_msk);
break;
// Ej 2b2) Kernel con con memoria compartida y almacenando la máscara en la memoria constante de la GPU
case 4:
blur_kernel_c<<<tamGrid, tamBlock>>>(device_img_in, width, height, device_img_out);
break;
}
// Sincronizar threads antes de parar timers
cudaDeviceSynchronize();
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Kernel): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// Etapa 5: Transferencia de Datos (Device -> Host)
// CLK_CUEVTS_START;
CUDA_CHK(cudaMemcpy(img_out, device_img_out, size, cudaMemcpyDeviceToHost)); // puntero destino, puntero origen, numero de bytes a copiar, tipo de transferencia
// CLK_CUEVTS_STOP;
// CLK_CUEVTS_ELAPSED;
// printf("Tiempo filtro gaussiano GPU (Transferencia de datos (Host <- Device)): %f ms\n", t_elap);
// t_total = t_total + t_elap;
// printf("Tiempo filtro gaussiano GPU: %f ms\n", t_total);
// printf("\n");
// Etapa 6: Liberación de Memoria
CUDA_CHK(cudaFree(device_img_in));
CUDA_CHK(cudaFree(device_img_out));
}
// Recorre la imagen aplicando secuencialmente un filtro Gaussiano que reduce el ruido de una imagen en escala de grises.
void blur_cpu(float * img_in, int width, int height, float * img_out, float msk[], int m_size) {
CLK_POSIX_INIT;
CLK_POSIX_START;
float val_pixel=0;
// Para cada pixel aplicamos el filtro
for(int imgx=0; imgx < width ; imgx++) {
for(int imgy=0; imgy < height; imgy++) {
val_pixel = 0;
// Aca aplicamos la mascara
for (int i = 0; i < m_size ; i++) {
for (int j = 0; j < m_size ; j++) {
int ix =imgx + i - m_size/2;
int iy =imgy + j - m_size/2;
// Altera el valor de un pixel, según sus vecinos.
if(ix >= 0 && ix < width && iy>= 0 && iy < height)
val_pixel = val_pixel + img_in[iy * width +ix] * msk[i*m_size+j];
}
}
// Guardo valor resultado
img_out[imgy*width+imgx]= val_pixel;
}
}
CLK_POSIX_STOP;
CLK_POSIX_ELAPSED;
printf("Tiempo filtro Gaussiano CPU: %f ms\n", t_elap);
printf("\n");
} |
2ddef50844ac79debc7539b8cd7ed7abc028685e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "virial_pressure.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/datatypes.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/folders.h>
#include <mirheo/core/utils/common.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/mpi_types.h>
namespace mirheo
{
namespace virial_pressure_kernels
{
__global__ void totalPressure(PVview view, const Stress *stress, FieldDeviceHandler region, virial_pressure_plugin::ReductionType *pressure)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
virial_pressure_plugin::ReductionType P = 0;
Particle p;
if (tid < view.size) {
const Stress s = stress[tid];
auto r = Real3_int(view.readPosition(tid)).v;
if (region(r) > 0)
P = (s.xx + s.yy + s.zz) / 3.0;
}
P = warpReduce(P, [](virial_pressure_plugin::ReductionType a, virial_pressure_plugin::ReductionType b) { return a+b; });
if (laneId() == 0)
atomicAdd(pressure, P);
}
} // namespace virial_pressure_kernels
VirialPressurePlugin::VirialPressurePlugin(const MirState *state, std::string name, std::string pvName,
FieldFunction func, real3 h, int dumpEvery) :
SimulationPlugin(state, name),
pvName_(pvName),
dumpEvery_(dumpEvery),
region_(state, "field_"+name, func, h)
{}
VirialPressurePlugin::~VirialPressurePlugin() = default;
void VirialPressurePlugin::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
region_.setup(comm);
info("Plugin %s initialized for the following particle vector: %s", getCName(), pvName_.c_str());
}
void VirialPressurePlugin::handshake()
{
SimpleSerializer::serialize(sendBuffer_, pvName_);
_send(sendBuffer_);
}
void VirialPressurePlugin::afterIntegration(hipStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
PVview view(pv_, pv_->local());
const Stress *stress = pv_->local()->dataPerParticle.getData<Stress>(channel_names::stresses)->devPtr();
localVirialPressure_.clear(stream);
constexpr int nthreads = 128;
const int nblocks = getNblocks(view.size, nthreads);
SAFE_KERNEL_LAUNCH(
virial_pressure_kernels::totalPressure,
nblocks, nthreads, 0, stream,
view, stress, region_.handler(), localVirialPressure_.devPtr() );
localVirialPressure_.downloadFromDevice(stream, ContainersSynch::Synch);
savedTime_ = getState()->currentTime;
needToSend_ = true;
}
void VirialPressurePlugin::serializeAndSend(__UNUSED hipStream_t stream)
{
if (!needToSend_) return;
debug2("Plugin %s is sending now data", getCName());
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, savedTime_, localVirialPressure_[0]);
_send(sendBuffer_);
needToSend_ = false;
}
//=================================================================================
VirialPressureDumper::VirialPressureDumper(std::string name, std::string path) :
PostprocessPlugin(name),
path_(makePath(path))
{}
void VirialPressureDumper::setup(const MPI_Comm& comm, const MPI_Comm& interComm)
{
PostprocessPlugin::setup(comm, interComm);
activated_ = createFoldersCollective(comm, path_);
}
void VirialPressureDumper::handshake()
{
auto req = waitData();
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
recv();
std::string pvName;
SimpleSerializer::deserialize(data_, pvName);
if (activated_)
{
auto fname = path_ + pvName + ".txt";
auto status = fdump_.open(fname, "w");
if (status != FileWrapper::Status::Success)
die("Could not open file '%s'", fname.c_str());
fprintf(fdump_.get(), "# time Pressure\n");
}
}
void VirialPressureDumper::deserialize()
{
MirState::TimeType curTime;
virial_pressure_plugin::ReductionType localPressure, totalPressure;
SimpleSerializer::deserialize(data_, curTime, localPressure);
if (!activated_) return;
const auto dataType = getMPIFloatType<virial_pressure_plugin::ReductionType>();
MPI_Check( MPI_Reduce(&localPressure, &totalPressure, 1, dataType, MPI_SUM, 0, comm_) );
fprintf(fdump_.get(), "%g %.6e\n", curTime, totalPressure);
}
} // namespace mirheo
| 2ddef50844ac79debc7539b8cd7ed7abc028685e.cu | #include "virial_pressure.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/datatypes.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/folders.h>
#include <mirheo/core/utils/common.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/mpi_types.h>
namespace mirheo
{
namespace virial_pressure_kernels
{
__global__ void totalPressure(PVview view, const Stress *stress, FieldDeviceHandler region, virial_pressure_plugin::ReductionType *pressure)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
virial_pressure_plugin::ReductionType P = 0;
Particle p;
if (tid < view.size) {
const Stress s = stress[tid];
auto r = Real3_int(view.readPosition(tid)).v;
if (region(r) > 0)
P = (s.xx + s.yy + s.zz) / 3.0;
}
P = warpReduce(P, [](virial_pressure_plugin::ReductionType a, virial_pressure_plugin::ReductionType b) { return a+b; });
if (laneId() == 0)
atomicAdd(pressure, P);
}
} // namespace virial_pressure_kernels
VirialPressurePlugin::VirialPressurePlugin(const MirState *state, std::string name, std::string pvName,
FieldFunction func, real3 h, int dumpEvery) :
SimulationPlugin(state, name),
pvName_(pvName),
dumpEvery_(dumpEvery),
region_(state, "field_"+name, func, h)
{}
VirialPressurePlugin::~VirialPressurePlugin() = default;
void VirialPressurePlugin::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
pv_ = simulation->getPVbyNameOrDie(pvName_);
region_.setup(comm);
info("Plugin %s initialized for the following particle vector: %s", getCName(), pvName_.c_str());
}
void VirialPressurePlugin::handshake()
{
SimpleSerializer::serialize(sendBuffer_, pvName_);
_send(sendBuffer_);
}
void VirialPressurePlugin::afterIntegration(cudaStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
PVview view(pv_, pv_->local());
const Stress *stress = pv_->local()->dataPerParticle.getData<Stress>(channel_names::stresses)->devPtr();
localVirialPressure_.clear(stream);
constexpr int nthreads = 128;
const int nblocks = getNblocks(view.size, nthreads);
SAFE_KERNEL_LAUNCH(
virial_pressure_kernels::totalPressure,
nblocks, nthreads, 0, stream,
view, stress, region_.handler(), localVirialPressure_.devPtr() );
localVirialPressure_.downloadFromDevice(stream, ContainersSynch::Synch);
savedTime_ = getState()->currentTime;
needToSend_ = true;
}
void VirialPressurePlugin::serializeAndSend(__UNUSED cudaStream_t stream)
{
if (!needToSend_) return;
debug2("Plugin %s is sending now data", getCName());
_waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, savedTime_, localVirialPressure_[0]);
_send(sendBuffer_);
needToSend_ = false;
}
//=================================================================================
VirialPressureDumper::VirialPressureDumper(std::string name, std::string path) :
PostprocessPlugin(name),
path_(makePath(path))
{}
void VirialPressureDumper::setup(const MPI_Comm& comm, const MPI_Comm& interComm)
{
PostprocessPlugin::setup(comm, interComm);
activated_ = createFoldersCollective(comm, path_);
}
void VirialPressureDumper::handshake()
{
auto req = waitData();
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
recv();
std::string pvName;
SimpleSerializer::deserialize(data_, pvName);
if (activated_)
{
auto fname = path_ + pvName + ".txt";
auto status = fdump_.open(fname, "w");
if (status != FileWrapper::Status::Success)
die("Could not open file '%s'", fname.c_str());
fprintf(fdump_.get(), "# time Pressure\n");
}
}
void VirialPressureDumper::deserialize()
{
MirState::TimeType curTime;
virial_pressure_plugin::ReductionType localPressure, totalPressure;
SimpleSerializer::deserialize(data_, curTime, localPressure);
if (!activated_) return;
const auto dataType = getMPIFloatType<virial_pressure_plugin::ReductionType>();
MPI_Check( MPI_Reduce(&localPressure, &totalPressure, 1, dataType, MPI_SUM, 0, comm_) );
fprintf(fdump_.get(), "%g %.6e\n", curTime, totalPressure);
}
} // namespace mirheo
|
8e57164f3e3af02e6f4ab8abf6f71ae7053f5bed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _weightTopkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg5, float *dev_recon)
{
uint q;
int ind0, indg[5];
uint k = blockIdx.x*blockDim.x + threadIdx.x;
uint n = blockIdx.y*blockDim.y + threadIdx.y+1;
if ((k>=num_slices)||(n<1)||(n>=(num_grid-1)))
return;
ind0 = n + k*num_grid*num_grid;
indg[0] = ind0+1;
indg[1] = ind0-1;
indg[2] = ind0+num_grid;
indg[3] = ind0+num_grid+1;
indg[4] = ind0+num_grid-1;
for (q = 0; q < 5; q++) {
dev_F[ind0] += 2*beta*dev_wg5[q];
dev_G[ind0] -= 2*beta*dev_wg5[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
}
} | 8e57164f3e3af02e6f4ab8abf6f71ae7053f5bed.cu | #include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _weightTopkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg5, float *dev_recon)
{
uint q;
int ind0, indg[5];
uint k = blockIdx.x*blockDim.x + threadIdx.x;
uint n = blockIdx.y*blockDim.y + threadIdx.y+1;
if ((k>=num_slices)||(n<1)||(n>=(num_grid-1)))
return;
ind0 = n + k*num_grid*num_grid;
indg[0] = ind0+1;
indg[1] = ind0-1;
indg[2] = ind0+num_grid;
indg[3] = ind0+num_grid+1;
indg[4] = ind0+num_grid-1;
for (q = 0; q < 5; q++) {
dev_F[ind0] += 2*beta*dev_wg5[q];
dev_G[ind0] -= 2*beta*dev_wg5[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
}
} |
453671344b97fa3ff97117147879e9f715ec192c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_sobel(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #6.1 /////////////////////////////////////
// Implement the GPU version of the Sobel filter //
///////////////////////////////////////////////////
} | 453671344b97fa3ff97117147879e9f715ec192c.cu | #include "includes.h"
__global__ void gpu_sobel(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #6.1 /////////////////////////////////////
// Implement the GPU version of the Sobel filter //
///////////////////////////////////////////////////
} |
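// A minimal sketch of what the TO-DO above might look like (one assumed implementation, written as a
// separate hypothetical kernel, not the course's reference solution): 3x3 Sobel gradients with clamped borders.
__global__ void gpu_sobel_sketch(int width, int height, const float *image, float *image_out)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    // Clamp neighbour coordinates at the image border
    int xm = max(x - 1, 0), xp = min(x + 1, width - 1);
    int ym = max(y - 1, 0), yp = min(y + 1, height - 1);
    // Horizontal and vertical Sobel responses
    float gx = image[ym * width + xp] + 2.0f * image[y * width + xp] + image[yp * width + xp]
             - image[ym * width + xm] - 2.0f * image[y * width + xm] - image[yp * width + xm];
    float gy = image[yp * width + xm] + 2.0f * image[yp * width + x] + image[yp * width + xp]
             - image[ym * width + xm] - 2.0f * image[ym * width + x] - image[ym * width + xp];
    image_out[y * width + x] = sqrtf(gx * gx + gy * gy); // gradient magnitude
}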
edcb3dade0e145612334fa6a84231ea693e438ac.hip | // !!! This is a file automatically generated by hipify!!!
// BasisSampler.jl: importance sample from arbitrary sets of basis coefficients
// Copyright (C) 2017 Samuel Powell
// sampler.cu: PTX module
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "sampler.h"
__global__
void sample_test(sampler_t s, int ns, float *zeta, unsigned int *k, float *w)
{
const int id = blockIdx.x * blockDim.x + threadIdx.x;
const int gr = blockDim.x * gridDim.x;
for(int sid = id; sid < ns; sid += gr)
{
sample(s, zeta[sid], k+sid, w+sid);
k[sid]++; // Return one-based indices to Julia
}
return;
}
| edcb3dade0e145612334fa6a84231ea693e438ac.cu | // BasisSampler.jl: importance sample from arbitrary sets of basis coefficients
// Copyright (C) 2017 Samuel Powell
// sampler.cu: PTX module
#include <cuda.h>
#include <stdio.h>
#include "sampler.h"
__global__
void sample_test(sampler_t s, int ns, float *zeta, unsigned int *k, float *w)
{
const int id = blockIdx.x * blockDim.x + threadIdx.x;
const int gr = blockDim.x * gridDim.x;
for(int sid = id; sid < ns; sid += gr)
{
sample(s, zeta[sid], k+sid, w+sid);
k[sid]++; // Return one-based indices to Julia
}
return;
}
|
4d3a928def397a057c47dcdab5bc234643789f3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <SDL2/SDL.h>
#include <SDL2/SDL_opengl.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "vec3.cu"
#define WIDTH 1920
#define HEIGHT 1080
#define BLOCKX 16
#define BLOCKY 16
#define BG_COLOR {150, 150, 255, 255}
#define MS .1
#define TR .05
#define RT_DEPTH 5
#define RAYS_PER_PIXEL 100
// Typedefs
typedef uchar4 Color; // .x->R, .y->G, .z->B, .w->A
typedef enum Light_Type {
AMBIENT,
DIRECTIONAL,
POINT
} Light_Type;
typedef struct Material {
Color color;
Color emmitance;
double roughness;
double reflectance;
double specular;
} Material;
typedef struct Sphere {
double3 center;
double radius;
Material* material;
} Sphere;
typedef struct Triangle {
double3 normal;
double3 v1;
double3 v2;
double3 v3;
Material *material;
} Triangle;
typedef struct Ray {
double3 origin;
double3 direction;
} Ray;
typedef struct Interaction {
double3 point;
double closest_t;
double3 normal;
Material *material;
} Interaction;
typedef struct Light {
double intensity;
Light_Type type;
union {
double3 pos;
double3 dir;
};
} Light;
typedef struct Scene {
Sphere **spheres;
Triangle **triangles;
int sphere_n;
int triangle_n;
Light **lights;
int light_n;
Material **materials;
int material_n;
} Scene;
// Host Functions
void output_fb_to_sdl(SDL_Renderer *renderer, Color *framebuffer, int width, int height);
//void AddSphere(int radius, double3 center, Color color, double specular, double reflect);
void AddSphere(int radius, double3 center, Material *material);
void AddTriangle(double3 normal, double3 v1, double3 v2, double3 v3, Material *material);
void AddTriangle(double3 v1, double3 v2, double3 v3, Material *material);
void AddTrianglesFromSTL(const char *location, Material *material);
void AddTrianglesFromSTL(const char *location, double3 offset, double scale, Material *material);
Material *AddMaterial(Color color, Color emmitance, double reflectance, double specular, double roughness);
void AddLight(Light_Type type, double intensity);
void AddLight(Light_Type type, double intensity, double3 pos_dir);
// Global Functions
__global__ void setup_curand(hiprandState_t *state);
__global__ void renderSingleFrame(Color *framebuffer, int width, int height, hiprandState_t *curand_state);
// Device Functions
__device__ double3 CanvasToViewport(int x, int y, int width, int height);
__device__ Color TraceRay(Ray ray, double t_min, double t_max, Color bg_color, int depth);
__device__ Color TracePath(Ray ray, double t_min, double t_max, Color bg_color, int depth, hiprandState_t *curand_state);
__device__ double3 RandomVectorInHemisphere(double3 vector_in, hiprandState_t *curand_state);
__device__ double3 RandomDeltaVector(double range, hiprandState_t *curand_state);
__device__ double2 IntersectRay(Ray ray, Sphere *sphere);
__device__ double IntersectRay(Ray ray, Triangle *triangle);
__device__ double ComputeLighting(double3 point, double3 normal, double3 view, double spec);
__device__ Interaction ClosestIntersection(Ray ray, double t_min, double t_max);
__device__ double3 ReflectRay(double3 R, double3 normal);
// linear algebra
__host__ __device__ double3 cross(double3 vec1, double3 vec2);
__host__ __device__ double dot(double3 vec1, double3 vec2);
__host__ __device__ double length(double3 vec);
// Global variables
__managed__ double3 origin = {0.0, 0.0, 0.0}; // Current camera position
__managed__ double theta = 0.0; // Rotation of camera about Y axis
__managed__ Scene scene; // struct containing objects to be rendered
__managed__ int pathtrace = 0;
// USER DEFINED
void scene_setup()
{
Material *material;
material = AddMaterial(make_uchar4(40, 200, 90, 255), make_uchar4(0,0,0,0), .05, 10, 100);
AddSphere(5000, make_double3(0,-5001, 0), material);
material = AddMaterial(make_uchar4(255, 0, 0, 0), make_uchar4(0,0,0,0), .8, 1, 100);
AddSphere(1, make_double3(-2.0, 0.0, 4.0), material);
material = AddMaterial(make_uchar4(0, 255, 0, 0), make_uchar4(0,0,0,0), .5, 20, 1);
AddSphere(1, make_double3(0.0, -1.0, 3.0), material);
material = AddMaterial(make_uchar4(0, 0, 255, 0), make_uchar4(0,0,0,0), .3, 50, 50);
AddSphere(1, make_double3(2.0, 0.0, 4.0), material);
material = AddMaterial(make_uchar4(200, 100, 100, 0), make_uchar4(0,0,0,0), .1, 1, 50);
AddTrianglesFromSTL("cube.stl", make_double3(0,1,8), 2, material);
AddLight(AMBIENT, .3);
AddLight(POINT, 0.6, make_double3(2, 1, 0));
AddLight(DIRECTIONAL, 0.2, make_double3(1, 4, 4));
}
int main(int argc, char const *argv[])
{
// Set stack size for cuda threads in bytes (size_t)
// Default is 1024 - If too low, part of scene will render black
// Needed at high recursion depths
hipDeviceSetLimit(hipLimitStackSize, 4096);
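// (Sketch, assuming hipDeviceGetLimit is available in this HIP version) the limit can be read back to confirm it took effect:
// size_t stack_bytes = 0;
// hipDeviceGetLimit(&stack_bytes, hipLimitStackSize);
// printf("per-thread GPU stack: %zu bytes\n", stack_bytes);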
Color *fb; // Array of RGBA pixels
size_t fb_size; // WIDTH*HEIGHT*sizeof(Color)
int pitch = WIDTH*sizeof(Color); // How many bytes to jump to move down a row
SDL_Window *window;
SDL_Renderer *renderer;
SDL_Texture *texture;
SDL_Event event;
SDL_Init(SDL_INIT_VIDEO);
window = SDL_CreateWindow(
"RayTracer",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
WIDTH, // width, in pixels
HEIGHT, // height, in pixels
SDL_WINDOW_OPENGL | // Flags
//SDL_WINDOW_FULLSCREEN |
SDL_WINDOW_SHOWN |
0
);
renderer = SDL_CreateRenderer(
window,
-1,
SDL_RENDERER_ACCELERATED // Change to SDL_RENDERER_SOFTWARE if using VNC
);
texture = SDL_CreateTexture(
renderer,
SDL_PIXELFORMAT_ABGR8888,
SDL_TEXTUREACCESS_STREAMING,
WIDTH,
HEIGHT
);
hipMallocManaged(&scene.materials, sizeof(Material*));
hipMallocManaged(&scene.spheres, sizeof(Sphere*));
hipMallocManaged(&scene.triangles, sizeof(Triangle*));
hipMallocManaged(&scene.lights, sizeof(Light*));
scene_setup();
hipDeviceSynchronize();
int device = -1;
hipGetDevice(&device);
dim3 blocks(WIDTH/BLOCKX+1,HEIGHT/BLOCKY+1);
dim3 threads(BLOCKX,BLOCKY);
fb_size = WIDTH*HEIGHT*sizeof(Color);
hipMallocManaged(&fb, fb_size);
// Setup RNG
hiprandState_t *curand_state;
hipMalloc(&curand_state, sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_curand), dim3(blocks),dim3(threads), 0, 0, curand_state);
#ifdef PT_ON
pathtrace = 1;
#endif
#ifdef BENCH
/* Benchmarks */
for(int i = 0; i < BENCH; i++) {
hipMemPrefetchAsync(fb, fb_size, device, NULL);
hipLaunchKernelGGL(( renderSingleFrame), dim3(blocks),dim3(threads), 0, 0, fb, WIDTH, HEIGHT, curand_state);
hipDeviceSynchronize();
SDL_UpdateTexture(texture, NULL, (void *)fb, pitch);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
#else
// Render first frame
hipLaunchKernelGGL(( renderSingleFrame), dim3(blocks),dim3(threads), 0, 0, fb, WIDTH, HEIGHT, curand_state);
hipDeviceSynchronize();
SDL_UpdateTexture(texture, NULL, (void *)fb, pitch);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
// Render after every movement keypress
// wasd, q-e to rotate, z-x to move up/down
while (1) {
SDL_PollEvent(&event);
if (event.type == SDL_QUIT) {
break;
}
switch(event.type) {
case SDL_KEYDOWN:
switch(event.key.keysym.sym) {
case SDLK_d:
origin.x +=MS*cos(theta);
origin.z -=MS*sin(theta);
break;
case SDLK_a:
origin.x -=MS*cos(theta);
origin.z +=MS*sin(theta);
break;
case SDLK_z:
origin.y +=MS;
break;
case SDLK_x:
origin.y -=MS;
break;
case SDLK_s:
origin.z -=MS*cos(theta);
origin.x -=MS*sin(theta);
break;
case SDLK_w:
origin.z +=MS*cos(theta);
origin.x +=MS*sin(theta);
break;
case SDLK_q:
theta -=TR;
break;
case SDLK_e:
theta +=TR;
break;
case SDLK_r:
pathtrace = pathtrace ? 0 : 1;
break;
}
hipLaunchKernelGGL(( renderSingleFrame), dim3(blocks),dim3(threads), 0, 0, fb, WIDTH, HEIGHT, curand_state);
hipDeviceSynchronize();
SDL_UpdateTexture(texture, NULL, (void *)fb, pitch);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
hipMemPrefetchAsync(fb, fb_size, device, NULL);
break;
case SDL_KEYUP:
break;
default:
//SDL_RenderPresent(renderer);
break;
}
}
#endif
hipFree(fb);
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
return 0;
}
void AddSphere(int radius, double3 center, Material *material)
{
hipMallocManaged(&scene.spheres[scene.sphere_n], sizeof(Sphere));
scene.spheres[scene.sphere_n]->radius = radius;
scene.spheres[scene.sphere_n]->center = center;
scene.spheres[scene.sphere_n]->material = material;
scene.sphere_n++;
}
void AddTriangle(double3 normal, double3 v1, double3 v2, double3 v3, Material *material)
{
hipMallocManaged(&scene.triangles[scene.triangle_n], sizeof(Triangle));
scene.triangles[scene.triangle_n]->normal = normal;
scene.triangles[scene.triangle_n]->v1 = v1;
scene.triangles[scene.triangle_n]->v2 = v2;
scene.triangles[scene.triangle_n]->v3 = v3;
scene.triangles[scene.triangle_n]->material = material;
scene.triangle_n++;
}
void AddTriangle(double3 v1, double3 v2, double3 v3, Material *material)
{
hipMallocManaged(&scene.triangles[scene.triangle_n], sizeof(Triangle));
scene.triangles[scene.triangle_n]->normal = cross(v3-v1, v2-v1);
scene.triangles[scene.triangle_n]->v1 = v1;
scene.triangles[scene.triangle_n]->v2 = v2;
scene.triangles[scene.triangle_n]->v3 = v3;
scene.triangles[scene.triangle_n]->material = material;
scene.triangle_n++;
}
void AddTrianglesFromSTL(const char *location, Material *material)
{
double3 v1, v2, v3, normal;
char sentinel[10];
char name[100];
FILE *f = fopen(location, "r");
fscanf(f, "solid %s\n", name);
fscanf(f, "%s ", sentinel);
//printf("%s\n", sentinel);
while(!strcmp(sentinel, "facet")) {
fscanf(f, "normal %lf %lf %lf\n", &normal.x, &normal.y, &normal.z);
fscanf(f, "outer loop\n");
fscanf(f, "vertex %lf %lf %lf\n", &v1.x, &v1.y, &v1.z);
fscanf(f, "vertex %lf %lf %lf\n", &v2.x, &v2.y, &v2.z);
fscanf(f, "vertex %lf %lf %lf\n", &v3.x, &v3.y, &v3.z);
//printf("vertex %lf %lf %lf\n", v2.x, v2.y, v2.z);
fscanf(f, "endloop\n");
fscanf(f, "endfacet\n");
AddTriangle(normal, v1, v2, v3, material);
fscanf(f, "%s ", sentinel);
}
fclose(f);
}
void AddTrianglesFromSTL(const char *location, double3 offset, double scale, Material *material)
{
double3 v1, v2, v3, normal;
char sentinel[10];
char name[100];
FILE *f = fopen(location, "r");
fscanf(f, "solid %s\n", name);
fscanf(f, "%s ", sentinel);
while(!strcmp(sentinel, "facet")) {
fscanf(f, "normal %lf %lf %lf\n", &normal.x, &normal.y, &normal.z);
fscanf(f, "outer loop\n");
fscanf(f, "vertex %lf %lf %lf\n", &v1.x, &v1.y, &v1.z);
fscanf(f, "vertex %lf %lf %lf\n", &v2.x, &v2.y, &v2.z);
fscanf(f, "vertex %lf %lf %lf\n", &v3.x, &v3.y, &v3.z);
fscanf(f, "endloop\n");
fscanf(f, "endfacet\n");
AddTriangle(normal, (scale*v1)+offset, (scale*v2)+offset, (scale*v3)+offset, material);
fscanf(f, "%s ", sentinel);
}
fclose(f);
}
Material *AddMaterial(Color color, Color emmitance, double reflectance, double specular, double roughness)
{
hipMallocManaged(&scene.materials[scene.material_n], sizeof(Material));
scene.materials[scene.material_n]->color = color;
scene.materials[scene.material_n]->emmitance = emmitance;
scene.materials[scene.material_n]->reflectance = reflectance;
scene.materials[scene.material_n]->specular = specular;
scene.materials[scene.material_n]->roughness = roughness;
scene.material_n++;
return scene.materials[scene.material_n-1];
}
void AddLight(Light_Type type, double intensity)
{
if(type != AMBIENT) return;
hipMallocManaged(&scene.lights[scene.light_n], sizeof(Light));
scene.lights[scene.light_n]->type = type;
scene.lights[scene.light_n]->intensity = intensity;
scene.light_n++;
return;
}
void AddLight(Light_Type type, double intensity, double3 pos_dir)
{
if(type == DIRECTIONAL){
hipMallocManaged(&scene.lights[scene.light_n], sizeof(Light));
scene.lights[scene.light_n]->type = type;
scene.lights[scene.light_n]->intensity = intensity;
scene.lights[scene.light_n]->dir = pos_dir;
scene.light_n++;
} else if(type == POINT){
hipMallocManaged(&scene.lights[scene.light_n], sizeof(Light));
scene.lights[scene.light_n]->type = type;
scene.lights[scene.light_n]->intensity = intensity;
scene.lights[scene.light_n]->pos = pos_dir;
scene.light_n++;
}
return;
}
__global__ void setup_curand(hiprandState_t *state)
{
int idx = threadIdx.x+blockDim.x*blockIdx.x;
hiprand_init(1234, idx, 0, &state[idx]);
}
__global__ void renderSingleFrame(Color *framebuffer, int width, int height, hiprandState_t *curand_state)
{
Ray ray;
ray.origin = origin;
ulong4 colorsum;
Color color = BG_COLOR;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= width) || (j >= height)) return;
hiprandState_t *state = &curand_state[i];
ray.direction = CanvasToViewport(i, j, width, height);
if(pathtrace) {
colorsum = make_ulong4(0,0,0,0);
for(int n = 0; n < RAYS_PER_PIXEL; n++){
color = TracePath(ray, 0.001, DBL_MAX, BG_COLOR, RT_DEPTH, state);
colorsum = make_ulong4(colorsum.x+color.x, colorsum.y+color.y, colorsum.z+color.z, 0);
}
colorsum = make_ulong4(colorsum.x/RAYS_PER_PIXEL, colorsum.y/RAYS_PER_PIXEL, colorsum.z/RAYS_PER_PIXEL, 0);
color = make_uchar4(colorsum.x, colorsum.y, colorsum.z, 0);
} else {
color = TraceRay(ray, 0.001, DBL_MAX, BG_COLOR, RT_DEPTH);
}
//printf("%f\t", hiprand_normal(state));
framebuffer[i + j*width] = color;
return;
}
__device__ double3 CanvasToViewport(int x, int y, int width, int height)
{
double3 retval;
double3 temp;
temp.x = ((double)x-(width/2))/(double)height;
temp.y = -((double)y-(height/2))/(double)height;
temp.z = 1.0;
retval.x = temp.x*cos(theta) + temp.z*sin(theta);
retval.y = temp.y;
retval.z = -temp.x*sin(theta) + temp.z*cos(theta);
return retval;
}
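// The transform above rotates the view direction about the Y axis by `theta`:
//   | x' |   |  cos(theta)  0  sin(theta) |   | x |
//   | y' | = |      0       1      0      | * | y |
//   | z' |   | -sin(theta)  0  cos(theta) |   | z |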
__device__ Color TraceRay(Ray ray, double t_min, double t_max, Color bg_color, int depth)
{
Color local_color, reflected_color;
Ray reflected_ray;
Interaction interaction;
interaction = ClosestIntersection(ray, t_min, t_max);
if (interaction.closest_t == DBL_MAX){
return bg_color;
}
double3 point = ray.origin + interaction.closest_t * ray.direction;
double3 normal = interaction.normal;
normal = (1/length(normal)) * normal;
local_color = ComputeLighting(point,
normal,
-1 * ray.direction,
interaction.material->specular
)
* interaction.material->color;
if(depth <= 0 || interaction.material->reflectance <= 0) {
return local_color;
}
reflected_ray.origin = point;
reflected_ray.direction = ReflectRay(-1*ray.direction, normal);
reflected_color = TraceRay(reflected_ray, .001, DBL_MAX, bg_color, depth-1);
return ((1 - interaction.material->reflectance) * local_color)
+ (interaction.material->reflectance * reflected_color);
}
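// Stochastic variant of TraceRay: with PT_NAIVE the bounce direction is sampled over the
// hemisphere, otherwise the mirror reflection is jittered by a roughness-scaled offset.
// The recursion itself continues through TraceRay.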
__device__ Color TracePath(Ray ray, double t_min, double t_max, Color bg_color, int depth, hiprandState_t *curand_state)
{
Color local_color, reflected_color;
Ray reflected_ray;
Interaction interaction;
interaction = ClosestIntersection(ray, t_min, t_max);
if (interaction.closest_t == DBL_MAX){
return bg_color;
}
double3 point = ray.origin + interaction.closest_t * ray.direction;
double3 normal = interaction.normal;
normal = (1/length(normal)) * normal;
local_color = ComputeLighting(point,
normal,
-1 * ray.direction,
interaction.material->specular
)
* interaction.material->color;
if(depth <= 0 || interaction.material->reflectance <= 0) {
return local_color;
}
reflected_ray.origin = point;
#ifdef PT_NAIVE
reflected_ray.direction = RandomVectorInHemisphere(normal, curand_state);
#else
reflected_ray.direction = ReflectRay(-1*ray.direction, normal)
+ RandomDeltaVector(interaction.material->roughness/1000, curand_state);
#endif
reflected_color = TraceRay(reflected_ray, .001, DBL_MAX, bg_color, depth-1);
return ((1 - interaction.material->reflectance) * local_color)
+ (interaction.material->reflectance * reflected_color);
}
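// Samples a direction uniformly over the unit sphere (latitude from acos(2u-1), longitude
// from 2*pi*u) and returns it oriented into the hemisphere around vector_in.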
__device__ double3 RandomVectorInHemisphere(double3 vector_in, hiprandState_t *curand_state)
{
double lambda, phi;
double3 vector_out;
lambda = acos(2*hiprand_uniform_double(curand_state) - 1) - (M_PI/2.0);
phi = 2*M_PI * hiprand_uniform_double(curand_state);
vector_out.x = cos(lambda)*cos(phi);
vector_out.y = cos(lambda)*sin(phi);
vector_out.z = sin(lambda);
// Flip samples that point away from vector_in so the result lies in its hemisphere
if(dot(vector_out, vector_in) < 0.0) {
vector_out = -1 * vector_out;
}
return vector_out;
}
__device__ double3 RandomDeltaVector(double range, hiprandState_t *curand_state)
{
double3 vector_out;
vector_out.x = (hiprand_uniform_double(curand_state) - 0.5) * range;
vector_out.y = (hiprand_uniform_double(curand_state) - 0.5) * range;
vector_out.z = (hiprand_uniform_double(curand_state) - 0.5) * range;
return vector_out;
}
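// Brute-force nearest-hit search: test both sphere roots and every triangle, keeping the
// smallest t inside (t_min, t_max) along with its hit point, normal and material.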
__device__ Interaction ClosestIntersection(Ray ray, double t_min, double t_max)
{
Interaction interaction;
double2 t;
interaction.closest_t = DBL_MAX;
for (int i = 0; i < scene.sphere_n; i++) {
t = IntersectRay(ray, scene.spheres[i]);
if (t.x < interaction.closest_t && t.x < t_max && t.x > t_min) {
interaction.closest_t = t.x;
interaction.point = ray.origin + interaction.closest_t * ray.direction;
interaction.normal = interaction.point - scene.spheres[i]->center;
interaction.material = scene.spheres[i]->material;
}
if (t.y < interaction.closest_t && t.y < t_max && t.y > t_min) {
interaction.closest_t = t.y;
interaction.point = ray.origin + interaction.closest_t * ray.direction;
interaction.normal = interaction.point - scene.spheres[i]->center;
interaction.material = scene.spheres[i]->material;
}
}
for (int i = 0; i < scene.triangle_n; i++) {
t.x = IntersectRay(ray, scene.triangles[i]);
if (t.x < interaction.closest_t && t.x < t_max && t.x > t_min) {
interaction.closest_t = t.x;
interaction.point = ray.origin + interaction.closest_t * ray.direction;
interaction.normal = scene.triangles[i]->normal;
interaction.material = scene.triangles[i]->material;
}
}
return interaction;
}
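// Reflects R about the surface normal: R' = 2(N.R)N - R, assuming a normalized normal.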
__device__ double3 ReflectRay(double3 R, double3 normal)
{
return ((2*dot(normal, R)) * normal) - R;
}
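// Ray/sphere test: substituting o + t*d into |p - c|^2 = r^2 gives a quadratic in t;
// both roots are returned, or (DBL_MAX, DBL_MAX) when the discriminant is negative.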
__device__ double2 IntersectRay(Ray ray, Sphere *sphere)
{
double3 coeffs;
double discriminant;
double3 offset = ray.origin - sphere->center;
coeffs.x = dot(ray.direction, ray.direction);
coeffs.y = 2*(dot(offset, ray.direction));
coeffs.z = dot(offset, offset) - (sphere->radius * sphere->radius);
discriminant = (coeffs.y*coeffs.y) - (4*coeffs.x*coeffs.z);
if(discriminant < 0.0) {
return make_double2(DBL_MAX, DBL_MAX);
}
return make_double2((-coeffs.y + sqrt(discriminant)) / (2*coeffs.x),
(-coeffs.y - sqrt(discriminant)) / (2*coeffs.x));
}
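// Ray/triangle test returning the hit distance t, or DBL_MAX for a miss (near-parallel
// ray, or barycentric coordinates u, v outside the triangle).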
__device__ double IntersectRay(Ray ray, Triangle *triangle)
{
// Using the Möller-Trumbore method
double3 edge1, edge2, tvec, pvec, qvec;
double det, inv_det;
double u, v;
edge1 = triangle->v2 - triangle->v1;
edge2 = triangle->v3 - triangle->v1;
pvec = cross(ray.direction, edge2);
det = dot(edge1, pvec);
if(det < 0.00001 && det > -0.00001) {
return DBL_MAX;
}
inv_det = 1.0/det;
tvec = ray.origin - triangle->v1;
u = dot(tvec, pvec) * inv_det;
if(u < 0.0 || u > 1.0) {
return DBL_MAX;
}
qvec = cross(tvec, edge1);
v = dot(ray.direction, qvec) * inv_det;
if(v < 0.0 || u+v > 1.0){
return DBL_MAX;
}
return dot(edge2, qvec) * inv_det;
}
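// Phong-style lighting: ambient term plus, for each light that passes a shadow-ray test,
// a Lambertian diffuse term (N.L) and a specular term (R.V)^spec, clamped to 1.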
__device__ double ComputeLighting(double3 point, double3 normal, double3 view, double spec)
{
double intensity = 0.0;
Ray light_ray;
light_ray.origin = point;
for(int i = 0; i < scene.light_n; i++) {
if(scene.lights[i]->type == AMBIENT) {
intensity += scene.lights[i]->intensity;
} else {
if(scene.lights[i]->type == POINT){
light_ray.direction = scene.lights[i]->pos - point;
} else {
light_ray.direction = scene.lights[i]->dir;
}
//Shadows
if(ClosestIntersection(light_ray, 0.001, DBL_MAX).closest_t < DBL_MAX) {
// If object is occluding light then go to next light
continue;
}
// Diffuse
double n_dot_l = dot(normal, light_ray.direction);
if(n_dot_l > 0.0) {
intensity += scene.lights[i]->intensity*n_dot_l/(length(normal)*length(light_ray.direction));
}
//Specular
if(spec != -1) {
double3 reflect = ((2*n_dot_l) * normal) - light_ray.direction;
double r_dot_v = dot(reflect, view);
if(r_dot_v > 0.0) {
intensity += scene.lights[i]->intensity*pow(r_dot_v/(length(reflect)*length(view)), spec);
}
}
}
}
return min(intensity, 1.0);
}
__host__ __device__ double3 cross(double3 vec1, double3 vec2)
{
return make_double3(vec1.y*vec2.z-vec1.z*vec2.y,
vec1.z*vec2.x-vec1.x*vec2.z,
vec1.x*vec2.y-vec1.y*vec2.x
);
}
__host__ __device__ double dot(double3 vec1, double3 vec2)
{
return vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
}
__host__ __device__ double length(double3 vec)
{
return sqrt(dot(vec, vec));
}
| 4d3a928def397a057c47dcdab5bc234643789f3a.cu | #include <SDL2/SDL.h>
#include <SDL2/SDL_opengl.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
#include <curand_kernel.h>
#include "vec3.cu"
#define WIDTH 1920
#define HEIGHT 1080
#define BLOCKX 16
#define BLOCKY 16
#define BG_COLOR {150, 150, 255, 255}
#define MS .1
#define TR .05
#define RT_DEPTH 5
#define RAYS_PER_PIXEL 100
// Typedefs
typedef uchar4 Color; // .x->R, .y->G, .z->B, .w->A
typedef enum Light_Type {
AMBIENT,
DIRECTIONAL,
POINT
} Light_Type;
typedef struct Material {
Color color;
Color emmitance;
double roughness;
double reflectance;
double specular;
} Material;
typedef struct Sphere {
double3 center;
double radius;
Material* material;
} Sphere;
typedef struct Triangle {
double3 normal;
double3 v1;
double3 v2;
double3 v3;
Material *material;
} Triangle;
typedef struct Ray {
double3 origin;
double3 direction;
} Ray;
typedef struct Interaction {
double3 point;
double closest_t;
double3 normal;
Material *material;
} Interaction;
typedef struct Light {
double intensity;
Light_Type type;
union {
double3 pos;
double3 dir;
};
} Light;
typedef struct Scene {
Sphere **spheres;
Triangle **triangles;
int sphere_n;
int triangle_n;
Light **lights;
int light_n;
Material **materials;
int material_n;
} Scene;
// Host Functions
void output_fb_to_sdl(SDL_Renderer *renderer, Color *framebuffer, int width, int height);
//void AddSphere(int radius, double3 center, Color color, double specular, double reflect);
void AddSphere(int radius, double3 center, Material *material);
void AddTriangle(double3 normal, double3 v1, double3 v2, double3 v3, Material *material);
void AddTriangle(double3 v1, double3 v2, double3 v3, Material *material);
void AddTrianglesFromSTL(const char *location, Material *material);
void AddTrianglesFromSTL(const char *location, double3 offset, double scale, Material *material);
Material *AddMaterial(Color color, Color emmitance, double reflectance, double specular, double roughness);
void AddLight(Light_Type type, double intensity);
void AddLight(Light_Type type, double intensity, double3 pos_dir);
// Global Functions
__global__ void setup_curand(curandState *state);
__global__ void renderSingleFrame(Color *framebuffer, int width, int height, curandState *curand_state);
// Device Functions
__device__ double3 CanvasToViewport(int x, int y, int width, int height);
__device__ Color TraceRay(Ray ray, double t_min, double t_max, Color bg_color, int depth);
__device__ Color TracePath(Ray ray, double t_min, double t_max, Color bg_color, int depth, curandState *curand_state);
__device__ double3 RandomVectorInHemisphere(double3 vector_in, curandState *curand_state);
__device__ double3 RandomDeltaVector(double range, curandState *curand_state);
__device__ double2 IntersectRay(Ray ray, Sphere *sphere);
__device__ double IntersectRay(Ray ray, Triangle *triangle);
__device__ double ComputeLighting(double3 point, double3 normal, double3 view, double spec);
__device__ Interaction ClosestIntersection(Ray ray, double t_min, double t_max);
__device__ double3 ReflectRay(double3 R, double3 normal);
// linear algebra
__host__ __device__ double3 cross(double3 vec1, double3 vec2);
__host__ __device__ double dot(double3 vec1, double3 vec2);
__host__ __device__ double length(double3 vec);
// Global variables
__managed__ double3 origin = {0.0, 0.0, 0.0}; // Current camera position
__managed__ double theta = 0.0; // Rotation of camera about Y axis
__managed__ Scene scene; // struct containing objects to be rendered
__managed__ int pathtrace = 0;
// USER DEFINED
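// Builds the demo scene: a large ground sphere, three colored spheres, a cube loaded
// from cube.stl, and one ambient, one point and one directional light.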
void scene_setup()
{
Material *material;
material = AddMaterial(make_uchar4(40, 200, 90, 255), make_uchar4(0,0,0,0), .05, 10, 100);
AddSphere(5000, make_double3(0,-5001, 0), material);
material = AddMaterial(make_uchar4(255, 0, 0, 0), make_uchar4(0,0,0,0), .8, 1, 100);
AddSphere(1, make_double3(-2.0, 0.0, 4.0), material);
material = AddMaterial(make_uchar4(0, 255, 0, 0), make_uchar4(0,0,0,0), .5, 20, 1);
AddSphere(1, make_double3(0.0, -1.0, 3.0), material);
material = AddMaterial(make_uchar4(0, 0, 255, 0), make_uchar4(0,0,0,0), .3, 50, 50);
AddSphere(1, make_double3(2.0, 0.0, 4.0), material);
material = AddMaterial(make_uchar4(200, 100, 100, 0), make_uchar4(0,0,0,0), .1, 1, 50);
AddTrianglesFromSTL("cube.stl", make_double3(0,1,8), 2, material);
AddLight(AMBIENT, .3);
AddLight(POINT, 0.6, make_double3(2, 1, 0));
AddLight(DIRECTIONAL, 0.2, make_double3(1, 4, 4));
}
int main(int argc, char const *argv[])
{
// Set stack size for cuda threads in bytes (size_t)
// Default is 1024 - If too low, part of scene will render black
// Needed at high recursion depths
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
Color *fb; // Array of RGBA pixels
size_t fb_size; // WIDTH*HEIGHT*sizeof(Color)
int pitch = WIDTH*sizeof(Color); // How many bytes to jump to move down a row
SDL_Window *window;
SDL_Renderer *renderer;
SDL_Texture *texture;
SDL_Event event;
SDL_Init(SDL_INIT_VIDEO);
window = SDL_CreateWindow(
"RayTracer",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
WIDTH, // width, in pixels
HEIGHT, // height, in pixels
SDL_WINDOW_OPENGL | // Flags
//SDL_WINDOW_FULLSCREEN |
SDL_WINDOW_SHOWN |
0
);
renderer = SDL_CreateRenderer(
window,
-1,
SDL_RENDERER_ACCELERATED // Change to SDL_RENDERER_SOFTWARE if using VNC
);
texture = SDL_CreateTexture(
renderer,
SDL_PIXELFORMAT_ABGR8888,
SDL_TEXTUREACCESS_STREAMING,
WIDTH,
HEIGHT
);
cudaMallocManaged(&scene.materials, sizeof(Material*));
cudaMallocManaged(&scene.spheres, sizeof(Sphere*));
cudaMallocManaged(&scene.triangles, sizeof(Triangle*));
cudaMallocManaged(&scene.lights, sizeof(Light*));
scene_setup();
cudaDeviceSynchronize();
int device = -1;
cudaGetDevice(&device);
dim3 blocks(WIDTH/BLOCKX+1,HEIGHT/BLOCKY+1);
dim3 threads(BLOCKX,BLOCKY);
fb_size = WIDTH*HEIGHT*sizeof(Color);
cudaMallocManaged(&fb, fb_size);
// Setup RNG
curandState *curand_state;
// One curandState per launched thread column (setup_curand indexes states by
// threadIdx.x + blockDim.x * blockIdx.x, renderSingleFrame by pixel column)
cudaMalloc(&curand_state, blocks.x * threads.x * sizeof(curandState));
setup_curand<<<blocks,threads>>>(curand_state);
#ifdef PT_ON
pathtrace = 1;
#endif
#ifdef BENCH
/* Benchmarks */
for(int i = 0; i < BENCH; i++) {
cudaMemPrefetchAsync(fb, fb_size, device, NULL);
renderSingleFrame<<<blocks,threads>>>(fb, WIDTH, HEIGHT, curand_state);
cudaDeviceSynchronize();
SDL_UpdateTexture(texture, NULL, (void *)fb, pitch);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
#else
// Render first frame
renderSingleFrame<<<blocks,threads>>>(fb, WIDTH, HEIGHT, curand_state);
cudaDeviceSynchronize();
SDL_UpdateTexture(texture, NULL, (void *)fb, pitch);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
// Render after every movement keypress
// wasd, q-e to rotate, z-x to move up/down
while (1) {
SDL_PollEvent(&event);
if (event.type == SDL_QUIT) {
break;
}
switch(event.type) {
case SDL_KEYDOWN:
switch(event.key.keysym.sym) {
case SDLK_d:
origin.x +=MS*cos(theta);
origin.z -=MS*sin(theta);
break;
case SDLK_a:
origin.x -=MS*cos(theta);
origin.z +=MS*sin(theta);
break;
case SDLK_z:
origin.y +=MS;
break;
case SDLK_x:
origin.y -=MS;
break;
case SDLK_s:
origin.z -=MS*cos(theta);
origin.x -=MS*sin(theta);
break;
case SDLK_w:
origin.z +=MS*cos(theta);
origin.x +=MS*sin(theta);
break;
case SDLK_q:
theta -=TR;
break;
case SDLK_e:
theta +=TR;
break;
case SDLK_r:
pathtrace = pathtrace ? 0 : 1;
break;
}
renderSingleFrame<<<blocks,threads>>>(fb, WIDTH, HEIGHT, curand_state);
cudaDeviceSynchronize();
SDL_UpdateTexture(texture, NULL, (void *)fb, pitch);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
cudaMemPrefetchAsync(fb, fb_size, device, NULL);
break;
case SDL_KEYUP:
break;
default:
//SDL_RenderPresent(renderer);
break;
}
}
#endif
cudaFree(fb);
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
return 0;
}
void AddSphere(int radius, double3 center, Material *material)
{
cudaMallocManaged(&scene.spheres[scene.sphere_n], sizeof(Sphere));
scene.spheres[scene.sphere_n]->radius = radius;
scene.spheres[scene.sphere_n]->center = center;
scene.spheres[scene.sphere_n]->material = material;
scene.sphere_n++;
}
void AddTriangle(double3 normal, double3 v1, double3 v2, double3 v3, Material *material)
{
cudaMallocManaged(&scene.triangles[scene.triangle_n], sizeof(Triangle));
scene.triangles[scene.triangle_n]->normal = normal;
scene.triangles[scene.triangle_n]->v1 = v1;
scene.triangles[scene.triangle_n]->v2 = v2;
scene.triangles[scene.triangle_n]->v3 = v3;
scene.triangles[scene.triangle_n]->material = material;
scene.triangle_n++;
}
void AddTriangle(double3 v1, double3 v2, double3 v3, Material *material)
{
cudaMallocManaged(&scene.triangles[scene.triangle_n], sizeof(Triangle));
scene.triangles[scene.triangle_n]->normal = cross(v3-v1, v2-v1);
scene.triangles[scene.triangle_n]->v1 = v1;
scene.triangles[scene.triangle_n]->v2 = v2;
scene.triangles[scene.triangle_n]->v3 = v3;
scene.triangles[scene.triangle_n]->material = material;
scene.triangle_n++;
}
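// Parses an ASCII STL file ("facet normal ... vertex x y z ...") and adds one Triangle
// per facet; the second overload additionally scales and offsets every vertex.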
void AddTrianglesFromSTL(const char *location, Material *material)
{
double3 v1, v2, v3, normal;
char sentinel[10];
char name[100];
FILE *f = fopen(location, "r");
fscanf(f, "solid %s\n", name);
fscanf(f, "%s ", sentinel);
//printf("%s\n", sentinel);
while(!strcmp(sentinel, "facet")) {
fscanf(f, "normal %lf %lf %lf\n", &normal.x, &normal.y, &normal.z);
fscanf(f, "outer loop\n");
fscanf(f, "vertex %lf %lf %lf\n", &v1.x, &v1.y, &v1.z);
fscanf(f, "vertex %lf %lf %lf\n", &v2.x, &v2.y, &v2.z);
fscanf(f, "vertex %lf %lf %lf\n", &v3.x, &v3.y, &v3.z);
//printf("vertex %lf %lf %lf\n", v2.x, v2.y, v2.z);
fscanf(f, "endloop\n");
fscanf(f, "endfacet\n");
AddTriangle(normal, v1, v2, v3, material);
fscanf(f, "%s ", sentinel);
}
fclose(f);
}
void AddTrianglesFromSTL(const char *location, double3 offset, double scale, Material *material)
{
double3 v1, v2, v3, normal;
char sentinel[10];
char name[100];
FILE *f = fopen(location, "r");
fscanf(f, "solid %s\n", name);
fscanf(f, "%s ", sentinel);
while(!strcmp(sentinel, "facet")) {
fscanf(f, "normal %lf %lf %lf\n", &normal.x, &normal.y, &normal.z);
fscanf(f, "outer loop\n");
fscanf(f, "vertex %lf %lf %lf\n", &v1.x, &v1.y, &v1.z);
fscanf(f, "vertex %lf %lf %lf\n", &v2.x, &v2.y, &v2.z);
fscanf(f, "vertex %lf %lf %lf\n", &v3.x, &v3.y, &v3.z);
fscanf(f, "endloop\n");
fscanf(f, "endfacet\n");
AddTriangle(normal, (scale*v1)+offset, (scale*v2)+offset, (scale*v3)+offset, material);
fscanf(f, "%s ", sentinel);
}
fclose(f);
}
Material *AddMaterial(Color color, Color emmitance, double reflectance, double specular, double roughness)
{
cudaMallocManaged(&scene.materials[scene.material_n], sizeof(Material));
scene.materials[scene.material_n]->color = color;
scene.materials[scene.material_n]->emmitance = emmitance;
scene.materials[scene.material_n]->reflectance = reflectance;
scene.materials[scene.material_n]->specular = specular;
scene.materials[scene.material_n]->roughness = roughness;
scene.material_n++;
return scene.materials[scene.material_n-1];
}
void AddLight(Light_Type type, double intensity)
{
if(type != AMBIENT) return;
cudaMallocManaged(&scene.lights[scene.light_n], sizeof(Light));
scene.lights[scene.light_n]->type = type;
scene.lights[scene.light_n]->intensity = intensity;
scene.light_n++;
return;
}
void AddLight(Light_Type type, double intensity, double3 pos_dir)
{
if(type == DIRECTIONAL){
cudaMallocManaged(&scene.lights[scene.light_n], sizeof(Light));
scene.lights[scene.light_n]->type = type;
scene.lights[scene.light_n]->intensity = intensity;
scene.lights[scene.light_n]->dir = pos_dir;
scene.light_n++;
} else if(type == POINT){
cudaMallocManaged(&scene.lights[scene.light_n], sizeof(Light));
scene.lights[scene.light_n]->type = type;
scene.lights[scene.light_n]->intensity = intensity;
scene.lights[scene.light_n]->pos = pos_dir;
scene.light_n++;
}
return;
}
__global__ void setup_curand(curandState *state)
{
int idx = threadIdx.x+blockDim.x*blockIdx.x;
curand_init(1234, idx, 0, &state[idx]);
}
__global__ void renderSingleFrame(Color *framebuffer, int width, int height, curandState *curand_state)
{
Ray ray;
ray.origin = origin;
ulong4 colorsum;
Color color = BG_COLOR;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= width) || (j >= height)) return;
curandState *state = &curand_state[i];
ray.direction = CanvasToViewport(i, j, width, height);
if(pathtrace) {
colorsum = make_ulong4(0,0,0,0);
for(int n = 0; n < RAYS_PER_PIXEL; n++){
color = TracePath(ray, 0.001, DBL_MAX, BG_COLOR, RT_DEPTH, state);
colorsum = make_ulong4(colorsum.x+color.x, colorsum.y+color.y, colorsum.z+color.z, 0);
}
colorsum = make_ulong4(colorsum.x/RAYS_PER_PIXEL, colorsum.y/RAYS_PER_PIXEL, colorsum.z/RAYS_PER_PIXEL, 0);
color = make_uchar4(colorsum.x, colorsum.y, colorsum.z, 0);
} else {
color = TraceRay(ray, 0.001, DBL_MAX, BG_COLOR, RT_DEPTH);
}
//printf("%f\t", curand_normal(state));
framebuffer[i + j*width] = color;
return;
}
__device__ double3 CanvasToViewport(int x, int y, int width, int height)
{
double3 retval;
double3 temp;
temp.x = ((double)x-(width/2))/(double)height;
temp.y = -((double)y-(height/2))/(double)height;
temp.z = 1.0;
retval.x = temp.x*cos(theta) + temp.z*sin(theta);
retval.y = temp.y;
retval.z = -temp.x*sin(theta) + temp.z*cos(theta);
return retval;
}
__device__ Color TraceRay(Ray ray, double t_min, double t_max, Color bg_color, int depth)
{
Color local_color, reflected_color;
Ray reflected_ray;
Interaction interaction;
interaction = ClosestIntersection(ray, t_min, t_max);
if (interaction.closest_t == DBL_MAX){
return bg_color;
}
double3 point = ray.origin + interaction.closest_t * ray.direction;
double3 normal = interaction.normal;
normal = (1/length(normal)) * normal;
local_color = ComputeLighting(point,
normal,
-1 * ray.direction,
interaction.material->specular
)
* interaction.material->color;
if(depth <= 0 || interaction.material->reflectance <= 0) {
return local_color;
}
reflected_ray.origin = point;
reflected_ray.direction = ReflectRay(-1*ray.direction, normal);
reflected_color = TraceRay(reflected_ray, .001, DBL_MAX, bg_color, depth-1);
return ((1 - interaction.material->reflectance) * local_color)
+ (interaction.material->reflectance * reflected_color);
}
__device__ Color TracePath(Ray ray, double t_min, double t_max, Color bg_color, int depth, curandState *curand_state)
{
Color local_color, reflected_color;
Ray reflected_ray;
Interaction interaction;
interaction = ClosestIntersection(ray, t_min, t_max);
if (interaction.closest_t == DBL_MAX){
return bg_color;
}
double3 point = ray.origin + interaction.closest_t * ray.direction;
double3 normal = interaction.normal;
normal = (1/length(normal)) * normal;
local_color = ComputeLighting(point,
normal,
-1 * ray.direction,
interaction.material->specular
)
* interaction.material->color;
if(depth <= 0 || interaction.material->reflectance <= 0) {
return local_color;
}
reflected_ray.origin = point;
#ifdef PT_NAIVE
reflected_ray.direction = RandomVectorInHemisphere(normal, curand_state);
#else
reflected_ray.direction = ReflectRay(-1*ray.direction, normal)
+ RandomDeltaVector(interaction.material->roughness/1000, curand_state);
#endif
reflected_color = TraceRay(reflected_ray, .001, DBL_MAX, bg_color, depth-1);
return ((1 - interaction.material->reflectance) * local_color)
+ (interaction.material->reflectance * reflected_color);
}
__device__ double3 RandomVectorInHemisphere(double3 vector_in, curandState *curand_state)
{
double lambda, phi;
double3 vector_out;
lambda = acos(2*curand_uniform_double(curand_state) - 1) - (M_PI/2.0);
phi = 2*M_PI * curand_uniform_double(curand_state);
vector_out.x = cos(lambda)*cos(phi);
vector_out.y = cos(lambda)*sin(phi);
vector_out.z = sin(lambda);
// Flip samples that point away from vector_in so the result lies in its hemisphere
if(dot(vector_out, vector_in) < 0.0) {
vector_out = -1 * vector_out;
}
return vector_out;
}
__device__ double3 RandomDeltaVector(double range, curandState *curand_state)
{
double3 vector_out;
vector_out.x = (curand_uniform_double(curand_state) - 0.5) * range;
vector_out.y = (curand_uniform_double(curand_state) - 0.5) * range;
vector_out.z = (curand_uniform_double(curand_state) - 0.5) * range;
return vector_out;
}
__device__ Interaction ClosestIntersection(Ray ray, double t_min, double t_max)
{
Interaction interaction;
double2 t;
interaction.closest_t = DBL_MAX;
for (int i = 0; i < scene.sphere_n; i++) {
t = IntersectRay(ray, scene.spheres[i]);
if (t.x < interaction.closest_t && t.x < t_max && t.x > t_min) {
interaction.closest_t = t.x;
interaction.point = ray.origin + interaction.closest_t * ray.direction;
interaction.normal = interaction.point - scene.spheres[i]->center;
interaction.material = scene.spheres[i]->material;
}
if (t.y < interaction.closest_t && t.y < t_max && t.y > t_min) {
interaction.closest_t = t.y;
interaction.point = ray.origin + interaction.closest_t * ray.direction;
interaction.normal = interaction.point - scene.spheres[i]->center;
interaction.material = scene.spheres[i]->material;
}
}
for (int i = 0; i < scene.triangle_n; i++) {
t.x = IntersectRay(ray, scene.triangles[i]);
if (t.x < interaction.closest_t && t.x < t_max && t.x > t_min) {
interaction.closest_t = t.x;
interaction.point = ray.origin + interaction.closest_t * ray.direction;
interaction.normal = scene.triangles[i]->normal;
interaction.material = scene.triangles[i]->material;
}
}
return interaction;
}
__device__ double3 ReflectRay(double3 R, double3 normal)
{
return ((2*dot(normal, R)) * normal) - R;
}
__device__ double2 IntersectRay(Ray ray, Sphere *sphere)
{
double3 coeffs;
double discriminant;
double3 offset = ray.origin - sphere->center;
coeffs.x = dot(ray.direction, ray.direction);
coeffs.y = 2*(dot(offset, ray.direction));
coeffs.z = dot(offset, offset) - (sphere->radius * sphere->radius);
discriminant = (coeffs.y*coeffs.y) - (4*coeffs.x*coeffs.z);
if(discriminant < 0.0) {
return make_double2(DBL_MAX, DBL_MAX);
}
return make_double2((-coeffs.y + sqrt(discriminant)) / (2*coeffs.x),
(-coeffs.y - sqrt(discriminant)) / (2*coeffs.x));
}
__device__ double IntersectRay(Ray ray, Triangle *triangle)
{
// Using the Möller-Trumbore method
double3 edge1, edge2, tvec, pvec, qvec;
double det, inv_det;
double u, v;
edge1 = triangle->v2 - triangle->v1;
edge2 = triangle->v3 - triangle->v1;
pvec = cross(ray.direction, edge2);
det = dot(edge1, pvec);
if(det < 0.00001 && det > -0.00001) {
return DBL_MAX;
}
inv_det = 1.0/det;
tvec = ray.origin - triangle->v1;
u = dot(tvec, pvec) * inv_det;
if(u < 0.0 || u > 1.0) {
return DBL_MAX;
}
qvec = cross(tvec, edge1);
v = dot(ray.direction, qvec) * inv_det;
if(v < 0.0 || u+v > 1.0){
return DBL_MAX;
}
return dot(edge2, qvec) * inv_det;
}
__device__ double ComputeLighting(double3 point, double3 normal, double3 view, double spec)
{
double intensity = 0.0;
Ray light_ray;
light_ray.origin = point;
for(int i = 0; i < scene.light_n; i++) {
if(scene.lights[i]->type == AMBIENT) {
intensity += scene.lights[i]->intensity;
} else {
if(scene.lights[i]->type == POINT){
light_ray.direction = scene.lights[i]->pos - point;
} else {
light_ray.direction = scene.lights[i]->dir;
}
//Shadows
if(ClosestIntersection(light_ray, 0.001, DBL_MAX).closest_t < DBL_MAX) {
// If object is occluding light then go to next light
continue;
}
// Diffuse
double n_dot_l = dot(normal, light_ray.direction);
if(n_dot_l > 0.0) {
intensity += scene.lights[i]->intensity*n_dot_l/(length(normal)*length(light_ray.direction));
}
//Specular
if(spec != -1) {
double3 reflect = ((2*n_dot_l) * normal) - light_ray.direction;
double r_dot_v = dot(reflect, view);
if(r_dot_v > 0.0) {
intensity += scene.lights[i]->intensity*pow(r_dot_v/(length(reflect)*length(view)), spec);
}
}
}
}
return min(intensity, 1.0);
}
__host__ __device__ double3 cross(double3 vec1, double3 vec2)
{
return make_double3(vec1.y*vec2.z-vec1.z*vec2.y,
vec1.z*vec2.x-vec1.x*vec2.z,
vec1.x*vec2.y-vec1.y*vec2.x
);
}
__host__ __device__ double dot(double3 vec1, double3 vec2)
{
return vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
}
__host__ __device__ double length(double3 vec)
{
return sqrt(dot(vec, vec));
}
|
88e7769a4014d645af3b2898d01b3211cf63409d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
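// kernel_1..kernel_128 differ only in the size of their static __shared__ array; each
// thread repeatedly increments/decrements its own byte, so run time scales with `repeat`
// while the shared-memory footprint limits how many blocks can be resident at once.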
__global__ void kernel_1(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_2(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_3(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_4(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_5(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_6(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_7(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_8(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_9(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_10(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_11(int repeat) {
__shared__ unsigned char s[33792];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_12(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_13(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_14(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_15(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_16(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_17(int repeat) {
__shared__ unsigned char s[36864];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_18(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_19(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_20(int repeat) {
__shared__ unsigned char s[39936];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_21(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_22(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_23(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_24(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_25(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_26(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_27(int repeat) {
__shared__ unsigned char s[35840];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_28(int repeat) {
__shared__ unsigned char s[22528];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_29(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_30(int repeat) {
__shared__ unsigned char s[41984];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_31(int repeat) {
__shared__ unsigned char s[5120];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_32(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_33(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_34(int repeat) {
__shared__ unsigned char s[36864];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_35(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_36(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_37(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_38(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_39(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_40(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_41(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_42(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_43(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_44(int repeat) {
__shared__ unsigned char s[35840];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_45(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_46(int repeat) {
__shared__ unsigned char s[31744];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_47(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_48(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_49(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_50(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_51(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_52(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_53(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_54(int repeat) {
__shared__ unsigned char s[16384];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_55(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_56(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_57(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_58(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_59(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_60(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_61(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_62(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_63(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_64(int repeat) {
__shared__ unsigned char s[22528];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_65(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_66(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_67(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_68(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_69(int repeat) {
__shared__ unsigned char s[2048];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_70(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_71(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_72(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_73(int repeat) {
__shared__ unsigned char s[3072];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_74(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_75(int repeat) {
__shared__ unsigned char s[38912];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_76(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_77(int repeat) {
__shared__ unsigned char s[41984];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_78(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_79(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_80(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_81(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_82(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_83(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_84(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_85(int repeat) {
__shared__ unsigned char s[7168];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_86(int repeat) {
__shared__ unsigned char s[37888];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_87(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_88(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_89(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_90(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_91(int repeat) {
__shared__ unsigned char s[39936];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_92(int repeat) {
__shared__ unsigned char s[37888];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_93(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_94(int repeat) {
__shared__ unsigned char s[14336];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_95(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_96(int repeat) {
__shared__ unsigned char s[16384];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_97(int repeat) {
__shared__ unsigned char s[38912];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_98(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_99(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_100(int repeat) {
__shared__ unsigned char s[33792];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_101(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_102(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_103(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_104(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_105(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_106(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_107(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_108(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_109(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_110(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_111(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_112(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_113(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_114(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_115(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_116(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_117(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_118(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_119(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_120(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_121(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_122(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_123(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_124(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_125(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_126(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_127(int repeat) {
__shared__ unsigned char s[8192];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_128(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
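// Host driver: create one stream per benchmark kernel, launch all of them, then synchronize.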
int main() {
hipStream_t streams[128];
for (int i = 0; i < 128; i++) hipStreamCreate(&streams[i]);
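  // Each block below launches one of the kernel_1..kernel_128 benchmarks on its own
  // stream with a per-kernel repeat count (2 blocks of 32 threads each), so all 128
  // kernels can run concurrently. Note the launches still go through the CUDA runtime's
  // cudaLaunchKernel rather than hipLaunchKernel, so this hipified file presumably
  // builds only where the CUDA runtime API is visible (e.g. HIP on an NVIDIA backend).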
{
int repeat = 33792;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_93, grid, block, args, 0, streams[0]);
}
{
int repeat = 48128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_18, grid, block, args, 0, streams[1]);
}
{
int repeat = 34816;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_32, grid, block, args, 0, streams[2]);
}
{
int repeat = 43008;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_29, grid, block, args, 0, streams[3]);
}
{
int repeat = 36864;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_106, grid, block, args, 0, streams[4]);
}
{
int repeat = 54272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_12, grid, block, args, 0, streams[5]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_98, grid, block, args, 0, streams[6]);
}
{
int repeat = 35840;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_13, grid, block, args, 0, streams[7]);
}
{
int repeat = 78848;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_72, grid, block, args, 0, streams[8]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_71, grid, block, args, 0, streams[9]);
}
{
int repeat = 39936;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_57, grid, block, args, 0, streams[10]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_25, grid, block, args, 0, streams[11]);
}
{
int repeat = 64512;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_127, grid, block, args, 0, streams[12]);
}
{
int repeat = 49152;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_2, grid, block, args, 0, streams[13]);
}
{
int repeat = 72704;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_124, grid, block, args, 0, streams[14]);
}
{
int repeat = 51200;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_112, grid, block, args, 0, streams[15]);
}
{
int repeat = 45056;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_88, grid, block, args, 0, streams[16]);
}
{
int repeat = 87040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_22, grid, block, args, 0, streams[17]);
}
{
int repeat = 104448;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_69, grid, block, args, 0, streams[18]);
}
{
int repeat = 61440;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_15, grid, block, args, 0, streams[19]);
}
{
int repeat = 163840;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_114, grid, block, args, 0, streams[20]);
}
{
int repeat = 62464;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_45, grid, block, args, 0, streams[21]);
}
{
int repeat = 55296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_38, grid, block, args, 0, streams[22]);
}
{
int repeat = 86016;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_10, grid, block, args, 0, streams[23]);
}
{
int repeat = 92160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_89, grid, block, args, 0, streams[24]);
}
{
int repeat = 93184;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_80, grid, block, args, 0, streams[25]);
}
{
int repeat = 88064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_95, grid, block, args, 0, streams[26]);
}
{
int repeat = 47104;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_24, grid, block, args, 0, streams[27]);
}
{
int repeat = 102400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_116, grid, block, args, 0, streams[28]);
}
{
int repeat = 132096;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_125, grid, block, args, 0, streams[29]);
}
{
int repeat = 51200;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_35, grid, block, args, 0, streams[30]);
}
{
int repeat = 102400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_66, grid, block, args, 0, streams[31]);
}
{
int repeat = 63488;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_4, grid, block, args, 0, streams[32]);
}
{
int repeat = 100352;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_7, grid, block, args, 0, streams[33]);
}
{
int repeat = 68608;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_99, grid, block, args, 0, streams[34]);
}
{
int repeat = 167936;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_52, grid, block, args, 0, streams[35]);
}
{
int repeat = 75776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_103, grid, block, args, 0, streams[36]);
}
{
int repeat = 77824;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_41, grid, block, args, 0, streams[37]);
}
{
int repeat = 189440;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_31, grid, block, args, 0, streams[38]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_73, grid, block, args, 0, streams[39]);
}
{
int repeat = 75776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_117, grid, block, args, 0, streams[40]);
}
{
int repeat = 113664;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_54, grid, block, args, 0, streams[41]);
}
{
int repeat = 119808;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_82, grid, block, args, 0, streams[42]);
}
{
int repeat = 119808;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_19, grid, block, args, 0, streams[43]);
}
{
int repeat = 159744;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_111, grid, block, args, 0, streams[44]);
}
{
int repeat = 81920;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_120, grid, block, args, 0, streams[45]);
}
{
int repeat = 111616;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_49, grid, block, args, 0, streams[46]);
}
{
int repeat = 144384;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_44, grid, block, args, 0, streams[47]);
}
{
int repeat = 92160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_92, grid, block, args, 0, streams[48]);
}
{
int repeat = 124928;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_55, grid, block, args, 0, streams[49]);
}
{
int repeat = 110592;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_62, grid, block, args, 0, streams[50]);
}
{
int repeat = 112640;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_126, grid, block, args, 0, streams[51]);
}
{
int repeat = 103424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_34, grid, block, args, 0, streams[52]);
}
{
int repeat = 205824;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_109, grid, block, args, 0, streams[53]);
}
{
int repeat = 146432;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_47, grid, block, args, 0, streams[54]);
}
{
int repeat = 229376;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_26, grid, block, args, 0, streams[55]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_110, grid, block, args, 0, streams[56]);
}
{
int repeat = 117760;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_81, grid, block, args, 0, streams[57]);
}
{
int repeat = 126976;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_42, grid, block, args, 0, streams[58]);
}
{
int repeat = 142336;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_21, grid, block, args, 0, streams[59]);
}
{
int repeat = 240640;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_113, grid, block, args, 0, streams[60]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_16, grid, block, args, 0, streams[61]);
}
{
int repeat = 84992;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_75, grid, block, args, 0, streams[62]);
}
{
int repeat = 150528;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_115, grid, block, args, 0, streams[63]);
}
{
int repeat = 129024;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_51, grid, block, args, 0, streams[64]);
}
{
int repeat = 174080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_33, grid, block, args, 0, streams[65]);
}
{
int repeat = 158720;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_60, grid, block, args, 0, streams[66]);
}
{
int repeat = 176128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_64, grid, block, args, 0, streams[67]);
}
{
int repeat = 156672;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_79, grid, block, args, 0, streams[68]);
}
{
int repeat = 154624;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_6, grid, block, args, 0, streams[69]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_101, grid, block, args, 0, streams[70]);
}
{
int repeat = 190464;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_107, grid, block, args, 0, streams[71]);
}
{
int repeat = 183296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_63, grid, block, args, 0, streams[72]);
}
{
int repeat = 149504;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_40, grid, block, args, 0, streams[73]);
}
{
int repeat = 176128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_87, grid, block, args, 0, streams[74]);
}
{
int repeat = 233472;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_85, grid, block, args, 0, streams[75]);
}
{
int repeat = 183296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_9, grid, block, args, 0, streams[76]);
}
{
int repeat = 159744;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_105, grid, block, args, 0, streams[77]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_65, grid, block, args, 0, streams[78]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_102, grid, block, args, 0, streams[79]);
}
{
int repeat = 187392;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_96, grid, block, args, 0, streams[80]);
}
{
int repeat = 232448;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_1, grid, block, args, 0, streams[81]);
}
{
int repeat = 182272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_83, grid, block, args, 0, streams[82]);
}
{
int repeat = 173056;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_23, grid, block, args, 0, streams[83]);
}
{
int repeat = 188416;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_68, grid, block, args, 0, streams[84]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_122, grid, block, args, 0, streams[85]);
}
{
int repeat = 231424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_84, grid, block, args, 0, streams[86]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_118, grid, block, args, 0, streams[87]);
}
{
int repeat = 231424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_128, grid, block, args, 0, streams[88]);
}
{
int repeat = 246784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_104, grid, block, args, 0, streams[89]);
}
{
int repeat = 244736;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_61, grid, block, args, 0, streams[90]);
}
{
int repeat = 181248;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_36, grid, block, args, 0, streams[91]);
}
{
int repeat = 209920;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_14, grid, block, args, 0, streams[92]);
}
{
int repeat = 201728;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_50, grid, block, args, 0, streams[93]);
}
{
int repeat = 225280;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_59, grid, block, args, 0, streams[94]);
}
{
int repeat = 215040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_78, grid, block, args, 0, streams[95]);
}
{
int repeat = 216064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_70, grid, block, args, 0, streams[96]);
}
{
int repeat = 182272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_11, grid, block, args, 0, streams[97]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_100, grid, block, args, 0, streams[98]);
}
{
int repeat = 215040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_94, grid, block, args, 0, streams[99]);
}
{
int repeat = 199680;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_76, grid, block, args, 0, streams[100]);
}
{
int repeat = 258048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_119, grid, block, args, 0, streams[101]);
}
{
int repeat = 246784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_43, grid, block, args, 0, streams[102]);
}
{
int repeat = 242688;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_67, grid, block, args, 0, streams[103]);
}
{
int repeat = 242688;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_37, grid, block, args, 0, streams[104]);
}
{
int repeat = 250880;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_48, grid, block, args, 0, streams[105]);
}
{
int repeat = 230400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_56, grid, block, args, 0, streams[106]);
}
{
int repeat = 208896;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_46, grid, block, args, 0, streams[107]);
}
{
int repeat = 203776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_121, grid, block, args, 0, streams[108]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_39, grid, block, args, 0, streams[109]);
}
{
int repeat = 212992;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_123, grid, block, args, 0, streams[110]);
}
{
int repeat = 122880;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_17, grid, block, args, 0, streams[111]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_27, grid, block, args, 0, streams[112]);
}
{
int repeat = 201728;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_53, grid, block, args, 0, streams[113]);
}
{
int repeat = 220160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_74, grid, block, args, 0, streams[114]);
}
{
int repeat = 216064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_3, grid, block, args, 0, streams[115]);
}
{
int repeat = 236544;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_28, grid, block, args, 0, streams[116]);
}
{
int repeat = 218112;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_108, grid, block, args, 0, streams[117]);
}
{
int repeat = 259072;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_58, grid, block, args, 0, streams[118]);
}
{
int repeat = 261120;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_8, grid, block, args, 0, streams[119]);
}
{
int repeat = 95232;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_90, grid, block, args, 0, streams[120]);
}
{
int repeat = 118784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_77, grid, block, args, 0, streams[121]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_97, grid, block, args, 0, streams[122]);
}
{
int repeat = 141312;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_5, grid, block, args, 0, streams[123]);
}
{
int repeat = 175104;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_20, grid, block, args, 0, streams[124]);
}
{
int repeat = 194560;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_30, grid, block, args, 0, streams[125]);
}
{
int repeat = 221184;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_86, grid, block, args, 0, streams[126]);
}
{
int repeat = 241664;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_91, grid, block, args, 0, streams[127]);
}
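  // Drain every stream before stopping the profiler and destroying the streams.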
hipStreamSynchronize(streams[0]);
hipStreamSynchronize(streams[1]);
hipStreamSynchronize(streams[2]);
hipStreamSynchronize(streams[3]);
hipStreamSynchronize(streams[4]);
hipStreamSynchronize(streams[5]);
hipStreamSynchronize(streams[6]);
hipStreamSynchronize(streams[7]);
hipStreamSynchronize(streams[8]);
hipStreamSynchronize(streams[9]);
hipStreamSynchronize(streams[10]);
hipStreamSynchronize(streams[11]);
hipStreamSynchronize(streams[12]);
hipStreamSynchronize(streams[13]);
hipStreamSynchronize(streams[14]);
hipStreamSynchronize(streams[15]);
hipStreamSynchronize(streams[16]);
hipStreamSynchronize(streams[17]);
hipStreamSynchronize(streams[18]);
hipStreamSynchronize(streams[19]);
hipStreamSynchronize(streams[20]);
hipStreamSynchronize(streams[21]);
hipStreamSynchronize(streams[22]);
hipStreamSynchronize(streams[23]);
hipStreamSynchronize(streams[24]);
hipStreamSynchronize(streams[25]);
hipStreamSynchronize(streams[26]);
hipStreamSynchronize(streams[27]);
hipStreamSynchronize(streams[28]);
hipStreamSynchronize(streams[29]);
hipStreamSynchronize(streams[30]);
hipStreamSynchronize(streams[31]);
hipStreamSynchronize(streams[32]);
hipStreamSynchronize(streams[33]);
hipStreamSynchronize(streams[34]);
hipStreamSynchronize(streams[35]);
hipStreamSynchronize(streams[36]);
hipStreamSynchronize(streams[37]);
hipStreamSynchronize(streams[38]);
hipStreamSynchronize(streams[39]);
hipStreamSynchronize(streams[40]);
hipStreamSynchronize(streams[41]);
hipStreamSynchronize(streams[42]);
hipStreamSynchronize(streams[43]);
hipStreamSynchronize(streams[44]);
hipStreamSynchronize(streams[45]);
hipStreamSynchronize(streams[46]);
hipStreamSynchronize(streams[47]);
hipStreamSynchronize(streams[48]);
hipStreamSynchronize(streams[49]);
hipStreamSynchronize(streams[50]);
hipStreamSynchronize(streams[51]);
hipStreamSynchronize(streams[52]);
hipStreamSynchronize(streams[53]);
hipStreamSynchronize(streams[54]);
hipStreamSynchronize(streams[55]);
hipStreamSynchronize(streams[56]);
hipStreamSynchronize(streams[57]);
hipStreamSynchronize(streams[58]);
hipStreamSynchronize(streams[59]);
hipStreamSynchronize(streams[60]);
hipStreamSynchronize(streams[61]);
hipStreamSynchronize(streams[62]);
hipStreamSynchronize(streams[63]);
hipStreamSynchronize(streams[64]);
hipStreamSynchronize(streams[65]);
hipStreamSynchronize(streams[66]);
hipStreamSynchronize(streams[67]);
hipStreamSynchronize(streams[68]);
hipStreamSynchronize(streams[69]);
hipStreamSynchronize(streams[70]);
hipStreamSynchronize(streams[71]);
hipStreamSynchronize(streams[72]);
hipStreamSynchronize(streams[73]);
hipStreamSynchronize(streams[74]);
hipStreamSynchronize(streams[75]);
hipStreamSynchronize(streams[76]);
hipStreamSynchronize(streams[77]);
hipStreamSynchronize(streams[78]);
hipStreamSynchronize(streams[79]);
hipStreamSynchronize(streams[80]);
hipStreamSynchronize(streams[81]);
hipStreamSynchronize(streams[82]);
hipStreamSynchronize(streams[83]);
hipStreamSynchronize(streams[84]);
hipStreamSynchronize(streams[85]);
hipStreamSynchronize(streams[86]);
hipStreamSynchronize(streams[87]);
hipStreamSynchronize(streams[88]);
hipStreamSynchronize(streams[89]);
hipStreamSynchronize(streams[90]);
hipStreamSynchronize(streams[91]);
hipStreamSynchronize(streams[92]);
hipStreamSynchronize(streams[93]);
hipStreamSynchronize(streams[94]);
hipStreamSynchronize(streams[95]);
hipStreamSynchronize(streams[96]);
hipStreamSynchronize(streams[97]);
hipStreamSynchronize(streams[98]);
hipStreamSynchronize(streams[99]);
hipStreamSynchronize(streams[100]);
hipStreamSynchronize(streams[101]);
hipStreamSynchronize(streams[102]);
hipStreamSynchronize(streams[103]);
hipStreamSynchronize(streams[104]);
hipStreamSynchronize(streams[105]);
hipStreamSynchronize(streams[106]);
hipStreamSynchronize(streams[107]);
hipStreamSynchronize(streams[108]);
hipStreamSynchronize(streams[109]);
hipStreamSynchronize(streams[110]);
hipStreamSynchronize(streams[111]);
hipStreamSynchronize(streams[112]);
hipStreamSynchronize(streams[113]);
hipStreamSynchronize(streams[114]);
hipStreamSynchronize(streams[115]);
hipStreamSynchronize(streams[116]);
hipStreamSynchronize(streams[117]);
hipStreamSynchronize(streams[118]);
hipStreamSynchronize(streams[119]);
hipStreamSynchronize(streams[120]);
hipStreamSynchronize(streams[121]);
hipStreamSynchronize(streams[122]);
hipStreamSynchronize(streams[123]);
hipStreamSynchronize(streams[124]);
hipStreamSynchronize(streams[125]);
hipStreamSynchronize(streams[126]);
hipStreamSynchronize(streams[127]);
hipProfilerStop();
for (int i = 0; i < 128; i++) hipStreamDestroy(streams[i]);
}
| 88e7769a4014d645af3b2898d01b3211cf63409d.cu |
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
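// Each kernel_N below is identical except for the size of its static __shared__ array:
// every thread zeroes its own byte of shared memory and then repeatedly increments and
// decrements it, so runtime scales with the repeat argument while the shared-memory
// footprint (and hence achievable occupancy) varies from kernel to kernel.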
__global__ void kernel_1(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_2(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_3(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_4(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_5(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_6(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_7(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_8(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_9(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_10(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_11(int repeat) {
__shared__ unsigned char s[33792];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_12(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_13(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_14(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_15(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_16(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_17(int repeat) {
__shared__ unsigned char s[36864];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_18(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_19(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_20(int repeat) {
__shared__ unsigned char s[39936];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_21(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_22(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_23(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_24(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_25(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_26(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_27(int repeat) {
__shared__ unsigned char s[35840];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_28(int repeat) {
__shared__ unsigned char s[22528];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_29(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_30(int repeat) {
__shared__ unsigned char s[41984];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_31(int repeat) {
__shared__ unsigned char s[5120];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_32(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_33(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_34(int repeat) {
__shared__ unsigned char s[36864];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_35(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_36(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_37(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_38(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_39(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_40(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_41(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_42(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_43(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_44(int repeat) {
__shared__ unsigned char s[35840];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_45(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_46(int repeat) {
__shared__ unsigned char s[31744];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_47(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_48(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_49(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_50(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_51(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_52(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_53(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_54(int repeat) {
__shared__ unsigned char s[16384];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_55(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_56(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_57(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_58(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_59(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_60(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_61(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_62(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_63(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_64(int repeat) {
__shared__ unsigned char s[22528];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_65(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_66(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_67(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_68(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_69(int repeat) {
__shared__ unsigned char s[2048];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_70(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_71(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_72(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_73(int repeat) {
__shared__ unsigned char s[3072];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_74(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_75(int repeat) {
__shared__ unsigned char s[38912];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_76(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_77(int repeat) {
__shared__ unsigned char s[41984];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_78(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_79(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_80(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_81(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_82(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_83(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_84(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_85(int repeat) {
__shared__ unsigned char s[7168];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_86(int repeat) {
__shared__ unsigned char s[37888];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_87(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_88(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_89(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_90(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_91(int repeat) {
__shared__ unsigned char s[39936];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_92(int repeat) {
__shared__ unsigned char s[37888];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_93(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_94(int repeat) {
__shared__ unsigned char s[14336];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_95(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_96(int repeat) {
__shared__ unsigned char s[16384];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_97(int repeat) {
__shared__ unsigned char s[38912];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_98(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_99(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_100(int repeat) {
__shared__ unsigned char s[33792];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_101(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_102(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_103(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_104(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_105(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_106(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_107(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_108(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_109(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_110(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_111(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_112(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_113(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_114(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_115(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_116(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_117(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_118(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_119(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_120(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_121(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_122(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_123(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_124(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_125(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_126(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_127(int repeat) {
__shared__ unsigned char s[8192];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_128(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
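// Host driver: creates one stream per kernel, launches each kernel_N once
// asynchronously on its own stream with a kernel-specific repeat count, waits for
// all streams to finish, then tears everything down.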
int main() {
cudaStream_t streams[128];
for (int i = 0; i < 128; i++) cudaStreamCreate(&streams[i]);
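// Each block below packs the per-kernel repeat count into the cudaLaunchKernel
// argument array and launches one kernel on a dedicated stream with a grid of 2
// blocks of 32 threads and no dynamic shared memory.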
{
int repeat = 33792;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_93, grid, block, args, 0, streams[0]);
}
{
int repeat = 48128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_18, grid, block, args, 0, streams[1]);
}
{
int repeat = 34816;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_32, grid, block, args, 0, streams[2]);
}
{
int repeat = 43008;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_29, grid, block, args, 0, streams[3]);
}
{
int repeat = 36864;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_106, grid, block, args, 0, streams[4]);
}
{
int repeat = 54272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_12, grid, block, args, 0, streams[5]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_98, grid, block, args, 0, streams[6]);
}
{
int repeat = 35840;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_13, grid, block, args, 0, streams[7]);
}
{
int repeat = 78848;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_72, grid, block, args, 0, streams[8]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_71, grid, block, args, 0, streams[9]);
}
{
int repeat = 39936;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_57, grid, block, args, 0, streams[10]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_25, grid, block, args, 0, streams[11]);
}
{
int repeat = 64512;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_127, grid, block, args, 0, streams[12]);
}
{
int repeat = 49152;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_2, grid, block, args, 0, streams[13]);
}
{
int repeat = 72704;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_124, grid, block, args, 0, streams[14]);
}
{
int repeat = 51200;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_112, grid, block, args, 0, streams[15]);
}
{
int repeat = 45056;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_88, grid, block, args, 0, streams[16]);
}
{
int repeat = 87040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_22, grid, block, args, 0, streams[17]);
}
{
int repeat = 104448;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_69, grid, block, args, 0, streams[18]);
}
{
int repeat = 61440;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_15, grid, block, args, 0, streams[19]);
}
{
int repeat = 163840;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_114, grid, block, args, 0, streams[20]);
}
{
int repeat = 62464;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_45, grid, block, args, 0, streams[21]);
}
{
int repeat = 55296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_38, grid, block, args, 0, streams[22]);
}
{
int repeat = 86016;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_10, grid, block, args, 0, streams[23]);
}
{
int repeat = 92160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_89, grid, block, args, 0, streams[24]);
}
{
int repeat = 93184;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_80, grid, block, args, 0, streams[25]);
}
{
int repeat = 88064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_95, grid, block, args, 0, streams[26]);
}
{
int repeat = 47104;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_24, grid, block, args, 0, streams[27]);
}
{
int repeat = 102400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_116, grid, block, args, 0, streams[28]);
}
{
int repeat = 132096;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_125, grid, block, args, 0, streams[29]);
}
{
int repeat = 51200;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_35, grid, block, args, 0, streams[30]);
}
{
int repeat = 102400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_66, grid, block, args, 0, streams[31]);
}
{
int repeat = 63488;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_4, grid, block, args, 0, streams[32]);
}
{
int repeat = 100352;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_7, grid, block, args, 0, streams[33]);
}
{
int repeat = 68608;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_99, grid, block, args, 0, streams[34]);
}
{
int repeat = 167936;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_52, grid, block, args, 0, streams[35]);
}
{
int repeat = 75776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_103, grid, block, args, 0, streams[36]);
}
{
int repeat = 77824;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_41, grid, block, args, 0, streams[37]);
}
{
int repeat = 189440;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_31, grid, block, args, 0, streams[38]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_73, grid, block, args, 0, streams[39]);
}
{
int repeat = 75776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_117, grid, block, args, 0, streams[40]);
}
{
int repeat = 113664;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_54, grid, block, args, 0, streams[41]);
}
{
int repeat = 119808;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_82, grid, block, args, 0, streams[42]);
}
{
int repeat = 119808;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_19, grid, block, args, 0, streams[43]);
}
{
int repeat = 159744;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_111, grid, block, args, 0, streams[44]);
}
{
int repeat = 81920;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_120, grid, block, args, 0, streams[45]);
}
{
int repeat = 111616;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_49, grid, block, args, 0, streams[46]);
}
{
int repeat = 144384;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_44, grid, block, args, 0, streams[47]);
}
{
int repeat = 92160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_92, grid, block, args, 0, streams[48]);
}
{
int repeat = 124928;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_55, grid, block, args, 0, streams[49]);
}
{
int repeat = 110592;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_62, grid, block, args, 0, streams[50]);
}
{
int repeat = 112640;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_126, grid, block, args, 0, streams[51]);
}
{
int repeat = 103424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_34, grid, block, args, 0, streams[52]);
}
{
int repeat = 205824;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_109, grid, block, args, 0, streams[53]);
}
{
int repeat = 146432;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_47, grid, block, args, 0, streams[54]);
}
{
int repeat = 229376;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_26, grid, block, args, 0, streams[55]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_110, grid, block, args, 0, streams[56]);
}
{
int repeat = 117760;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_81, grid, block, args, 0, streams[57]);
}
{
int repeat = 126976;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_42, grid, block, args, 0, streams[58]);
}
{
int repeat = 142336;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_21, grid, block, args, 0, streams[59]);
}
{
int repeat = 240640;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_113, grid, block, args, 0, streams[60]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_16, grid, block, args, 0, streams[61]);
}
{
int repeat = 84992;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_75, grid, block, args, 0, streams[62]);
}
{
int repeat = 150528;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_115, grid, block, args, 0, streams[63]);
}
{
int repeat = 129024;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_51, grid, block, args, 0, streams[64]);
}
{
int repeat = 174080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_33, grid, block, args, 0, streams[65]);
}
{
int repeat = 158720;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_60, grid, block, args, 0, streams[66]);
}
{
int repeat = 176128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_64, grid, block, args, 0, streams[67]);
}
{
int repeat = 156672;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_79, grid, block, args, 0, streams[68]);
}
{
int repeat = 154624;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_6, grid, block, args, 0, streams[69]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_101, grid, block, args, 0, streams[70]);
}
{
int repeat = 190464;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_107, grid, block, args, 0, streams[71]);
}
{
int repeat = 183296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_63, grid, block, args, 0, streams[72]);
}
{
int repeat = 149504;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_40, grid, block, args, 0, streams[73]);
}
{
int repeat = 176128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_87, grid, block, args, 0, streams[74]);
}
{
int repeat = 233472;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_85, grid, block, args, 0, streams[75]);
}
{
int repeat = 183296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_9, grid, block, args, 0, streams[76]);
}
{
int repeat = 159744;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_105, grid, block, args, 0, streams[77]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_65, grid, block, args, 0, streams[78]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_102, grid, block, args, 0, streams[79]);
}
{
int repeat = 187392;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_96, grid, block, args, 0, streams[80]);
}
{
int repeat = 232448;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_1, grid, block, args, 0, streams[81]);
}
{
int repeat = 182272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_83, grid, block, args, 0, streams[82]);
}
{
int repeat = 173056;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_23, grid, block, args, 0, streams[83]);
}
{
int repeat = 188416;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_68, grid, block, args, 0, streams[84]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_122, grid, block, args, 0, streams[85]);
}
{
int repeat = 231424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_84, grid, block, args, 0, streams[86]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_118, grid, block, args, 0, streams[87]);
}
{
int repeat = 231424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_128, grid, block, args, 0, streams[88]);
}
{
int repeat = 246784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_104, grid, block, args, 0, streams[89]);
}
{
int repeat = 244736;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_61, grid, block, args, 0, streams[90]);
}
{
int repeat = 181248;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_36, grid, block, args, 0, streams[91]);
}
{
int repeat = 209920;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_14, grid, block, args, 0, streams[92]);
}
{
int repeat = 201728;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_50, grid, block, args, 0, streams[93]);
}
{
int repeat = 225280;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_59, grid, block, args, 0, streams[94]);
}
{
int repeat = 215040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_78, grid, block, args, 0, streams[95]);
}
{
int repeat = 216064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_70, grid, block, args, 0, streams[96]);
}
{
int repeat = 182272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_11, grid, block, args, 0, streams[97]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_100, grid, block, args, 0, streams[98]);
}
{
int repeat = 215040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_94, grid, block, args, 0, streams[99]);
}
{
int repeat = 199680;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_76, grid, block, args, 0, streams[100]);
}
{
int repeat = 258048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_119, grid, block, args, 0, streams[101]);
}
{
int repeat = 246784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_43, grid, block, args, 0, streams[102]);
}
{
int repeat = 242688;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_67, grid, block, args, 0, streams[103]);
}
{
int repeat = 242688;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_37, grid, block, args, 0, streams[104]);
}
{
int repeat = 250880;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_48, grid, block, args, 0, streams[105]);
}
{
int repeat = 230400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_56, grid, block, args, 0, streams[106]);
}
{
int repeat = 208896;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_46, grid, block, args, 0, streams[107]);
}
{
int repeat = 203776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_121, grid, block, args, 0, streams[108]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_39, grid, block, args, 0, streams[109]);
}
{
int repeat = 212992;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_123, grid, block, args, 0, streams[110]);
}
{
int repeat = 122880;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_17, grid, block, args, 0, streams[111]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_27, grid, block, args, 0, streams[112]);
}
{
int repeat = 201728;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_53, grid, block, args, 0, streams[113]);
}
{
int repeat = 220160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_74, grid, block, args, 0, streams[114]);
}
{
int repeat = 216064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_3, grid, block, args, 0, streams[115]);
}
{
int repeat = 236544;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_28, grid, block, args, 0, streams[116]);
}
{
int repeat = 218112;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_108, grid, block, args, 0, streams[117]);
}
{
int repeat = 259072;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_58, grid, block, args, 0, streams[118]);
}
{
int repeat = 261120;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_8, grid, block, args, 0, streams[119]);
}
{
int repeat = 95232;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_90, grid, block, args, 0, streams[120]);
}
{
int repeat = 118784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_77, grid, block, args, 0, streams[121]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_97, grid, block, args, 0, streams[122]);
}
{
int repeat = 141312;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_5, grid, block, args, 0, streams[123]);
}
{
int repeat = 175104;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_20, grid, block, args, 0, streams[124]);
}
{
int repeat = 194560;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_30, grid, block, args, 0, streams[125]);
}
{
int repeat = 221184;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_86, grid, block, args, 0, streams[126]);
}
{
int repeat = 241664;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_91, grid, block, args, 0, streams[127]);
}
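// Block until every stream has drained; cudaProfilerStop() (declared in
// cuda_profiler_api.h, assumed to be included earlier in this file) then closes the
// profiled region before the streams are destroyed.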
for (int i = 0; i < 128; i++) cudaStreamSynchronize(streams[i]);
cudaProfilerStop();
for (int i = 0; i < 128; i++) cudaStreamDestroy(streams[i]);
return 0;
}