// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaRTCommon.h"
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include "conffilehelper.h"
#include "cudaBilateralFilter.h"
#define FANN_NO_DLL
#include "floatfann.h"
#define BLOCK_SIZE 16
#define NORMALRAY_BOUND_MAX 5
#define PATHSTREAM_SIZE 1E4*64
#define LIGHTRAY_BOUND_MAX 5
#define LIGHTVERTEX_N 640
#define PATHQUEUE_MUL 1
#define PERFBREAKDOWN
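// This translation unit appears to implement a wavefront bidirectional path tracer
// (eye paths connected to stored light vertices) with MSE-driven adaptive sampling and
// a bilateral filter over the accumulated buffers; the constants above size the path
// stream, the bounce limits, and the light-vertex cache used throughout.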
namespace cudaRTBDPTAtomicAdapBilatAtomic
{
const char* g_enumAdapModeName[] = { "PDF", "Const" };
NPAttrHelper::Attrib g_enumAdapMode("Adaptive Mode", g_enumAdapModeName, 2, 0);
NPAttrHelper::Attrib g_uiDesiredSamplingN("DesiredSamplingN", 5);
NPAttrHelper::Attrib g_fMinTraceProb("MinTraceProb", 0.f);
const char* g_enumDebugModeName[] = { "None", "Traced", "Prob", "Prob With Limit", "Filtered Result" };
NPAttrHelper::Attrib g_enumDebugMode("Debug Mode", g_enumDebugModeName, 5, 0);
NPAttrHelper::Attrib g_fFilterColorEuD("Filter Color E Delta", 50.f);
NPAttrHelper::Attrib g_fFilterPosEuD("Filter Pos E Delta", 1.f);
NPAttrHelper::Attrib g_fFilterNormEuD("Filter Norm E Delta", 0.25f);
NPAttrHelper::Attrib g_fFilterDiffEuD("Filter Diff E Delta", 0.1f);
NPAttrHelper::Attrib g_uiFilterRadius("Filter Radius", 5);
NPAttrHelper::Attrib g_bFilterDiffuse("Filter Diff Flag", false);
#ifdef PERFBREAKDOWN
NPAttrHelper::Attrib g_fAvgProcessTimeA("Avg Proc Time A", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeB("Avg Proc Time B", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeC("Avg Proc Time C", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeD("Avg Proc Time D", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeE("Avg Proc Time E", 0.f);
#endif
#ifdef PERFBREAKDOWN
RT_ATTRIBS_N(15)
#else
RT_ATTRIBS_N(10)
#endif
RT_ATTRIBS_BGN
RT_ATTRIB_DECLARE(0, Adaptive Mode, g_enumAdapMode)
RT_ATTRIB_DECLARE(1, Desired Max Sampling, g_uiDesiredSamplingN)
RT_ATTRIB_DECLARE(2, Min Trace Probability, g_fMinTraceProb)
RT_ATTRIB_DECLARE(3, Debug Mode, g_enumDebugMode)
RT_ATTRIB_DECLARE(4, Filter Color EuD, g_fFilterColorEuD)
RT_ATTRIB_DECLARE(5, Filter Pos EuD, g_fFilterPosEuD)
RT_ATTRIB_DECLARE(6, Filter Norm EuD, g_fFilterNormEuD)
RT_ATTRIB_DECLARE(7, Filter Diff EuD, g_fFilterDiffEuD)
RT_ATTRIB_DECLARE(8, Filter Radius, g_uiFilterRadius)
RT_ATTRIB_DECLARE(9, Filter Diffuse Flag, g_bFilterDiffuse)
#ifdef PERFBREAKDOWN
RT_ATTRIB_DECLARE(10, Avg Filter Time, g_fAvgProcessTimeA)
RT_ATTRIB_DECLARE(11, Avg MSE Time, g_fAvgProcessTimeB)
RT_ATTRIB_DECLARE(12, Avg Gen Time, g_fAvgProcessTimeC)
RT_ATTRIB_DECLARE(13, Avg Trace Time, g_fAvgProcessTimeD)
RT_ATTRIB_DECLARE(14, Avg Accum Time, g_fAvgProcessTimeE)
#endif
RT_ATTRIBS_END
struct LightVertex
{
float3 pos;
float3 norm;
float3 irrad;
float3 irradDir;
float3 diff;
float3 emissive;
float specular;
float metallic;
float roughness;
float pathPotential;
__hd__ LightVertex()
{
pos = norm = irrad = irradDir = make_float3(0.f, 0.f, 0.f);
pathPotential = 1.f;
}
};
LightVertex* g_devLightVertices = nullptr;
uint g_uLightVerticesSize = 0;
uint* g_devLightTri = nullptr;
uint g_lightTriN = 0;
void freeLightPathMem()
{
g_uLightVerticesSize = 0;
g_lightTriN = 0;
CUFREE(g_devLightVertices);
CUFREE(g_devLightTri);
}
void allocateLightPathMem()
{
HANDLE_ERROR(hipMalloc((void**)&g_devLightVertices, sizeof(LightVertex) * LIGHTVERTEX_N));
HANDLE_ERROR(hipMemset((void*)g_devLightVertices, 0, sizeof(LightVertex) * LIGHTVERTEX_N));
}
void updateLightTriCudaMem(RTScene* scene)
{
g_lightTriN = 0;
CUFREE(g_devLightTri);
std::vector<uint> lightTri;
for (uint i = 0; i < scene->m_pTriangles.size(); i++)
{
if (NPMathHelper::Vec3::length(scene->m_pMaterials[scene->m_pTriangles[i].matInd].emissive) > 0.f)
lightTri.push_back(i);
}
uint* tempLightTri = new uint[lightTri.size()];
for (uint i = 0; i < lightTri.size(); i++)
{
tempLightTri[i] = lightTri[i];
}
g_lightTriN = lightTri.size();
HANDLE_ERROR(hipMalloc((void**)&g_devLightTri, sizeof(uint) * g_lightTriN));
HANDLE_ERROR(hipMemcpy(g_devLightTri, tempLightTri, sizeof(uint) * g_lightTriN, hipMemcpyHostToDevice));
DEL_ARRAY(tempLightTri);
}
enum RAYTYPE
{
RAYTYPE_EYE = 0,
RAYTYPE_DIFF = 1,
RAYTYPE_SPEC = 2,
RAYTYPE_LIGHT = 3
};
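// Ray classification carried along each path; vertices last produced by a diffuse bounce
// (RAYTYPE_DIFF) are the ones later connected to stored light vertices
// (see is_connectToLightPath below).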
struct PTPathVertex
{
uint isTerminated;
uint2 pathPixel;
float3 pathOutDir;
float3 pathVertexPos;
float3 pathOutMulTerm;
RAYTYPE pathType;
float3 pathSample;
float3 pathAccumSample;
uint pathSampleN;
uint pathSampleDepth;
hiprandState_t randState;
// for connecting light path
float3 pathInMulTerm;
float3 pathInDir;
float3 origNorm;
float3 origDiff;
float origMetallic;
float origRoughness;
float origSpecular;
float origTrans;
// for adaptive sampling
float pathPotential;
float pathAccumPotential;
// for filtering
float3 pathDirectPos;
float3 pathDirectNorm;
float3 pathDirectDiffuse;
__device__ PTPathVertex()
: isTerminated(true)
, pathPixel(make_uint2(0, 0))
, pathOutDir(make_float3(0.f, 1.f, 0.f))
, pathVertexPos(make_float3(0.f, 0.f, 0.f))
, pathOutMulTerm(make_float3(1.f, 1.f, 1.f))
, pathType(RAYTYPE_EYE)
, pathSample(make_float3(0.f, 0.f, 0.f))
, pathAccumSample(make_float3(0.f, 0.f, 0.f))
, pathSampleN(0)
, pathSampleDepth(0)
, randState()
, pathInMulTerm(make_float3(0.f, 0.f, 0.f))
, pathInDir(make_float3(0.f, 0.f, 0.f))
, origNorm(make_float3(0.f, 1.f, 0.f))
, origDiff(make_float3(0.f, 0.f, 0.f))
, origMetallic(0.f)
, origRoughness(0.f)
, origSpecular(0.f)
, origTrans(0.f)
, pathPotential(1.f)
, pathAccumPotential(0.f)
, pathDirectPos(make_float3(0.f, 0.f, 0.f))
, pathDirectNorm(make_float3(0.f, 0.f, 0.f))
, pathDirectDiffuse(make_float3(0.f, 0.f, 0.f))
{}
__device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos, RAYTYPE _pathType, hiprandState_t _randState)
: isTerminated(_isTerminated)
, pathPixel(_pathPixel)
, pathOutDir(_pathOutDir)
, pathVertexPos(_pathVertexPos)
, pathOutMulTerm(make_float3(1.f, 1.f, 1.f))
, pathType(_pathType)
, pathSample(make_float3(0.f, 0.f, 0.f))
, pathAccumSample(make_float3(0.f, 0.f, 0.f))
, pathSampleN(0)
, pathSampleDepth(0)
, randState(_randState)
, pathInMulTerm(make_float3(0.f, 0.f, 0.f))
, pathInDir(make_float3(0.f, 0.f, 0.f))
, origNorm(make_float3(0.f, 1.f, 0.f))
, origDiff(make_float3(0.f, 0.f, 0.f))
, origMetallic(0.f)
, origRoughness(0.f)
, origSpecular(0.f)
, origTrans(0.f)
, pathPotential(1.f)
, pathAccumPotential(0.f)
, pathDirectPos(make_float3(0.f, 0.f, 0.f))
, pathDirectNorm(make_float3(0.f, 0.f, 0.f))
, pathDirectDiffuse(make_float3(0.f, 0.f, 0.f))
{}
};
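// Path bookkeeping: g_devPathQueue holds every generated sample for the frame, while
// g_devPathStream holds pointers to the currently active subset being traced one bounce
// at a time; g_devTempPathStream receives the compacted survivors and g_devAtomicN is
// the device-side counter used for compaction.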
uint* g_devAtomicN = nullptr;
uint* g_devTempPathQueue = nullptr;
PTPathVertex* g_devPathQueue = nullptr;
uint g_uPathQueueCur = 0;
uint g_uPathQueueSize = 0;
PTPathVertex** g_devPathStream = nullptr;
PTPathVertex** g_devTempPathStream = nullptr;
PTPathVertex** g_devEyeLightConPathStream = nullptr;
uint g_uPathStreamSize = PATHSTREAM_SIZE;
void freeStreamMem()
{
g_uPathQueueCur = g_uPathQueueSize = 0;
CUFREE(g_devTempPathQueue);
CUFREE(g_devPathQueue);
CUFREE(g_devPathStream);
CUFREE(g_devTempPathStream);
CUFREE(g_devEyeLightConPathStream);
CUFREE(g_devAtomicN);
}
void allocateStreamMem(uint queueSize = 480000)
{
g_uPathQueueSize = queueSize;
HANDLE_ERROR(hipMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(hipMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(hipMalloc((void**)&g_devTempPathQueue, sizeof(uint) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(hipMemset((void*)g_devTempPathQueue, 0, sizeof(uint) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(hipMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(hipMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(hipMalloc((void**)&g_devTempPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(hipMemset((void*)g_devTempPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(hipMalloc((void**)&g_devEyeLightConPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(hipMemset((void*)g_devEyeLightConPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(hipMalloc((void**)&g_devAtomicN, sizeof(uint)));
HANDLE_ERROR(hipMemset((void*)g_devAtomicN, 0, sizeof(uint)));
}
float* g_devResultData = nullptr;
float* g_devAccResultData = nullptr;
float* g_devResultVarData = nullptr;
uint* g_devSampleResultN = nullptr;
float* g_devTempResultData = nullptr;
uint* g_devTempResultN = nullptr;
float* g_devTempPositionData = nullptr;
float* g_devTempNormalData = nullptr;
float* g_devTempDiffuseData = nullptr;
float* g_devPositionData = nullptr;
float* g_devNormalData = nullptr;
float* g_devDiffuseData = nullptr;
float* g_devFilteredResult = nullptr;
float* g_devFilterGaussianConst = nullptr;
NPMathHelper::Mat4x4 g_matLastCamMat;
NPMathHelper::Mat4x4 g_matCurCamMat;
uint32 g_uCurFrameN = 0;
size_t g_resultDataSize = 0;
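// Wang hash used to decorrelate the per-frame RNG seeds, e.g. the kernels below call
// hiprand_init(WangHash(frameN) + pixelIndex, 0, 0, &state).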
uint32 WangHash(uint32 a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
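// Extends one bounce of each active light sub-path: the hit point is recorded as a
// LightVertex (position, normal, carried irradiance, material terms) for later eye-path
// connections, then the path is continued with the same GGX importance sampling and
// Russian-roulette termination used for eye paths.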
__global__ void pt_traceLight_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize, LightVertex* lightVertices, uint curLightVerticesSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= activePathStreamSize || pathStream[x]->isTerminated) return;
PTPathVertex* procVertex = pathStream[x];
CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir);
TracePrimitiveResult traceResult;
if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false))
{
RTTriangle* tri = &triangles[traceResult.triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v;
float3 triPos = V32F3(v0->pos) * traceResult.w + V32F3(v1->pos) * traceResult.u + V32F3(v2->pos) * traceResult.v;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm;
lightVertices[curLightVerticesSize + x].irrad = procVertex->pathSample;
lightVertices[curLightVerticesSize + x].irradDir = -1 * ray.dir;
lightVertices[curLightVerticesSize + x].norm = nl;
lightVertices[curLightVerticesSize + x].pos = triPos;
lightVertices[curLightVerticesSize + x].diff = diff;
lightVertices[curLightVerticesSize + x].emissive = emissive;
lightVertices[curLightVerticesSize + x].specular = specular;
lightVertices[curLightVerticesSize + x].metallic = metallic;
lightVertices[curLightVerticesSize + x].roughness = roughness;
lightVertices[curLightVerticesSize + x].pathPotential = procVertex->pathPotential;
{
// Get some random microfacet
float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(&procVertex->randState), hiprand_uniform(&procVertex->randState)), roughness, nl);
// Calculate Fresnel
float voH = vecDot(-1 * ray.dir, hDir);
float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
// PDF
float NoH = vecDot(nl, hDir);
float VoH = vecDot(-1 * ray.dir, hDir);
float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH);
// Reflected or Refracted
float reflProb = lerp(length(brdf_f), 1.0f, metallic);
float refrProb = trans;
float3 reflDir;
float3 refrDir;
CURay nextRay = ray;
float3 lightMulTerm;
RAYTYPE nextRayType = procVertex->pathType;
if (refrProb > 0)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(hDir, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
//if (cos2t < 0.f)
//{
// reflProb = 1.0f;//refrProb = 0.f;
//}
//else
{
refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t)));
}
}
if (reflProb > 0)
{
reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir));
if (vecDot(reflDir, nl) < 0.f)
reflProb = 0.f;
}
// Reflected
if (ProbabilityRand(&procVertex->randState, reflProb))
{
nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir);
// ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
// Microfacet specular = D*G*F / (4*NoL*NoV)
// pdf = D * NoH / (4 * VoH)
// (G * F * VoH) / (NoV * NoH)
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, reflDir);
float G = GeometricVisibility(roughness, NoV, NoL, VoH);
//shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive;
lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb);
nextRayType = RAYTYPE_SPEC;
pdf *= reflProb;
}
// Diffused or Transmitted
else
{
// Transmitted
if (ProbabilityRand(&procVertex->randState, refrProb))
{
nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float cosine = vecDot(-1 * nl, refrDir);
//shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive;
lightMulTerm = cosine * diff / (refrProb * (1 - reflProb));
nextRayType = RAYTYPE_SPEC;
pdf *= (refrProb * (1.f - reflProb));
}
// Diffused
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * hiprand_uniform(&procVertex->randState);
float r2cos = sqrtf(hiprand_uniform(&procVertex->randState));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, diffDir);
//shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive;
lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb));
nextRayType = RAYTYPE_DIFF;
pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl);
}
}
if (nextRayType != RAYTYPE_DIFF)
lightVertices[curLightVerticesSize + x].irrad = make_float3(0.f, 0.f, 0.f);
if (vecDot(nextRay.dir, nl) < 0.f)
lightVertices[curLightVerticesSize + x].norm = -1 * lightVertices[curLightVerticesSize + x].norm;
procVertex->pathSample = emissive + vecMul(procVertex->pathSample, lightMulTerm);
procVertex->pathPotential *= pdf;
float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm);
if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f)
pixelContrib = 0.f;
if (hiprand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX)
{
procVertex->isTerminated = true;
}
else
{
procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm);
procVertex->pathOutDir = nextRay.dir;
procVertex->pathVertexPos = nextRay.orig;
procVertex->pathType = nextRayType;
procVertex->pathSampleDepth++;
}
}
}
else
{
lightVertices[curLightVerticesSize + x] = lightVertices[procVertex->pathPixel.x];
procVertex->isTerminated = true;
}
}
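// Advances one bounce of each active eye sub-path: accumulates emissive hits weighted by
// the running throughput, caches the first-hit position/normal/albedo for the bilateral
// filter, and terminates by Russian roulette on the throughput or when the bounce count
// reaches NORMALRAY_BOUND_MAX.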
__global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= activePathStreamSize || pathStream[x]->isTerminated) return;
PTPathVertex* procVertex = pathStream[x];
CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir);
TracePrimitiveResult traceResult;
if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false))
{
RTTriangle* tri = &triangles[traceResult.triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v;
float3 triPos = V32F3(v0->pos) * traceResult.w + V32F3(v1->pos) * traceResult.u + V32F3(v2->pos) * traceResult.v;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm;
{
// Get some random microfacet
float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(&procVertex->randState), hiprand_uniform(&procVertex->randState)), roughness, nl);
// Calculate Fresnel
float voH = vecDot(-1 * ray.dir, hDir);
float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
// PDF
float NoH = vecDot(nl, hDir);
float VoH = vecDot(-1 * ray.dir, hDir);
float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH + M_EPSILON);
// Reflected or Refracted
float reflProb = lerp(length(brdf_f), 1.0f, metallic);
float refrProb = trans;
float3 reflDir;
float3 refrDir;
CURay nextRay = ray;
float3 lightMulTerm;
RAYTYPE nextRayType = procVertex->pathType;
if (refrProb > 0)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(hDir, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
//if (cos2t < 0.f)
//{
// reflProb = 1.0f;//refrProb = 0.f;
//}
//else
{
refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t)));
}
}
if (reflProb > 0)
{
reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir));
if (vecDot(reflDir, nl) < 0.f)
reflProb = 0.f;
}
// Reflected
if (ProbabilityRand(&procVertex->randState, reflProb))
{
nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir + reflDir * M_FLT_BIAS_EPSILON, reflDir);
// ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
// Microfacet specular = D*G*F / (4*NoL*NoV)
// pdf = D * NoH / (4 * VoH)
// (G * F * VoH) / (NoV * NoH)
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, reflDir);
float G = GeometricVisibility(roughness, NoV, NoL, VoH);
//shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive;
lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb);
nextRayType = RAYTYPE_SPEC;
pdf *= reflProb;
}
// Diffused or Transmitted
else
{
// Transmitted
if (ProbabilityRand(&procVertex->randState, refrProb))
{
nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float cosine = vecDot(-1 * nl, refrDir);
//shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive;
lightMulTerm = cosine * diff / (refrProb * (1 - reflProb));
nextRayType = RAYTYPE_SPEC;
pdf *= (refrProb * (1.f - reflProb));
}
// Diffused
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * hiprand_uniform(&procVertex->randState);
float r2cos = sqrtf(hiprand_uniform(&procVertex->randState));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, diffDir);
//shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive;
lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb));
nextRayType = RAYTYPE_DIFF;
pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl);
}
}
procVertex->pathSample = procVertex->pathSample + vecMul(emissive, procVertex->pathOutMulTerm);
procVertex->origDiff = diff;
procVertex->pathInDir = -1 * ray.dir;
procVertex->origNorm = nl;
procVertex->origRoughness = roughness;
procVertex->origMetallic = metallic;
procVertex->origSpecular = specular;
procVertex->origTrans = trans;
procVertex->pathInMulTerm = procVertex->pathOutMulTerm;
procVertex->pathPotential *= pdf;
if (procVertex->pathSampleDepth == 0)
{
procVertex->pathDirectPos = triPos;
procVertex->pathDirectNorm = nl;
procVertex->pathDirectDiffuse = diff;
}
float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm);
if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f)
pixelContrib = 0.f;
if (hiprand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX)
{
procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample;
procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential;
procVertex->pathSampleN++;
procVertex->isTerminated = true;
}
else
{
procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm);
procVertex->pathOutDir = nextRay.dir;
procVertex->pathSampleDepth++;
}
procVertex->pathVertexPos = nextRay.orig;
procVertex->pathType = nextRayType;
}
}
else
{
procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample;
procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential;
procVertex->pathSampleN++;
procVertex->isTerminated = true;
}
}
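// Seeds one light sub-path per queue slot: picks a random emissive triangle, samples a
// point on it with uniform barycentric coordinates (the w/u flip keeps the sample inside
// the triangle), emits a randomly sampled ray about the surface normal, and also stores
// the emitter itself as the first LightVertex.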
__global__ void pt_genLightPathQueue_kernel(uint32 frameN, uint32 hashedFrameN, uint* lightTri, uint lightTriN, RTVertex* vertices,
RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex* pathQueue, uint pathQueueCap, LightVertex* lightVertices, uint curLightVerticesSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueCap) return;
hiprandState_t randstate;
hiprand_init(hashedFrameN + x, 0, 0, &randstate);
uint lightSourceId = hiprand_uniform(&randstate) * lightTriN;
float lightW = hiprand_uniform(&randstate);
float lightU = hiprand_uniform(&randstate);
if (lightW + lightU > 1.0f)
{
lightW = 1.f - lightW;
lightU = 1.f - lightU;
}
float lightV = 1.f - lightW - lightU;
uint triId = lightTri[lightSourceId];
RTTriangle* tri = &triangles[triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * lightW + uv1 * lightU + uv2 * lightV;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 triNorm = n0 * lightW + n1 * lightU + n2 * lightV;
float3 triPos = V32F3(v0->pos) * lightW + V32F3(v1->pos) * lightU + V32F3(v2->pos) * lightV;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, triNorm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 w = triNorm;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * hiprand_uniform(&randstate);
float r2cos = sqrtf(hiprand_uniform(&randstate));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
pathQueue[x] = PTPathVertex(false, make_uint2(curLightVerticesSize + x, 0), diffDir
, triPos + M_FLT_BIAS_EPSILON * triNorm, RAYTYPE_LIGHT, randstate);
pathQueue[x].pathSample = emissive;
lightVertices[curLightVerticesSize + x].irrad = emissive;
lightVertices[curLightVerticesSize + x].irradDir = make_float3(0.f, 0.f, 0.f);
lightVertices[curLightVerticesSize + x].norm = triNorm;
lightVertices[curLightVerticesSize + x].pos = triPos;
lightVertices[curLightVerticesSize + x].diff = diff;
lightVertices[curLightVerticesSize + x].emissive = emissive;
lightVertices[curLightVerticesSize + x].specular = specular;
lightVertices[curLightVerticesSize + x].metallic = metallic;
lightVertices[curLightVerticesSize + x].roughness = roughness;
}
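// Generates the primary eye rays: one path per pixel through a pinhole camera, with a
// sub-pixel jitter derived from the per-pixel RNG.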
__global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov,
float width, float height, uint32 frameN, uint32 hashedFrameN, PTPathVertex* pathQueue)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
uint ind = (y * width + x);
float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height;
float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f);
hiprandState_t randstate;
hiprand_init(hashedFrameN + ind, 0, 0, &randstate);
float au = u + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float av = v + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float3 dir = normalize(camRight * au + camUp * av + camDir);
pathQueue[ind] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate);
}
__global__ void pt_fillTempAdapPathQueue_kernel(uint* pathQueue, uint fillSize)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= fillSize) return;
pathQueue[ind] = ind;
}
__global__ void pt_genTempAdapPathQueue_kernel(float width, float height, uint32 hashedFrameN, uint32 seedoffset
, float* genChance, uint* pathQueue, float minProb = 0.f, float mulRand = 1.f)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
uint ind = (y * width + x);
hiprandState_t randstate;
hiprand_init(hashedFrameN + ind + seedoffset, 0, 0, &randstate);
pathQueue[ind] = x + y * width;
//float modChance = 1.f - expf(-genChance[ind]);
if (hiprand_uniform(&randstate)*mulRand > fmaxf(genChance[ind], minProb))
{
pathQueue[ind] = 0 - 1;
}
}
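// Adaptive path generation: each pixel is granted a share of the per-frame budget
// proportional to its MSE estimate (mseData[ind] / sumMSE), and the resulting eye rays
// are appended to the queue through the atomic counter.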
__global__ void pt_genAdapPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov,
float width, float height, uint32 frameN, uint32 hashedFrameN, uint* atomicN, float* mseData, float sumMSE, PTPathVertex* pathQueue, uint genSize)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= width * height) return;
uint y = ind / width;
uint x = ind - y * width;
float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height;
float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f);
float mseRatio = mseData[ind] / sumMSE;
uint sampleN = genSize * mseRatio;
for (uint i = 0; i < sampleN; i++)
{
hiprandState_t randstate;
hiprand_init(hashedFrameN + ind + i * genSize, 0, 0, &randstate);
float au = u + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float av = v + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float3 dir = normalize(camRight * au + camUp * av + camDir);
pathQueue[atomicAggInc((int *)atomicN)] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate);
}
}
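// Evaluates the reflected radiance at a surface for a single incoming irradiance sample:
// GGX specular (D * Vis * Fresnel) plus a Fresnel-weighted diffuse lobe, scaled by NoL
// and by the diffuse/specular weights in diffspec.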
__device__ float3 GetShadingResult(const float3& lightOutDir, const float3& lightInDir, const float3& lightInIrrad, const float3& norm,
const float3& diff, const float metallic, const float roughness, const float specular, const float2 diffspec)
{
if (vecDot(norm, lightInDir) <= 0.f)
return make_float3(0.f, 0.f, 0.f);
float3 h = normalize(lightOutDir + lightInDir);
float voH = vecDot(lightOutDir, h);
float noV = vecDot(norm, lightOutDir);
float noH = vecDot(norm, h);
float noL = vecDot(norm, lightInDir);
float3 f0 = vecLerp(0.08f * specular * make_float3(1.f, 1.f, 1.f), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
//float g = GeometricVisibility(roughness, noV, noL, voH);
float d = D_GGX(roughness, noH);
float v = Vis_SmithJointApprox(roughness, noV, noL);
// Microfacet specular = D*G*F / (4*NoL*NoV)
float3 specIrrad = d*v*brdf_f;// vecMul(d*g*brdf_f / (4.f * noV), lightInIrrad);
float3 diffIrrad = vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), Diffuse(diff, roughness, noV, noL, voH));//vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), diff / M_PI);
return vecMul(lightInIrrad*noL, diffspec.y*specIrrad + diffspec.x*diffIrrad);
}
__device__ void GetLightFromRandLightVertices(float3 pos, float3 norm, LightVertex* lightVertices, uint lightVerticesSize, hiprandState_t* randstate, float3& irrad, float3& irradDir, float& pathPotential)
{
//LightVertex dummy;
//dummy.diff = make_float3(1.f, 1.f, 1.f);
//dummy.irrad = make_float3(1.f, 0.f, 0.f);
//dummy.pos = make_float3(0.f, 0.f, 0.f);
//dummy.norm = dummy.irradDir = normalize(pos - dummy.pos);
//dummy.roughness = 0.5f;
//dummy.specular = 0.5f;
//dummy.metallic = 0.f;
irrad = make_float3(0.f, 0.f, 0.f);
uint lightVert = hiprand_uniform(randstate) * lightVerticesSize;
LightVertex* lightVertex = &lightVertices[lightVert];
float3 toLightVertexDir = normalize(lightVertex->pos - pos);
float toLightVertexDist = length(lightVertex->pos - pos);
CURay toLightVertex(pos, toLightVertexDir);
TracePrimitiveResult traceResult;
if (length(lightVertex->irrad) > 0.f && vecDot(norm, toLightVertexDir) > 0.f &&
!TracePrimitive(toLightVertex, traceResult, toLightVertexDist - M_FLT_BIAS_EPSILON, M_FLT_BIAS_EPSILON, false))
{
if (toLightVertexDist > M_FLT_EPSILON)
{
irrad = GetShadingResult(-1 * toLightVertexDir, lightVertex->irradDir, lightVertex->irrad, lightVertex->norm
, lightVertex->diff, lightVertex->metallic, lightVertex->roughness, lightVertex->specular, make_float2(1.f, 1.f)) + lightVertex->emissive;
irradDir = toLightVertexDir;
}
else
{
irrad = lightVertex->irrad;
irradDir = -1.f * lightVertex->irradDir;
}
}
}
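// Connects each diffuse eye vertex to one randomly chosen stored light vertex: a shadow
// ray checks visibility (inside GetLightFromRandLightVertices), the light vertex is shaded
// toward the eye vertex, and the resulting contribution is added to the eye path's
// accumulated sample weighted by its incoming throughput.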
__global__ void pt_connectEyeLightPath_kernel(PTPathVertex** eyeStream, uint eyeStreamSize, LightVertex* lightVertices, uint lightVerticesSize)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= eyeStreamSize) return;
PTPathVertex* eyePath = eyeStream[ind];
float3 lightFromLightVertex = make_float3(0.f, 0.f, 0.f);
float3 toLightVertexDir = make_float3(0.f, 0.f, 0.f);
float lightPathPotential = 1.f;
GetLightFromRandLightVertices(eyePath->pathVertexPos + eyePath->origNorm * M_FLT_BIAS_EPSILON, eyePath->origNorm
, lightVertices, lightVerticesSize, &eyePath->randState, lightFromLightVertex, toLightVertexDir, lightPathPotential);
float3 lightContribFromLightVertex = vecMax(make_float3(0.f, 0.f, 0.f)
, GetShadingResult(eyePath->pathInDir, toLightVertexDir, lightFromLightVertex, eyePath->origNorm
, eyePath->origDiff, eyePath->origMetallic, eyePath->origRoughness, eyePath->origSpecular
, make_float2(1.f - eyePath->origTrans, 1.f)));
if (length(lightContribFromLightVertex) > 0.f)
{
eyePath->pathAccumSample = eyePath->pathAccumSample + vecMul(lightContribFromLightVertex, eyePath->pathInMulTerm);
eyePath->pathSampleN += 4;
eyePath->pathPotential *= lightPathPotential;
}
}
__global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize, PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize, uint assignableSlot)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind < assignableSlot)
{
int pathStreamInd = pathStreamSize + ind;
int pathQueueInd = pathQueueCur + ind;
PTPathVertex* assignSample = nullptr;
if (pathQueueInd < pathQueueSize)
{
assignSample = &pathQueue[pathQueueInd];
}
pathStream[pathStreamInd] = assignSample;
}
}
__global__ void pt_applyPixelProbToResult_kernel(uint width, uint height, float* result, float* varResult, float minProb = 0.f)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
uint ind = (y * width + x);
result[ind * 3] = result[ind * 3 + 1] = result[ind * 3 + 2] = fmaxf(minProb, varResult[ind]);
}
__global__ void pt_debugTracedPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height
, float* tempResult, uint* tempResultN)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueSize) return;
// include the still-active (unterminated) sample in the accumulated result
if (!pathQueue[x].isTerminated)
{
pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample;
pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential;
pathQueue[x].pathSampleN++;
}
if (pathQueue[x].pathSampleN > 0)
{
uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x;
uint tempNextSampleResultN = pathQueue[x].pathSampleN;
float3 sampleResult = make_float3(1.f, 1.f, 1.f);
float potentialResult = 1.f - pathQueue[x].pathAccumPotential;
atomicAdd(tempResult + ind * 3, 1.f);
atomicAdd(tempResult + ind * 3 + 1, 1.f);
atomicAdd(tempResult + ind * 3 + 2, 1.f);
atomicAdd(tempResultN + ind, 1);
}
}
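// Folds the traced queue into the running per-pixel averages using an incremental mean:
// newMean = (oldN * oldMean + accumulatedSampleSum) / (oldN + newN). Also maintains the
// averaged first-hit position/normal/albedo buffers consumed by the bilateral filter.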
__global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult
, float* varResult, float* posResult, float* normResult, float* diffResult, uint* sampleResultN)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueSize) return;
// include the still-active (unterminated) sample in the accumulated result
if (!pathQueue[x].isTerminated)
{
pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample;
pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential;
pathQueue[x].pathSampleN++;
}
if (pathQueue[x].pathSampleN > 0)
{
uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x;
if (!frameN)
{
sampleResultN[ind] = 0;
}
uint tempNextSampleResultN = sampleResultN[ind] + pathQueue[x].pathSampleN;
if (tempNextSampleResultN > sampleResultN[ind])
{
float3 sampleResult = pathQueue[x].pathAccumSample;
if (!isinf(sampleResult.x) && !isinf(sampleResult.y) && !isinf(sampleResult.z))
{
float potentialResult = 1.f - pathQueue[x].pathAccumPotential;
float resultInf = 1.f / (float)(tempNextSampleResultN);
float oldInf = sampleResultN[ind] * resultInf;
result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f);
result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f);
result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f);
varResult[ind] = max(resultInf * potentialResult + oldInf * varResult[ind], 0.f);
sampleResultN[ind] = tempNextSampleResultN;
sampleResult = pathQueue[x].pathDirectPos * pathQueue[x].pathSampleN;
posResult[ind * 3] = resultInf * sampleResult.x + oldInf *posResult[ind * 3];
posResult[ind * 3 + 1] = resultInf * sampleResult.y + oldInf *posResult[ind * 3 + 1];
posResult[ind * 3 + 2] = resultInf * sampleResult.z + oldInf *posResult[ind * 3 + 2];
sampleResult = pathQueue[x].pathDirectNorm * pathQueue[x].pathSampleN;
normResult[ind * 3] = resultInf * sampleResult.x + oldInf *normResult[ind * 3];
normResult[ind * 3 + 1] = resultInf * sampleResult.y + oldInf *normResult[ind * 3 + 1];
normResult[ind * 3 + 2] = resultInf * sampleResult.z + oldInf *normResult[ind * 3 + 2];
sampleResult = pathQueue[x].pathDirectDiffuse * pathQueue[x].pathSampleN;
diffResult[ind * 3] = resultInf * sampleResult.x + oldInf *diffResult[ind * 3];
diffResult[ind * 3 + 1] = resultInf * sampleResult.y + oldInf *diffResult[ind * 3 + 1];
diffResult[ind * 3 + 2] = resultInf * sampleResult.z + oldInf *diffResult[ind * 3 + 2];
}
}
}
}
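// Atomic variant used for the adaptive pass, where many queue entries can map to the same
// pixel: per-path sums and sample counts are atomically added into temporary buffers and
// merged into the running averages by the accumulation kernel below.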
__global__ void pt_atomicApplyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height
, float* tempResult, uint* tempResultN, float* tempPos, float* tempNorm, float* tempDiff)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueSize) return;
// include the still-active (unterminated) sample in the accumulated result
if (!pathQueue[x].isTerminated)
{
pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample;
pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential;
pathQueue[x].pathSampleN++;
}
if (pathQueue[x].pathSampleN > 0)
{
uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x;
float3 sampleResult = pathQueue[x].pathAccumSample;
if (!isinf(sampleResult.x) && !isinf(sampleResult.y) && !isinf(sampleResult.z))
{
atomicAdd(tempResult + ind * 3, sampleResult.x);
atomicAdd(tempResult + ind * 3 + 1, sampleResult.y);
atomicAdd(tempResult + ind * 3 + 2, sampleResult.z);
atomicAdd(tempResultN + ind, pathQueue[x].pathSampleN);
sampleResult = pathQueue[x].pathDirectPos * pathQueue[x].pathSampleN;
atomicAdd(tempPos + ind * 3, sampleResult.x);
atomicAdd(tempPos + ind * 3 + 1, sampleResult.y);
atomicAdd(tempPos + ind * 3 + 2, sampleResult.z);
sampleResult = pathQueue[x].pathDirectNorm * pathQueue[x].pathSampleN;
atomicAdd(tempNorm + ind * 3, sampleResult.x);
atomicAdd(tempNorm + ind * 3 + 1, sampleResult.y);
atomicAdd(tempNorm + ind * 3 + 2, sampleResult.z);
sampleResult = pathQueue[x].pathDirectDiffuse * pathQueue[x].pathSampleN;
atomicAdd(tempDiff + ind * 3, sampleResult.x);
atomicAdd(tempDiff + ind * 3 + 1, sampleResult.y);
atomicAdd(tempDiff + ind * 3 + 2, sampleResult.z);
}
}
}
__global__ void pt_accumTempResultToResult_kernel(uint width, uint height, uint frameN, float* tempResult, uint* tempResultN
, float* tempPos, float* tempNorm, float* tempDiff, float* result, uint* resultN
, float* pos, float* norm, float* diff)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= width * height) return;
if (frameN == 0)
{
resultN[x] = 0;
result[x * 3] = 0.f;
result[x * 3 + 1] = 0.f;
result[x * 3 + 2] = 0.f;
pos[x * 3] = 0.f;
pos[x * 3 + 1] = 0.f;
pos[x * 3 + 2] = 0.f;
norm[x * 3] = 0.f;
norm[x * 3 + 1] = 0.f;
norm[x * 3 + 2] = 0.f;
diff[x * 3] = 0.f;
diff[x * 3 + 1] = 0.f;
diff[x * 3 + 2] = 0.f;
}
uint nextSampleN = tempResultN[x] + resultN[x];
if (nextSampleN > resultN[x])
{
float resultInf = 1.f / (float)(nextSampleN);
float oldInf = resultN[x] * resultInf;
result[x * 3] = max(resultInf * tempResult[x * 3] + oldInf * result[x * 3], 0.f);
result[x * 3 + 1] = max(resultInf * tempResult[x * 3 + 1] + oldInf * result[x * 3 + 1], 0.f);
result[x * 3 + 2] = max(resultInf * tempResult[x * 3 + 2] + oldInf * result[x * 3 + 2], 0.f);
resultN[x] = resultN[x] + tempResultN[x];
pos[x * 3] = resultInf * tempPos[x * 3] + oldInf * pos[x * 3];
pos[x * 3 + 1] = resultInf * tempPos[x * 3 + 1] + oldInf * pos[x * 3 + 1];
pos[x * 3 + 2] = resultInf * tempPos[x * 3 + 2] + oldInf * pos[x * 3 + 2];
norm[x * 3] = resultInf * tempNorm[x * 3] + oldInf * norm[x * 3];
norm[x * 3 + 1] = resultInf * tempNorm[x * 3 + 1] + oldInf * norm[x * 3 + 1];
norm[x * 3 + 2] = resultInf * tempNorm[x * 3 + 2] + oldInf * norm[x * 3 + 2];
diff[x * 3] = resultInf * tempDiff[x * 3] + oldInf * diff[x * 3];
diff[x * 3 + 1] = resultInf * tempDiff[x * 3 + 1] + oldInf * diff[x * 3 + 1];
diff[x * 3 + 2] = resultInf * tempDiff[x * 3 + 2] + oldInf * diff[x * 3 + 2];
}
}
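// Per-pixel squared error between the filtered image ("correctData") and the raw
// accumulated image, averaged over the three channels and scaled by
// desiredN / (desiredN + samplesTaken) so well-sampled pixels receive less weight.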
__global__ void pt_calculateSquareError_kernel(float* correctData, float* sampleData, uint* sampleNData, uint sampleN, float* resultData, uint dataSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= dataSize)
return;
uint sampledN = sampleNData[x];
float reductionFactor = (float)sampleN / (float)(sampleN + sampledN);
if (sampleN + sampledN < sampledN)
reductionFactor = 0;
resultData[x] = reductionFactor * ((correctData[x * 3] - sampleData[x * 3]) * (correctData[x * 3] - sampleData[x * 3])
+ (correctData[x * 3 + 1] - sampleData[x * 3 + 1]) * (correctData[x * 3 + 1] - sampleData[x * 3 + 1])
+ (correctData[x * 3 + 2] - sampleData[x * 3 + 2]) * (correctData[x * 3 + 2] - sampleData[x * 3 + 2])
) / 3.f;
}
void CleanMem()
{
freeLightPathMem();
freeStreamMem();
freeAllBVHCudaMem();
CUFREE(g_devTempResultN);
CUFREE(g_devSampleResultN);
CUFREE(g_devResultVarData);
CUFREE(g_devResultData);
CUFREE(g_devAccResultData);
CUFREE(g_devTempResultData);
CUFREE(g_devFilteredResult);
CUFREE(g_devFilterGaussianConst);
CUFREE(g_devPositionData);
CUFREE(g_devNormalData);
CUFREE(g_devDiffuseData);
CUFREE(g_devTempPositionData);
CUFREE(g_devTempNormalData);
CUFREE(g_devTempDiffuseData);
}
//struct ray_greater_compare
//{
// __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2)
// {
// int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0);
// int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0);
// return vert1Score > vert2Score;
// }
//};
struct is_temppathqueue_terminated
{
__hd__ bool operator()(const uint& vert)
{
return (vert + 1 == 0);
}
};
struct is_active
{
__hd__ bool operator()(const PTPathVertex* vert)
{
return !vert->isTerminated;
}
};
struct is_terminated
{
__hd__ bool operator()(const PTPathVertex* vert)
{
return vert->isTerminated;
}
};
struct is_connectToLightPath
{
__hd__ bool operator()(const PTPathVertex* vert)
{
return vert->pathType == RAYTYPE_DIFF;
}
};
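// Stream-compaction predicates and kernel: pt_copyif_kernel gathers the stream entries
// that satisfy the predicate into a destination array via an atomic counter; it is used
// in place of the commented-out thrust::remove_if / copy_if calls below.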
template<typename Predicate>
__global__ void pt_copyif_kernel(PTPathVertex** result, uint* atomicN, PTPathVertex** pathStream
, uint activePathStreamSize, Predicate pred)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= activePathStreamSize)
return;
if (pred(pathStream[ind]))
{
result[atomicAggInc((int *)atomicN)] = pathStream[ind];
}
}
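// Wavefront trace loop: refill empty stream slots from the path queue, trace one bounce,
// compact the surviving (active) paths, then extract the diffuse vertices into the
// connection stream and join them with the cached light vertices.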
void TracePathQueue(uint pathQueueSize)
{
dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1);
dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1);
uint activePathStreamSize = 0;
g_uPathQueueCur = 0;
while (g_uPathQueueCur < pathQueueSize || activePathStreamSize > 0)
{
uint tempActivePathStreamSize = activePathStreamSize;
int assignableStreamSlot = min((uint)PATHSTREAM_SIZE - activePathStreamSize, pathQueueSize - g_uPathQueueCur);
if (assignableStreamSlot > 0)
pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >
(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur
, pathQueueSize, assignableStreamSlot);
//readjust activePathStreamSize
activePathStreamSize += assignableStreamSlot;
g_uPathQueueCur += assignableStreamSlot;
//tracing process
pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize);
HANDLE_KERNEL_ERROR();
//compact pathstream and find activePathStreamSize value
//PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated());
//activePathStreamSize = compactedStreamEndItr - g_devPathStream;
HANDLE_ERROR(hipMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_copyif_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devTempPathStream, g_devAtomicN, g_devPathStream, activePathStreamSize, is_active());
HANDLE_KERNEL_ERROR();
hipMemcpy(&activePathStreamSize, g_devAtomicN, sizeof(uint), hipMemcpyDeviceToHost);
PTPathVertex** tempSwap = g_devPathStream;
g_devPathStream = g_devTempPathStream;
g_devTempPathStream = tempSwap;
//gen connection path stream
//PTPathVertex** conPathStreamEndItr = thrust::copy_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize
// , g_devEyeLightConPathStream, is_connectToLightPath());
//uint activeConPathStreamSize = conPathStreamEndItr - g_devEyeLightConPathStream;
uint activeConPathStreamSize = 0;
if (activePathStreamSize > 0)
{
HANDLE_ERROR(hipMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_copyif_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devEyeLightConPathStream, g_devAtomicN, g_devPathStream, activePathStreamSize, is_connectToLightPath());
HANDLE_KERNEL_ERROR();
hipMemcpy(&activeConPathStreamSize, g_devAtomicN, sizeof(uint), hipMemcpyDeviceToHost);
}
//connect eye and light path stream
if (activeConPathStreamSize > 0)
{
pt_connectEyeLightPath_kernel << < dim3(ceil((float)activeConPathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devEyeLightConPathStream, activeConPathStreamSize, g_devLightVertices, g_uLightVerticesSize);
HANDLE_KERNEL_ERROR();
}
}
}
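// Per-frame entry point. The frame counter resets when the camera moves; light sub-paths
// are regenerated every third frame. The first five frames trace one uniform sample per
// pixel; afterwards the accumulated image is bilateral-filtered, a per-pixel MSE against
// the filtered image drives adaptive sample allocation, and the debug mode selects which
// buffer is copied back to the host.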
bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene
, float width, float height, float* result)
{
// Check and allocate everything
if (!scene || !scene->GetCompactBVH()->IsValid())
return false;
NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize();
camUp = camRight.cross(camDir).normalize();
g_matLastCamMat = g_matCurCamMat;
g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp);
g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 0 : g_uCurFrameN + 1;
if (!g_bIsCudaInit || scene->GetIsCudaDirty())
{
CleanMem();
g_matLastCamMat = g_matCurCamMat;
g_uCurFrameN = 0;
initAllSceneCudaMem(scene);
allocateStreamMem(width * height);
allocateLightPathMem();
updateLightTriCudaMem(scene);
size_t mem_tot;
size_t mem_free;
hipMemGetInfo(&mem_free, &mem_tot);
std::cout << "Memory Used : " << mem_tot - mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl;
}
else if (scene->GetIsCudaMaterialDirty())
{
updateAllSceneMaterialsCudaMem(scene);
updateLightTriCudaMem(scene);
g_uCurFrameN = 0;
}
if (!g_bIsCudaInit)
return false;
if (!g_devResultData || !g_devAccResultData || !g_devTempResultData || g_resultDataSize != (sizeof(float) * 3 * width * height) || !g_devFilteredResult)
{
g_resultDataSize = sizeof(float) * 3 * width * height;
CUFREE(g_devResultData);
hipMalloc((void**)&g_devResultData, g_resultDataSize);
CUFREE(g_devAccResultData);
hipMalloc((void**)&g_devAccResultData, g_resultDataSize);
CUFREE(g_devTempResultData);
hipMalloc((void**)&g_devTempResultData, g_resultDataSize);
CUFREE(g_devPositionData);
hipMalloc((void**)&g_devPositionData, g_resultDataSize);
CUFREE(g_devNormalData);
hipMalloc((void**)&g_devNormalData, g_resultDataSize);
CUFREE(g_devDiffuseData);
hipMalloc((void**)&g_devDiffuseData, g_resultDataSize);
CUFREE(g_devTempPositionData);
hipMalloc((void**)&g_devTempPositionData, g_resultDataSize);
CUFREE(g_devTempNormalData);
hipMalloc((void**)&g_devTempNormalData, g_resultDataSize);
CUFREE(g_devTempDiffuseData);
hipMalloc((void**)&g_devTempDiffuseData, g_resultDataSize);
CUFREE(g_devResultVarData);
hipMalloc((void**)&g_devResultVarData, sizeof(float) * width * height);
CUFREE(g_devSampleResultN);
hipMalloc((void**)&g_devSampleResultN, sizeof(uint) * width * height);
CUFREE(g_devTempResultN);
hipMalloc((void**)&g_devTempResultN, sizeof(uint) * width * height);
CUFREE(g_devFilteredResult);
hipMalloc((void**)&g_devFilteredResult, g_resultDataSize);
}
if (!g_devFilterGaussianConst)
{
CUFREE(g_devFilterGaussianConst);
hipMalloc((void**)&g_devFilterGaussianConst, sizeof(uint) * GAUSSIANCOST_N);
cudaBilateralFilter::updateGaussian(g_devFilterGaussianConst, *g_fFilterColorEuD.GetFloat(), *g_uiFilterRadius.GetUint());
}
float3 f3CamPos = V32F3(camPos);
float3 f3CamUp = V32F3(camUp);
float3 f3CamDir = V32F3(camDir);
float3 f3CamRight = V32F3(camRight);
dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1);
dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1);
// light paths
if (g_uCurFrameN % 3 == 0)
{
uint lightPathStreamSizeCap = min((uint)PATHSTREAM_SIZE, (uint)(LIGHTVERTEX_N / LIGHTRAY_BOUND_MAX));
pt_genLightPathQueue_kernel << < dim3(ceil((float)lightPathStreamSizeCap / (float)block1.x), 1, 1), block1 >> >
(g_uCurFrameN, WangHash(g_uCurFrameN), g_devLightTri, g_lightTriN, g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathQueue, lightPathStreamSizeCap
, g_devLightVertices, 0);
uint activePathStreamSize = 0;
g_uLightVerticesSize = lightPathStreamSizeCap;
g_uPathQueueCur = 0;
while (g_uPathQueueCur < lightPathStreamSizeCap || activePathStreamSize > 0)
{
uint tempActivePathStreamSize = activePathStreamSize;
int assignableStreamSlot = min(lightPathStreamSizeCap - activePathStreamSize, lightPathStreamSizeCap - g_uPathQueueCur);
if (assignableStreamSlot > 0)
{
pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur
, g_uLightVerticesSize, assignableStreamSlot);
}
//readjust activePathStreamSize
activePathStreamSize += assignableStreamSlot;
g_uPathQueueCur += assignableStreamSlot;
pt_traceLight_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize
, g_devLightVertices, g_uLightVerticesSize);
g_uLightVerticesSize += activePathStreamSize;
//compact pathstream and find activePathStreamSize value
//PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated());
//activePathStreamSize = compactedStreamEndItr - g_devPathStream;
HANDLE_ERROR(hipMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_copyif_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devTempPathStream, g_devAtomicN, g_devPathStream, activePathStreamSize, is_active());
HANDLE_KERNEL_ERROR();
hipMemcpy(&activePathStreamSize, g_devAtomicN, sizeof(uint), hipMemcpyDeviceToHost);
PTPathVertex** tempSwap = g_devPathStream;
g_devPathStream = g_devTempPathStream;
g_devTempPathStream = tempSwap;
}
//std::cout << "Generated light vertices size: " << g_uLightVerticesSize << std::endl;
}
if (g_uCurFrameN < 5)
{
//float time;
//hipEvent_t start, stop;
//HANDLE_ERROR(hipEventCreate(&start));
//HANDLE_ERROR(hipEventCreate(&stop));
uint useQueueSize = width * height;
//HANDLE_ERROR(hipEventRecord(start, 0));
// eye paths
pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height
, g_uCurFrameN, WangHash(g_uCurFrameN), g_devPathQueue);
//HANDLE_ERROR(hipEventRecord(stop, 0));
//HANDLE_ERROR(hipEventSynchronize(stop));
//HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
//std::cout << "gen path: " << time << std::endl;
//HANDLE_ERROR(hipEventRecord(start, 0));
// trace path queue
TracePathQueue(useQueueSize);
//HANDLE_ERROR(hipEventRecord(stop, 0));
//HANDLE_ERROR(hipEventSynchronize(stop));
//HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
//std::cout << "trace path: " << time << std::endl;
//HANDLE_ERROR(hipEventRecord(start, 0));
pt_applyPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> >
(g_devPathQueue, useQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarData
, g_devPositionData, g_devNormalData, g_devDiffuseData, g_devSampleResultN);
//HANDLE_ERROR(hipEventRecord(stop, 0));
//HANDLE_ERROR(hipEventSynchronize(stop));
//HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
//std::cout << "accum path: " << time << std::endl;
}
else
{
#ifdef PERFBREAKDOWN
float time;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
#endif
if (*g_bFilterDiffuse.GetBool())
{
cudaBilateralFilter::bilaterial_posnormemit_kernel << < renderGrid, block2 >> >
(g_devResultData, g_devPositionData, g_devNormalData, g_devDiffuseData,
width, height, *g_fFilterColorEuD.GetFloat(), *g_fFilterPosEuD.GetFloat(), *g_fFilterNormEuD.GetFloat()
, *g_fFilterDiffEuD.GetFloat(), *g_uiFilterRadius.GetUint(), g_devFilterGaussianConst, g_devFilteredResult);
}
else
{
cudaBilateralFilter::bilaterial_posnorm_kernel << < renderGrid, block2 >> >
(g_devResultData, g_devPositionData, g_devNormalData,
width, height, *g_fFilterColorEuD.GetFloat(), *g_fFilterPosEuD.GetFloat(), *g_fFilterNormEuD.GetFloat()
, *g_uiFilterRadius.GetUint(), g_devFilterGaussianConst, g_devFilteredResult);
}
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeA.attrFloat = (g_fAvgProcessTimeA.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "filter: " << time << std::endl;
HANDLE_ERROR(hipEventRecord(start, 0));
#endif
// calculate sampling map from converged result
pt_calculateSquareError_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >
(g_devFilteredResult, g_devResultData, g_devSampleResultN, *g_uiDesiredSamplingN.GetUint(), g_devResultVarData, (uint)(width * height));
//thrust::sort(thrust::device, g_devResultVarData, g_devResultVarData + (uint)(width * height));
float sumMSE = thrust::reduce(thrust::device, g_devResultVarData, g_devResultVarData + (uint)(width * height), 0.f, thrust::plus<float>());
//float maxMSE = thrust::reduce(thrust::device, g_devResultVarData, g_devResultVarData + (uint)(width * height), 0.f, thrust::maximum<float>());
//float meanMSE = sumMSE / (width * height);
//std::cout << "sumMSE: " << sumMSE << "\n";
//std::cout << "maxMSE: " << maxMSE << "\n";
//std::cout << "meanMSE: " << meanMSE << "\n";
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeB.attrFloat = (g_fAvgProcessTimeB.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "MSE cal: " << time << std::endl;
#endif
//if (g_uCurFrameN == 1)
//{
// float* tempDiffData = new float[(uint)width * (uint)height];
// hipMemcpy(tempDiffData, g_devResultVarData, (uint)(width * height) * sizeof(float), hipMemcpyDeviceToHost);
// NPConfFileHelper::txtConfFile conf("adapCheat_diffData.txt");
// for (uint j = 0; j < width * height; j++)
// {
// conf.WriteRaw<float>(tempDiffData[j]);
// conf.WriteRaw("\n");
// }
// conf.SyncDataToFile();
// DELETE_ARRAY(tempDiffData);
//}
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(start, 0));
#endif
// gen adaptive eye paths
uint useQueueSize = width * height * PATHQUEUE_MUL;
HANDLE_ERROR(hipMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_genAdapPathQueue_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >
(f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height
, g_uCurFrameN, WangHash(g_uCurFrameN), g_devAtomicN, g_devResultVarData, sumMSE, g_devPathQueue, useQueueSize);
hipMemcpy(&useQueueSize, g_devAtomicN, sizeof(uint), hipMemcpyDeviceToHost);
//std::cout << "AtomicN : " << useQueueSize << std::endl;
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeC.attrFloat = (g_fAvgProcessTimeC.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "gen path: " << time << std::endl;
#endif
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(start, 0));
#endif
TracePathQueue(useQueueSize);
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeD.attrFloat = (g_fAvgProcessTimeD.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "trace path: " << time << std::endl;
#endif
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(start, 0));
#endif
HANDLE_ERROR(hipMemset((void*)g_devTempResultData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(hipMemset((void*)g_devTempPositionData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(hipMemset((void*)g_devTempNormalData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(hipMemset((void*)g_devTempDiffuseData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(hipMemset((void*)g_devTempResultN, 0, sizeof(uint) * height * width));
if (*g_enumDebugMode.GetUint() == 1)
{
pt_debugTracedPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> >
(g_devPathQueue, useQueueSize, width, height, g_devTempResultData, g_devTempResultN);
}
else
{
pt_atomicApplyPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> >
(g_devPathQueue, useQueueSize, width, height, g_devTempResultData, g_devTempResultN
, g_devTempPositionData, g_devTempNormalData, g_devTempDiffuseData);
}
pt_accumTempResultToResult_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >
(width, height, g_uCurFrameN, g_devTempResultData, g_devTempResultN
, g_devTempPositionData, g_devTempNormalData, g_devTempDiffuseData
, g_devResultData, g_devSampleResultN, g_devPositionData, g_devNormalData, g_devDiffuseData);
#ifdef PERFBREAKDOWN
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeE.attrFloat = (g_fAvgProcessTimeE.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "accum path: " << time << std::endl;
#endif
}
if (*g_enumDebugMode.GetUint() == 2 || *g_enumDebugMode.GetUint() == 3)
{
pt_applyPixelProbToResult_kernel << < renderGrid, block2 >> >(width, height, g_devFilteredResult, g_devResultVarData, (*g_enumDebugMode.GetUint() == 3) ? *g_fMinTraceProb.GetFloat() : 0.f);
}
// Copy result to host
if (*g_enumDebugMode.GetUint() == 4 || *g_enumDebugMode.GetUint() == 2 || *g_enumDebugMode.GetUint() == 3)
{
hipMemcpy(result, g_devFilteredResult, g_resultDataSize, hipMemcpyDeviceToHost);
}
else
{
hipMemcpy(result, g_devResultData, g_resultDataSize, hipMemcpyDeviceToHost);
}
return true;
}
} | f6a362929650769d0a54e637bd5843c64d07b87b.cu | #include "cudaRTCommon.h"
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include "conffilehelper.h"
#include "cudaBilateralFilter.h"
#define FANN_NO_DLL
#include "floatfann.h"
#define BLOCK_SIZE 16
#define NORMALRAY_BOUND_MAX 5
#define PATHSTREAM_SIZE 1E4*64
#define LIGHTRAY_BOUND_MAX 5
#define LIGHTVERTEX_N 640
#define PATHQUEUE_MUL 1
#define PERFBREAKDOWN
namespace cudaRTBDPTAtomicAdapBilatAtomic
{
const char* g_enumAdapModeName[] = { "PDF", "Const" };
NPAttrHelper::Attrib g_enumAdapMode("Adaptive Mode", g_enumAdapModeName, 2, 0);
NPAttrHelper::Attrib g_uiDesiredSamplingN("DesiredSamplingN", 5);
NPAttrHelper::Attrib g_fMinTraceProb("MinTraceProb", 0.f);
const char* g_enumDebugModeName[] = { "None", "Traced", "Prob", "Prob With Limit", "Filtered Result" };
NPAttrHelper::Attrib g_enumDebugMode("Debug Mode", g_enumDebugModeName, 5, 0);
NPAttrHelper::Attrib g_fFilterColorEuD("Filter Color E Delta", 50.f);
NPAttrHelper::Attrib g_fFilterPosEuD("Filter Pos E Delta", 1.f);
NPAttrHelper::Attrib g_fFilterNormEuD("Filter Norm E Delta", 0.25f);
NPAttrHelper::Attrib g_fFilterDiffEuD("Filter Diff E Delta", 0.1f);
NPAttrHelper::Attrib g_uiFilterRadius("Filter Radius", 5);
NPAttrHelper::Attrib g_bFilterDiffuse("Filter Diff Flag", false);
#ifdef PERFBREAKDOWN
NPAttrHelper::Attrib g_fAvgProcessTimeA("Avg Proc Time A", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeB("Avg Proc Time B", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeC("Avg Proc Time C", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeD("Avg Proc Time D", 0.f);
NPAttrHelper::Attrib g_fAvgProcessTimeE("Avg Proc Time E", 0.f);
#endif
#ifdef PERFBREAKDOWN
RT_ATTRIBS_N(15)
#else
RT_ATTRIBS_N(10)
#endif
RT_ATTRIBS_BGN
RT_ATTRIB_DECLARE(0, Adaptive Mode, g_enumAdapMode)
RT_ATTRIB_DECLARE(1, Desired Max Sampling, g_uiDesiredSamplingN)
RT_ATTRIB_DECLARE(2, Min Trace Probability, g_fMinTraceProb)
RT_ATTRIB_DECLARE(3, Debug Mode, g_enumDebugMode)
RT_ATTRIB_DECLARE(4, Filter Color EuD, g_fFilterColorEuD)
RT_ATTRIB_DECLARE(5, Filter Pos EuD, g_fFilterPosEuD)
RT_ATTRIB_DECLARE(6, Filter Norm EuD, g_fFilterNormEuD)
RT_ATTRIB_DECLARE(7, Filter Diff EuD, g_fFilterDiffEuD)
RT_ATTRIB_DECLARE(8, Filter Radius, g_uiFilterRadius)
RT_ATTRIB_DECLARE(9, Filter Diffuse Flag, g_bFilterDiffuse)
#ifdef PERFBREAKDOWN
RT_ATTRIB_DECLARE(10, Avg Filter Time, g_fAvgProcessTimeA)
RT_ATTRIB_DECLARE(11, Avg MSE Time, g_fAvgProcessTimeB)
RT_ATTRIB_DECLARE(12, Avg Gen Time, g_fAvgProcessTimeC)
RT_ATTRIB_DECLARE(13, Avg Trace Time, g_fAvgProcessTimeD)
RT_ATTRIB_DECLARE(14, Avg Accum Time, g_fAvgProcessTimeE)
#endif
RT_ATTRIBS_END
struct LightVertex
{
float3 pos;
float3 norm;
float3 irrad;
float3 irradDir;
float3 diff;
float3 emissive;
float specular;
float metallic;
float roughness;
float pathPotential;
__hd__ LightVertex()
{
pos = norm = irrad = irradDir = make_float3(0.f, 0.f, 0.f);
pathPotential = 1.f;
}
};
LightVertex* g_devLightVertices = nullptr;
uint g_uLightVerticesSize = 0;
uint* g_devLightTri = nullptr;
uint g_lightTriN = 0;
void freeLightPathMem()
{
g_uLightVerticesSize = 0;
g_lightTriN = 0;
CUFREE(g_devLightVertices);
CUFREE(g_devLightTri);
}
void allocateLightPathMem()
{
HANDLE_ERROR(cudaMalloc((void**)&g_devLightVertices, sizeof(LightVertex) * LIGHTVERTEX_N));
HANDLE_ERROR(cudaMemset((void*)g_devLightVertices, 0, sizeof(LightVertex) * LIGHTVERTEX_N));
}
void updateLightTriCudaMem(RTScene* scene)
{
g_lightTriN = 0;
CUFREE(g_devLightTri);
std::vector<uint> lightTri;
for (uint i = 0; i < scene->m_pTriangles.size(); i++)
{
if (NPMathHelper::Vec3::length(scene->m_pMaterials[scene->m_pTriangles[i].matInd].emissive) > 0.f)
lightTri.push_back(i);
}
uint* tempLightTri = new uint[lightTri.size()];
for (uint i = 0; i < lightTri.size(); i++)
{
tempLightTri[i] = lightTri[i];
}
g_lightTriN = lightTri.size();
HANDLE_ERROR(cudaMalloc((void**)&g_devLightTri, sizeof(uint) * g_lightTriN));
HANDLE_ERROR(cudaMemcpy(g_devLightTri, tempLightTri, sizeof(uint) * g_lightTriN, cudaMemcpyHostToDevice));
DEL_ARRAY(tempLightTri);
}
enum RAYTYPE
{
RAYTYPE_EYE = 0,
RAYTYPE_DIFF = 1,
RAYTYPE_SPEC = 2,
RAYTYPE_LIGHT = 3
};
struct PTPathVertex
{
uint isTerminated;
uint2 pathPixel;
float3 pathOutDir;
float3 pathVertexPos;
float3 pathOutMulTerm;
RAYTYPE pathType;
float3 pathSample;
float3 pathAccumSample;
uint pathSampleN;
uint pathSampleDepth;
curandState randState;
// for connecting light path
float3 pathInMulTerm;
float3 pathInDir;
float3 origNorm;
float3 origDiff;
float origMetallic;
float origRoughness;
float origSpecular;
float origTrans;
// for adaptive sampling
float pathPotential;
float pathAccumPotential;
// for filtering
float3 pathDirectPos;
float3 pathDirectNorm;
float3 pathDirectDiffuse;
__device__ PTPathVertex()
: isTerminated(true)
, pathPixel(make_uint2(0, 0))
, pathOutDir(make_float3(0.f, 1.f, 0.f))
, pathVertexPos(make_float3(0.f, 0.f, 0.f))
, pathOutMulTerm(make_float3(1.f, 1.f, 1.f))
, pathType(RAYTYPE_EYE)
, pathSample(make_float3(0.f, 0.f, 0.f))
, pathAccumSample(make_float3(0.f, 0.f, 0.f))
, pathSampleN(0)
, pathSampleDepth(0)
, randState()
, pathInMulTerm(make_float3(0.f, 0.f, 0.f))
, pathInDir(make_float3(0.f, 0.f, 0.f))
, origNorm(make_float3(0.f, 1.f, 0.f))
, origDiff(make_float3(0.f, 0.f, 0.f))
, origMetallic(0.f)
, origRoughness(0.f)
, origSpecular(0.f)
, origTrans(0.f)
, pathPotential(1.f)
, pathAccumPotential(0.f)
, pathDirectPos(make_float3(0.f, 0.f, 0.f))
, pathDirectNorm(make_float3(0.f, 0.f, 0.f))
, pathDirectDiffuse(make_float3(0.f, 0.f, 0.f))
{}
__device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos, RAYTYPE _pathType, curandState _randState)
: isTerminated(_isTerminated)
, pathPixel(_pathPixel)
, pathOutDir(_pathOutDir)
, pathVertexPos(_pathVertexPos)
, pathOutMulTerm(make_float3(1.f, 1.f, 1.f))
, pathType(_pathType)
, pathSample(make_float3(0.f, 0.f, 0.f))
, pathAccumSample(make_float3(0.f, 0.f, 0.f))
, pathSampleN(0)
, pathSampleDepth(0)
, randState(_randState)
, pathInMulTerm(make_float3(0.f, 0.f, 0.f))
, pathInDir(make_float3(0.f, 0.f, 0.f))
, origNorm(make_float3(0.f, 1.f, 0.f))
, origDiff(make_float3(0.f, 0.f, 0.f))
, origMetallic(0.f)
, origRoughness(0.f)
, origSpecular(0.f)
, origTrans(0.f)
, pathPotential(1.f)
, pathAccumPotential(0.f)
, pathDirectPos(make_float3(0.f, 0.f, 0.f))
, pathDirectNorm(make_float3(0.f, 0.f, 0.f))
, pathDirectDiffuse(make_float3(0.f, 0.f, 0.f))
{}
};
uint* g_devAtomicN = nullptr;
uint* g_devTempPathQueue = nullptr;
PTPathVertex* g_devPathQueue = nullptr;
uint g_uPathQueueCur = 0;
uint g_uPathQueueSize = 0;
PTPathVertex** g_devPathStream = nullptr;
PTPathVertex** g_devTempPathStream = nullptr;
PTPathVertex** g_devEyeLightConPathStream = nullptr;
uint g_uPathStreamSize = PATHSTREAM_SIZE;
void freeStreamMem()
{
g_uPathQueueCur = g_uPathQueueSize = 0;
CUFREE(g_devTempPathQueue);
CUFREE(g_devPathQueue);
CUFREE(g_devPathStream);
CUFREE(g_devTempPathStream);
CUFREE(g_devEyeLightConPathStream);
CUFREE(g_devAtomicN);
}
void allocateStreamMem(uint queueSize = 480000)
{
g_uPathQueueSize = queueSize;
HANDLE_ERROR(cudaMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(cudaMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(cudaMalloc((void**)&g_devTempPathQueue, sizeof(uint) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(cudaMemset((void*)g_devTempPathQueue, 0, sizeof(uint) * g_uPathQueueSize * PATHQUEUE_MUL));
HANDLE_ERROR(cudaMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(cudaMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(cudaMalloc((void**)&g_devTempPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(cudaMemset((void*)g_devTempPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(cudaMalloc((void**)&g_devEyeLightConPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(cudaMemset((void*)g_devEyeLightConPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize));
HANDLE_ERROR(cudaMalloc((void**)&g_devAtomicN, sizeof(uint)));
HANDLE_ERROR(cudaMemset((void*)g_devAtomicN, 0, sizeof(uint)));
}
float* g_devResultData = nullptr;
float* g_devAccResultData = nullptr;
float* g_devResultVarData = nullptr;
uint* g_devSampleResultN = nullptr;
float* g_devTempResultData = nullptr;
uint* g_devTempResultN = nullptr;
float* g_devTempPositionData = nullptr;
float* g_devTempNormalData = nullptr;
float* g_devTempDiffuseData = nullptr;
float* g_devPositionData = nullptr;
float* g_devNormalData = nullptr;
float* g_devDiffuseData = nullptr;
float* g_devFilteredResult = nullptr;
float* g_devFilterGaussianConst = nullptr;
NPMathHelper::Mat4x4 g_matLastCamMat;
NPMathHelper::Mat4x4 g_matCurCamMat;
uint32 g_uCurFrameN = 0;
size_t g_resultDataSize = 0;
uint32 WangHash(uint32 a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
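// Extends each active light sub-path by one bounce: records the hit as a LightVertex (position, normal,
// material data and accumulated irradiance), then importance-samples a GGX microfacet to choose the next
// reflected/refracted/diffuse direction, terminating by Russian roulette or at the bounce limit.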
__global__ void pt_traceLight_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize, LightVertex* lightVertices, uint curLightVerticesSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= activePathStreamSize || pathStream[x]->isTerminated) return;
PTPathVertex* procVertex = pathStream[x];
CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir);
TracePrimitiveResult traceResult;
if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false))
{
RTTriangle* tri = &triangles[traceResult.triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v;
float3 triPos = V32F3(v0->pos) * traceResult.w + V32F3(v1->pos) * traceResult.u + V32F3(v2->pos) * traceResult.v;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm;
lightVertices[curLightVerticesSize + x].irrad = procVertex->pathSample;
lightVertices[curLightVerticesSize + x].irradDir = -1 * ray.dir;
lightVertices[curLightVerticesSize + x].norm = nl;
lightVertices[curLightVerticesSize + x].pos = triPos;
lightVertices[curLightVerticesSize + x].diff = diff;
lightVertices[curLightVerticesSize + x].emissive = emissive;
lightVertices[curLightVerticesSize + x].specular = specular;
lightVertices[curLightVerticesSize + x].metallic = metallic;
lightVertices[curLightVerticesSize + x].roughness = roughness;
lightVertices[curLightVerticesSize + x].pathPotential = procVertex->pathPotential;
{
// Get some random microfacet
float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(&procVertex->randState), curand_uniform(&procVertex->randState)), roughness, nl);
// Calculate Fresnel
float voH = vecDot(-1 * ray.dir, hDir);
float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
// PDF
float NoH = vecDot(nl, hDir);
float VoH = vecDot(-1 * ray.dir, hDir);
float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH);
// Reflected or Refracted
float reflProb = lerp(length(brdf_f), 1.0f, metallic);
float refrProb = trans;
float3 reflDir;
float3 refrDir;
CURay nextRay = ray;
float3 lightMulTerm;
RAYTYPE nextRayType = procVertex->pathType;
if (refrProb > 0)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(hDir, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
//if (cos2t < 0.f)
//{
// reflProb = 1.0f;//refrProb = 0.f;
//}
//else
{
refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t)));
}
}
if (reflProb > 0)
{
reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir));
if (vecDot(reflDir, nl) < 0.f)
reflProb = 0.f;
}
// Reflected
if (ProbabilityRand(&procVertex->randState, reflProb))
{
nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir);
// ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
// Microfacet specular = D*G*F / (4*NoL*NoV)
// pdf = D * NoH / (4 * VoH)
// (G * F * VoH) / (NoV * NoH)
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, reflDir);
float G = GeometricVisibility(roughness, NoV, NoL, VoH);
//shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive;
lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb);
nextRayType = RAYTYPE_SPEC;
pdf *= reflProb;
}
// Diffused or Transmitted
else
{
// Transmitted
if (ProbabilityRand(&procVertex->randState, refrProb))
{
nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float cosine = vecDot(-1 * nl, refrDir);
//shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive;
lightMulTerm = cosine * diff / (refrProb * (1 - reflProb));
nextRayType = RAYTYPE_SPEC;
pdf *= (refrProb * (1.f - reflProb));
}
// Diffused
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * curand_uniform(&procVertex->randState);
float r2cos = sqrtf(curand_uniform(&procVertex->randState));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, diffDir);
//shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive;
lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb));
nextRayType = RAYTYPE_DIFF;
pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl);
}
}
if (nextRayType != RAYTYPE_DIFF)
lightVertices[curLightVerticesSize + x].irrad = make_float3(0.f, 0.f, 0.f);
if (vecDot(nextRay.dir, nl) < 0.f)
lightVertices[curLightVerticesSize + x].norm = -1 * lightVertices[curLightVerticesSize + x].norm;
procVertex->pathSample = emissive + vecMul(procVertex->pathSample, lightMulTerm);
procVertex->pathPotential *= pdf;
float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm);
if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f)
pixelContrib = 0.f;
if (curand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX)
{
procVertex->isTerminated = true;
}
else
{
procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm);
procVertex->pathOutDir = nextRay.dir;
procVertex->pathVertexPos = nextRay.orig;
procVertex->pathType = nextRayType;
procVertex->pathSampleDepth++;
}
}
}
else
{
lightVertices[curLightVerticesSize + x] = lightVertices[procVertex->pathPixel.x];
procVertex->isTerminated = true;
}
}
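// Advances each active eye sub-path by one bounce: accumulates emissive radiance weighted by the path
// throughput, caches the first-hit position/normal/diffuse for the bilateral filter, importance-samples
// the next direction (GGX specular, refraction or diffuse) and applies Russian roulette termination.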
__global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= activePathStreamSize || pathStream[x]->isTerminated) return;
PTPathVertex* procVertex = pathStream[x];
CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir);
TracePrimitiveResult traceResult;
if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false))
{
RTTriangle* tri = &triangles[traceResult.triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v;
float3 triPos = V32F3(v0->pos) * traceResult.w + V32F3(v1->pos) * traceResult.u + V32F3(v2->pos) * traceResult.v;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm;
{
// Get some random microfacet
float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(&procVertex->randState), curand_uniform(&procVertex->randState)), roughness, nl);
// Calculate Fresnel
float voH = vecDot(-1 * ray.dir, hDir);
float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
// PDF
float NoH = vecDot(nl, hDir);
float VoH = vecDot(-1 * ray.dir, hDir);
float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH + M_EPSILON);
// Reflected or Refracted
float reflProb = lerp(length(brdf_f), 1.0f, metallic);
float refrProb = trans;
float3 reflDir;
float3 refrDir;
CURay nextRay = ray;
float3 lightMulTerm;
RAYTYPE nextRayType = procVertex->pathType;
if (refrProb > 0)
{
bool into = vecDot(nl, norm) > 0.f;
float nt = specular * 0.8f + 1.f;
float nc = 1.0f;
float nnt = into ? nc / nt : nt / nc;
float ddn = vecDot(hDir, ray.dir);
float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn);
//if (cos2t < 0.f)
//{
// reflProb = 1.0f;//refrProb = 0.f;
//}
//else
{
refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t)));
}
}
if (reflProb > 0)
{
reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir));
if (vecDot(reflDir, nl) < 0.f)
reflProb = 0.f;
}
// Reflected
if (ProbabilityRand(&procVertex->randState, reflProb))
{
nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir + reflDir * M_FLT_BIAS_EPSILON, reflDir);
// ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
// Microfacet specular = D*G*F / (4*NoL*NoV)
// pdf = D * NoH / (4 * VoH)
// (G * F * VoH) / (NoV * NoH)
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, reflDir);
float G = GeometricVisibility(roughness, NoV, NoL, VoH);
//shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive;
lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb);
nextRayType = RAYTYPE_SPEC;
pdf *= reflProb;
}
// Diffused or Transmitted
else
{
// Transmitted
if (ProbabilityRand(&procVertex->randState, refrProb))
{
nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float cosine = vecDot(-1 * nl, refrDir);
//shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive;
lightMulTerm = cosine * diff / (refrProb * (1 - reflProb));
nextRayType = RAYTYPE_SPEC;
pdf *= (refrProb * (1.f - reflProb));
}
// Diffused
else
{
float3 w = nl;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * curand_uniform(&procVertex->randState);
float r2cos = sqrtf(curand_uniform(&procVertex->randState));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir);
//ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate);
float VoH = vecDot(-1 * ray.dir, hDir);
float NoV = vecDot(nl, -1 * ray.dir);
float NoL = vecDot(nl, diffDir);
//shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive;
lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb));
nextRayType = RAYTYPE_DIFF;
pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl);
}
}
procVertex->pathSample = procVertex->pathSample + vecMul(emissive, procVertex->pathOutMulTerm);
procVertex->origDiff = diff;
procVertex->pathInDir = -1 * ray.dir;
procVertex->origNorm = nl;
procVertex->origRoughness = roughness;
procVertex->origMetallic = metallic;
procVertex->origSpecular = specular;
procVertex->origTrans = trans;
procVertex->pathInMulTerm = procVertex->pathOutMulTerm;
procVertex->pathPotential *= pdf;
if (procVertex->pathSampleDepth == 0)
{
procVertex->pathDirectPos = triPos;
procVertex->pathDirectNorm = nl;
procVertex->pathDirectDiffuse = diff;
}
float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm);
if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f)
pixelContrib = 0.f;
if (curand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX)
{
procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample;
procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential;
procVertex->pathSampleN++;
procVertex->isTerminated = true;
}
else
{
procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm);
procVertex->pathOutDir = nextRay.dir;
procVertex->pathSampleDepth++;
}
procVertex->pathVertexPos = nextRay.orig;
procVertex->pathType = nextRayType;
}
}
else
{
procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample;
procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential;
procVertex->pathSampleN++;
procVertex->isTerminated = true;
}
}
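// Seeds one light sub-path per thread: picks a random emissive triangle, samples a uniform barycentric
// point on it, shoots a hemisphere-sampled ray from that point, and stores the emitter itself as the
// first LightVertex of the path.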
__global__ void pt_genLightPathQueue_kernel(uint32 frameN, uint32 hashedFrameN, uint* lightTri, uint lightTriN, RTVertex* vertices,
RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex* pathQueue, uint pathQueueCap, LightVertex* lightVertices, uint curLightVerticesSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueCap) return; // '>=': the thread with x == pathQueueCap must not write an extra entry
curandState randstate;
curand_init(hashedFrameN + x, 0, 0, &randstate);
uint lightSourceId = min((uint)(curand_uniform(&randstate) * lightTriN), lightTriN - 1); // curand_uniform returns (0,1], so clamp to the last light triangle
float lightW = curand_uniform(&randstate);
float lightU = curand_uniform(&randstate);
if (lightW + lightU > 1.0f)
{
lightW = 1.f - lightW;
lightU = 1.f - lightU;
}
float lightV = 1.f - lightW - lightU;
uint triId = lightTri[lightSourceId];
RTTriangle* tri = &triangles[triId];
RTMaterial* mat = &materials[tri->matInd];
RTVertex* v0 = &vertices[tri->vertInd0];
RTVertex* v1 = &vertices[tri->vertInd1];
RTVertex* v2 = &vertices[tri->vertInd2];
float2 uv0 = make_float2(v0->tex._x, v0->tex._y);
float2 uv1 = make_float2(v1->tex._x, v1->tex._y);
float2 uv2 = make_float2(v2->tex._x, v2->tex._y);
float2 uv = uv0 * lightW + uv1 * lightU + uv2 * lightV;
float3 n0 = V32F3(v0->norm);
float3 n1 = V32F3(v1->norm);
float3 n2 = V32F3(v2->norm);
float3 triNorm = n0 * lightW + n1 * lightU + n2 * lightV;
float3 triPos = V32F3(v0->pos) * lightW + V32F3(v1->pos) * lightU + V32F3(v2->pos) * lightV;
float3 diff;
float3 emissive;
float trans;
float specular;
float metallic;
float roughness;
float anisotropic;
float sheen;
float sheenTint;
float clearcoat;
float clearcoatGloss;
GetMaterialColors(mat, uv, textures, diff, triNorm, emissive, trans, specular, metallic, roughness
, anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss);
float3 w = triNorm;
float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = vecCross(w, u);
u = vecCross(v, w);
float r1 = 2.f * M_PI * curand_uniform(&randstate);
float r2cos = sqrtf(curand_uniform(&randstate));
float r2sin = 1.f - r2cos*r2cos;
float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1));
pathQueue[x] = PTPathVertex(false, make_uint2(curLightVerticesSize + x, 0), diffDir
, triPos + M_FLT_BIAS_EPSILON * triNorm, RAYTYPE_LIGHT, randstate);
pathQueue[x].pathSample = emissive;
lightVertices[curLightVerticesSize + x].irrad = emissive;
lightVertices[curLightVerticesSize + x].irradDir = make_float3(0.f, 0.f, 0.f);
lightVertices[curLightVerticesSize + x].norm = triNorm;
lightVertices[curLightVerticesSize + x].pos = triPos;
lightVertices[curLightVerticesSize + x].diff = diff;
lightVertices[curLightVerticesSize + x].emissive = emissive;
lightVertices[curLightVerticesSize + x].specular = specular;
lightVertices[curLightVerticesSize + x].metallic = metallic;
lightVertices[curLightVerticesSize + x].roughness = roughness;
}
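// Generates one jittered primary eye ray per pixel (uniform sampling pass used for the first five frames,
// before adaptive sampling takes over).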
__global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov,
float width, float height, uint32 frameN, uint32 hashedFrameN, PTPathVertex* pathQueue)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
uint ind = (y * width + x);
float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height;
float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f);
curandState randstate;
curand_init(hashedFrameN + ind, 0, 0, &randstate);
float au = u + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float av = v + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float3 dir = normalize(camRight * au + camUp * av + camDir);
pathQueue[ind] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate);
}
__global__ void pt_fillTempAdapPathQueue_kernel(uint* pathQueue, uint fillSize)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= fillSize) return;
pathQueue[ind] = ind;
}
__global__ void pt_genTempAdapPathQueue_kernel(float width, float height, uint32 hashedFrameN, uint32 seedoffset
, float* genChance, uint* pathQueue, float minProb = 0.f, float mulRand = 1.f)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
uint ind = (y * width + x);
curandState randstate;
curand_init(hashedFrameN + ind + seedoffset, 0, 0, &randstate);
pathQueue[ind] = x + y * width;
//float modChance = 1.f - expf(-genChance[ind]);
if (curand_uniform(&randstate)*mulRand > fmaxf(genChance[ind], minProb))
{
pathQueue[ind] = 0 - 1;
}
}
__global__ void pt_genAdapPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov,
float width, float height, uint32 frameN, uint32 hashedFrameN, uint* atomicN, float* mseData, float sumMSE, PTPathVertex* pathQueue, uint genSize)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= width * height) return;
uint y = ind / width;
uint x = ind - y * width;
float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height;
float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f);
float mseRatio = mseData[ind] / sumMSE;
uint sampleN = genSize * mseRatio;
for (uint i = 0; i < sampleN; i++)
{
curandState randstate;
curand_init(hashedFrameN + ind + i * genSize, 0, 0, &randstate);
float au = u + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float av = v + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f);
float3 dir = normalize(camRight * au + camUp * av + camDir);
pathQueue[atomicAggInc((int *)atomicN)] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate);
}
}
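// Evaluates the surface response for a connection: GGX specular (D * Vis * F) plus a Fresnel-weighted
// diffuse lobe, scaled by the incoming irradiance, the cosine term and the diffuse/specular weights.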
__device__ float3 GetShadingResult(const float3& lightOutDir, const float3& lightInDir, const float3& lightInIrrad, const float3& norm,
const float3& diff, const float metallic, const float roughness, const float specular, const float2 diffspec)
{
if (vecDot(norm, lightInDir) <= 0.f)
return make_float3(0.f, 0.f, 0.f);
float3 h = normalize(lightOutDir + lightInDir);
float voH = vecDot(lightOutDir, h);
float noV = vecDot(norm, lightOutDir);
float noH = vecDot(norm, h);
float noL = vecDot(norm, lightInDir);
float3 f0 = vecLerp(0.08f * specular * make_float3(1.f, 1.f, 1.f), diff, metallic);
float3 brdf_f = Fresnel(f0, voH);
//float g = GeometricVisibility(roughness, noV, noL, voH);
float d = D_GGX(roughness, noH);
float v = Vis_SmithJointApprox(roughness, noV, noL);
// Microfacet specular = D*G*F / (4*NoL*NoV)
float3 specIrrad = d*v*brdf_f;// vecMul(d*g*brdf_f / (4.f * noV), lightInIrrad);
float3 diffIrrad = vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), Diffuse(diff, roughness, noV, noL, voH));//vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), diff / M_PI);
return vecMul(lightInIrrad*noL, diffspec.y*specIrrad + diffspec.x*diffIrrad);
}
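// Picks one stored light vertex at random, shadow-tests the connection from 'pos', and if visible returns
// the irradiance it contributes together with the direction towards it.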
__device__ void GetLightFromRandLightVertices(float3 pos, float3 norm, LightVertex* lightVertices, uint lightVerticesSize, curandState* randstate, float3& irrad, float3& irradDir, float& pathPotential)
{
//LightVertex dummy;
//dummy.diff = make_float3(1.f, 1.f, 1.f);
//dummy.irrad = make_float3(1.f, 0.f, 0.f);
//dummy.pos = make_float3(0.f, 0.f, 0.f);
//dummy.norm = dummy.irradDir = normalize(pos - dummy.pos);
//dummy.roughness = 0.5f;
//dummy.specular = 0.5f;
//dummy.metallic = 0.f;
irrad = make_float3(0.f, 0.f, 0.f);
uint lightVert = min((uint)(curand_uniform(randstate) * lightVerticesSize), lightVerticesSize - 1); // clamp: curand_uniform returns (0,1]
LightVertex* lightVertex = &lightVertices[lightVert];
float3 toLightVertexDir = normalize(lightVertex->pos - pos);
float toLightVertexDist = length(lightVertex->pos - pos);
CURay toLightVertex(pos, toLightVertexDir);
TracePrimitiveResult traceResult;
if (length(lightVertex->irrad) > 0.f && vecDot(norm, toLightVertexDir) > 0.f &&
!TracePrimitive(toLightVertex, traceResult, toLightVertexDist - M_FLT_BIAS_EPSILON, M_FLT_BIAS_EPSILON, false))
{
if (toLightVertexDist > M_FLT_EPSILON)
{
irrad = GetShadingResult(-1 * toLightVertexDir, lightVertex->irradDir, lightVertex->irrad, lightVertex->norm
, lightVertex->diff, lightVertex->metallic, lightVertex->roughness, lightVertex->specular, make_float2(1.f, 1.f)) + lightVertex->emissive;
irradDir = toLightVertexDir;
}
else
{
irrad = lightVertex->irrad;
irradDir = -1.f * lightVertex->irradDir;
}
}
}
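// Bidirectional connection step: for every eye vertex currently on a diffuse bounce, gathers light from a
// random light-path vertex and adds the shaded contribution to the eye path's accumulated sample.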
__global__ void pt_connectEyeLightPath_kernel(PTPathVertex** eyeStream, uint eyeStreamSize, LightVertex* lightVertices, uint lightVerticesSize)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= eyeStreamSize) return;
PTPathVertex* eyePath = eyeStream[ind];
float3 lightFromLightVertex = make_float3(0.f, 0.f, 0.f);
float3 toLightVertexDir = make_float3(0.f, 0.f, 0.f);
float lightPathPotential = 1.f;
GetLightFromRandLightVertices(eyePath->pathVertexPos + eyePath->origNorm * M_FLT_BIAS_EPSILON, eyePath->origNorm
, lightVertices, lightVerticesSize, &eyePath->randState, lightFromLightVertex, toLightVertexDir, lightPathPotential);
float3 lightContribFromLightVertex = vecMax(make_float3(0.f, 0.f, 0.f)
, GetShadingResult(eyePath->pathInDir, toLightVertexDir, lightFromLightVertex, eyePath->origNorm
, eyePath->origDiff, eyePath->origMetallic, eyePath->origRoughness, eyePath->origSpecular
, make_float2(1.f - eyePath->origTrans, 1.f)));
if (length(lightContribFromLightVertex) > 0.f)
{
eyePath->pathAccumSample = eyePath->pathAccumSample + vecMul(lightContribFromLightVertex, eyePath->pathInMulTerm);
eyePath->pathSampleN += 4;
eyePath->pathPotential *= lightPathPotential;
}
}
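// Refills empty slots at the tail of the path stream with the next unprocessed entries of the path queue.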
__global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize, PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize, uint assignableSlot)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind < assignableSlot)
{
int pathStreamInd = pathStreamSize + ind;
int pathQueueInd = pathQueueCur + ind;
PTPathVertex* assignSample = nullptr;
if (pathQueueInd < pathQueueSize)
{
assignSample = &pathQueue[pathQueueInd];
}
pathStream[pathStreamInd] = assignSample;
}
}
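// Debug view: writes the per-pixel sampling weight (floored at minProb) into all three channels of the
// output buffer.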
__global__ void pt_applyPixelProbToResult_kernel(uint width, uint height, float* result, float* varResult, float minProb = 0.f)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
uint ind = (y * width + x);
result[ind * 3] = result[ind * 3 + 1] = result[ind * 3 + 2] = fmaxf(minProb, varResult[ind]);
}
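// Debug view: counts how many paths were traced through each pixel by atomically adding one per path.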
__global__ void pt_debugTracedPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height
, float* tempResult, uint* tempResultN)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueSize) return;
// if the path is still active, fold its in-flight sample into its accumulated result before writing it out
if (!pathQueue[x].isTerminated)
{
pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample;
pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential;
pathQueue[x].pathSampleN++;
}
if (pathQueue[x].pathSampleN > 0)
{
uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x;
uint tempNextSampleResultN = pathQueue[x].pathSampleN;
float3 sampleResult = make_float3(1.f, 1.f, 1.f);
float potentialResult = 1.f - pathQueue[x].pathAccumPotential;
atomicAdd(tempResult + ind * 3, 1.f);
atomicAdd(tempResult + ind * 3 + 1, 1.f);
atomicAdd(tempResult + ind * 3 + 2, 1.f);
atomicAdd(tempResultN + ind, 1);
}
}
__global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult
, float* varResult, float* posResult, float* normResult, float* diffResult, uint* sampleResultN)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueSize) return;
// if the path is still active, fold its in-flight sample into its accumulated result before writing it out
if (!pathQueue[x].isTerminated)
{
pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample;
pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential;
pathQueue[x].pathSampleN++;
}
if (pathQueue[x].pathSampleN > 0)
{
uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x;
if (!frameN)
{
sampleResultN[ind] = 0;
}
uint tempNextSampleResultN = sampleResultN[ind] + pathQueue[x].pathSampleN;
if (tempNextSampleResultN > sampleResultN[ind])
{
float3 sampleResult = pathQueue[x].pathAccumSample;
if (!isinf(sampleResult.x) && !isinf(sampleResult.y) && !isinf(sampleResult.z))
{
float potentialResult = 1.f - pathQueue[x].pathAccumPotential;
float resultInf = 1.f / (float)(tempNextSampleResultN);
float oldInf = sampleResultN[ind] * resultInf;
result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f);
result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f);
result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f);
varResult[ind] = max(resultInf * potentialResult + oldInf * varResult[ind], 0.f);
sampleResultN[ind] = tempNextSampleResultN;
sampleResult = pathQueue[x].pathDirectPos * pathQueue[x].pathSampleN;
posResult[ind * 3] = resultInf * sampleResult.x + oldInf *posResult[ind * 3];
posResult[ind * 3 + 1] = resultInf * sampleResult.y + oldInf *posResult[ind * 3 + 1];
posResult[ind * 3 + 2] = resultInf * sampleResult.z + oldInf *posResult[ind * 3 + 2];
sampleResult = pathQueue[x].pathDirectNorm * pathQueue[x].pathSampleN;
normResult[ind * 3] = resultInf * sampleResult.x + oldInf *normResult[ind * 3];
normResult[ind * 3 + 1] = resultInf * sampleResult.y + oldInf *normResult[ind * 3 + 1];
normResult[ind * 3 + 2] = resultInf * sampleResult.z + oldInf *normResult[ind * 3 + 2];
sampleResult = pathQueue[x].pathDirectDiffuse * pathQueue[x].pathSampleN;
diffResult[ind * 3] = resultInf * sampleResult.x + oldInf *diffResult[ind * 3];
diffResult[ind * 3 + 1] = resultInf * sampleResult.y + oldInf *diffResult[ind * 3 + 1];
diffResult[ind * 3 + 2] = resultInf * sampleResult.z + oldInf *diffResult[ind * 3 + 2];
}
}
}
}
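// Scatters each traced path's accumulated radiance and first-hit position/normal/diffuse into the per-pixel
// temporary buffers with atomicAdd; sample counts go into tempResultN so the sums can be averaged later.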
__global__ void pt_atomicApplyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height
, float* tempResult, uint* tempResultN, float* tempPos, float* tempNorm, float* tempDiff)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= pathQueueSize) return;
// if the path is still active, fold its in-flight sample into its accumulated result before writing it out
if (!pathQueue[x].isTerminated)
{
pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample;
pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential;
pathQueue[x].pathSampleN++;
}
if (pathQueue[x].pathSampleN > 0)
{
uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x;
float3 sampleResult = pathQueue[x].pathAccumSample;
if (!isinf(sampleResult.x) && !isinf(sampleResult.y) && !isinf(sampleResult.z))
{
atomicAdd(tempResult + ind * 3, sampleResult.x);
atomicAdd(tempResult + ind * 3 + 1, sampleResult.y);
atomicAdd(tempResult + ind * 3 + 2, sampleResult.z);
atomicAdd(tempResultN + ind, pathQueue[x].pathSampleN);
sampleResult = pathQueue[x].pathDirectPos * pathQueue[x].pathSampleN;
atomicAdd(tempPos + ind * 3, sampleResult.x);
atomicAdd(tempPos + ind * 3 + 1, sampleResult.y);
atomicAdd(tempPos + ind * 3 + 2, sampleResult.z);
sampleResult = pathQueue[x].pathDirectNorm * pathQueue[x].pathSampleN;
atomicAdd(tempNorm + ind * 3, sampleResult.x);
atomicAdd(tempNorm + ind * 3 + 1, sampleResult.y);
atomicAdd(tempNorm + ind * 3 + 2, sampleResult.z);
sampleResult = pathQueue[x].pathDirectDiffuse * pathQueue[x].pathSampleN;
atomicAdd(tempDiff + ind * 3, sampleResult.x);
atomicAdd(tempDiff + ind * 3 + 1, sampleResult.y);
atomicAdd(tempDiff + ind * 3 + 2, sampleResult.z);
}
}
}
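// Folds this frame's per-pixel sums into the running averages, weighting old and new data by their sample
// counts (all buffers are reset on frame 0).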
__global__ void pt_accumTempResultToResult_kernel(uint width, uint height, uint frameN, float* tempResult, uint* tempResultN
, float* tempPos, float* tempNorm, float* tempDiff, float* result, uint* resultN
, float* pos, float* norm, float* diff)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= width * height) return;
if (frameN == 0)
{
resultN[x] = 0;
result[x * 3] = 0.f;
result[x * 3 + 1] = 0.f;
result[x * 3 + 2] = 0.f;
pos[x * 3] = 0.f;
pos[x * 3 + 1] = 0.f;
pos[x * 3 + 2] = 0.f;
norm[x * 3] = 0.f;
norm[x * 3 + 1] = 0.f;
norm[x * 3 + 2] = 0.f;
diff[x * 3] = 0.f;
diff[x * 3 + 1] = 0.f;
diff[x * 3 + 2] = 0.f;
}
uint nextSampleN = tempResultN[x] + resultN[x];
if (nextSampleN > resultN[x])
{
float resultInf = 1.f / (float)(nextSampleN);
float oldInf = resultN[x] * resultInf;
result[x * 3] = max(resultInf * tempResult[x * 3] + oldInf * result[x * 3], 0.f);
result[x * 3 + 1] = max(resultInf * tempResult[x * 3 + 1] + oldInf * result[x * 3 + 1], 0.f);
result[x * 3 + 2] = max(resultInf * tempResult[x * 3 + 2] + oldInf * result[x * 3 + 2], 0.f);
resultN[x] = resultN[x] + tempResultN[x];
pos[x * 3] = resultInf * tempPos[x * 3] + oldInf * pos[x * 3];
pos[x * 3 + 1] = resultInf * tempPos[x * 3 + 1] + oldInf * pos[x * 3 + 1];
pos[x * 3 + 2] = resultInf * tempPos[x * 3 + 2] + oldInf * pos[x * 3 + 2];
norm[x * 3] = resultInf * tempNorm[x * 3] + oldInf * norm[x * 3];
norm[x * 3 + 1] = resultInf * tempNorm[x * 3 + 1] + oldInf * norm[x * 3 + 1];
norm[x * 3 + 2] = resultInf * tempNorm[x * 3 + 2] + oldInf * norm[x * 3 + 2];
diff[x * 3] = resultInf * tempDiff[x * 3] + oldInf * diff[x * 3];
diff[x * 3 + 1] = resultInf * tempDiff[x * 3 + 1] + oldInf * diff[x * 3 + 1];
diff[x * 3 + 2] = resultInf * tempDiff[x * 3 + 2] + oldInf * diff[x * 3 + 2];
}
}
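// Per-pixel mean squared error between the filtered image (treated as the converged reference) and the raw
// accumulation, averaged over RGB and scaled by sampleN / (sampleN + sampledN) so that well-sampled pixels
// attract fewer new rays.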
__global__ void pt_calculateSquareError_kernel(float* correctData, float* sampleData, uint* sampleNData, uint sampleN, float* resultData, uint dataSize)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= dataSize)
return;
uint sampledN = sampleNData[x];
float reductionFactor = (float)sampleN / (float)(sampleN + sampledN);
if (sampleN + sampledN < sampledN)
reductionFactor = 0;
resultData[x] = reductionFactor * ((correctData[x * 3] - sampleData[x * 3]) * (correctData[x * 3] - sampleData[x * 3])
+ (correctData[x * 3 + 1] - sampleData[x * 3 + 1]) * (correctData[x * 3 + 1] - sampleData[x * 3 + 1])
+ (correctData[x * 3 + 2] - sampleData[x * 3 + 2]) * (correctData[x * 3 + 2] - sampleData[x * 3 + 2])
) / 3.f;
}
void CleanMem()
{
freeLightPathMem();
freeStreamMem();
freeAllBVHCudaMem();
CUFREE(g_devTempResultN);
CUFREE(g_devSampleResultN);
CUFREE(g_devResultVarData);
CUFREE(g_devResultData);
CUFREE(g_devAccResultData);
CUFREE(g_devTempResultData);
CUFREE(g_devFilteredResult);
CUFREE(g_devFilterGaussianConst);
CUFREE(g_devPositionData);
CUFREE(g_devNormalData);
CUFREE(g_devDiffuseData);
CUFREE(g_devTempPositionData);
CUFREE(g_devTempNormalData);
CUFREE(g_devTempDiffuseData);
}
//struct ray_greater_compare
//{
// __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2)
// {
// int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0);
// int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0);
// return vert1Score > vert2Score;
// }
//};
struct is_temppathqueue_terminated
{
__hd__ bool operator()(const uint& vert)
{
return (vert + 1 == 0);
}
};
struct is_active
{
__hd__ bool operator()(const PTPathVertex* vert)
{
return !vert->isTerminated;
}
};
struct is_terminated
{
__hd__ bool operator()(const PTPathVertex* vert)
{
return vert->isTerminated;
}
};
struct is_connectToLightPath
{
__hd__ bool operator()(const PTPathVertex* vert)
{
return vert->pathType == RAYTYPE_DIFF;
}
};
template<typename Predicate>
__global__ void pt_copyif_kernel(PTPathVertex** result, uint* atomicN, PTPathVertex** pathStream
, uint activePathStreamSize, Predicate pred)
{
uint ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= activePathStreamSize)
return;
if (pred(pathStream[ind]))
{
result[atomicAggInc((int *)atomicN)] = pathStream[ind];
}
}
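// Wavefront path tracing loop: keeps the path stream full from the path queue, traces one bounce for all
// active paths, compacts away terminated paths, and connects freshly hit diffuse vertices to the cached
// light vertices on every iteration.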
void TracePathQueue(uint pathQueueSize)
{
dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1);
dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1);
uint activePathStreamSize = 0;
g_uPathQueueCur = 0;
while (g_uPathQueueCur < pathQueueSize || activePathStreamSize > 0)
{
uint tempActivePathStreamSize = activePathStreamSize;
int assignableStreamSlot = min((uint)PATHSTREAM_SIZE - activePathStreamSize, pathQueueSize - g_uPathQueueCur);
if (assignableStreamSlot > 0)
pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >
(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur
, pathQueueSize, assignableStreamSlot);
//readjust activePathStreamSize
activePathStreamSize += assignableStreamSlot;
g_uPathQueueCur += assignableStreamSlot;
//tracing process
pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize);
HANDLE_KERNEL_ERROR();
//compact pathstream and find activePathStreamSize value
//PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated());
//activePathStreamSize = compactedStreamEndItr - g_devPathStream;
HANDLE_ERROR(cudaMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_copyif_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devTempPathStream, g_devAtomicN, g_devPathStream, activePathStreamSize, is_active());
HANDLE_KERNEL_ERROR();
cudaMemcpy(&activePathStreamSize, g_devAtomicN, sizeof(uint), cudaMemcpyDeviceToHost);
PTPathVertex** tempSwap = g_devPathStream;
g_devPathStream = g_devTempPathStream;
g_devTempPathStream = tempSwap;
// build the connection path stream (active eye paths currently on a diffuse bounce)
//PTPathVertex** conPathStreamEndItr = thrust::copy_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize
// , g_devEyeLightConPathStream, is_connectToLightPath());
//uint activeConPathStreamSize = conPathStreamEndItr - g_devEyeLightConPathStream;
uint activeConPathStreamSize = 0;
if (activePathStreamSize > 0)
{
HANDLE_ERROR(cudaMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_copyif_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devEyeLightConPathStream, g_devAtomicN, g_devPathStream, activePathStreamSize, is_connectToLightPath());
HANDLE_KERNEL_ERROR();
cudaMemcpy(&activeConPathStreamSize, g_devAtomicN, sizeof(uint), cudaMemcpyDeviceToHost);
}
//connect eye and light path stream
if (activeConPathStreamSize > 0)
{
pt_connectEyeLightPath_kernel << < dim3(ceil((float)activeConPathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devEyeLightConPathStream, activeConPathStreamSize, g_devLightVertices, g_uLightVerticesSize);
HANDLE_KERNEL_ERROR();
}
}
}
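// Host entry point: (re)allocates per-frame buffers, regenerates light sub-paths every third frame, traces
// either uniform eye paths (first five frames) or adaptively distributed eye paths driven by the
// bilateral-filtered MSE estimate, and accumulates the results for display.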
bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene
, float width, float height, float* result)
{
// Check and allocate everything
if (!scene || !scene->GetCompactBVH()->IsValid())
return false;
NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize();
camUp = camRight.cross(camDir).normalize();
g_matLastCamMat = g_matCurCamMat;
g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp);
g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 0 : g_uCurFrameN + 1;
if (!g_bIsCudaInit || scene->GetIsCudaDirty())
{
CleanMem();
g_matLastCamMat = g_matCurCamMat;
g_uCurFrameN = 0;
initAllSceneCudaMem(scene);
allocateStreamMem(width * height);
allocateLightPathMem();
updateLightTriCudaMem(scene);
size_t mem_tot;
size_t mem_free;
cudaMemGetInfo(&mem_free, &mem_tot);
std::cout << "Memory Used : " << mem_tot - mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl;
}
else if (scene->GetIsCudaMaterialDirty())
{
updateAllSceneMaterialsCudaMem(scene);
updateLightTriCudaMem(scene);
g_uCurFrameN = 0;
}
if (!g_bIsCudaInit)
return false;
if (!g_devResultData || !g_devAccResultData || !g_devTempResultData || g_resultDataSize != (sizeof(float) * 3 * width * height) || !g_devFilteredResult)
{
g_resultDataSize = sizeof(float) * 3 * width * height;
CUFREE(g_devResultData);
cudaMalloc((void**)&g_devResultData, g_resultDataSize);
CUFREE(g_devAccResultData);
cudaMalloc((void**)&g_devAccResultData, g_resultDataSize);
CUFREE(g_devTempResultData);
cudaMalloc((void**)&g_devTempResultData, g_resultDataSize);
CUFREE(g_devPositionData);
cudaMalloc((void**)&g_devPositionData, g_resultDataSize);
CUFREE(g_devNormalData);
cudaMalloc((void**)&g_devNormalData, g_resultDataSize);
CUFREE(g_devDiffuseData);
cudaMalloc((void**)&g_devDiffuseData, g_resultDataSize);
CUFREE(g_devTempPositionData);
cudaMalloc((void**)&g_devTempPositionData, g_resultDataSize);
CUFREE(g_devTempNormalData);
cudaMalloc((void**)&g_devTempNormalData, g_resultDataSize);
CUFREE(g_devTempDiffuseData);
cudaMalloc((void**)&g_devTempDiffuseData, g_resultDataSize);
CUFREE(g_devResultVarData);
cudaMalloc((void**)&g_devResultVarData, sizeof(float) * width * height);
CUFREE(g_devSampleResultN);
cudaMalloc((void**)&g_devSampleResultN, sizeof(uint) * width * height);
CUFREE(g_devTempResultN);
cudaMalloc((void**)&g_devTempResultN, sizeof(uint) * width * height);
CUFREE(g_devFilteredResult);
cudaMalloc((void**)&g_devFilteredResult, g_resultDataSize);
}
if (!g_devFilterGaussianConst)
{
CUFREE(g_devFilterGaussianConst);
cudaMalloc((void**)&g_devFilterGaussianConst, sizeof(uint) * GAUSSIANCOST_N);
cudaBilateralFilter::updateGaussian(g_devFilterGaussianConst, *g_fFilterColorEuD.GetFloat(), *g_uiFilterRadius.GetUint());
}
float3 f3CamPos = V32F3(camPos);
float3 f3CamUp = V32F3(camUp);
float3 f3CamDir = V32F3(camDir);
float3 f3CamRight = V32F3(camRight);
dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1);
dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1);
// light paths
if (g_uCurFrameN % 3 == 0)
{
uint lightPathStreamSizeCap = min((uint)PATHSTREAM_SIZE, (uint)(LIGHTVERTEX_N / LIGHTRAY_BOUND_MAX));
pt_genLightPathQueue_kernel << < dim3(ceil((float)lightPathStreamSizeCap / (float)block1.x), 1, 1), block1 >> >
(g_uCurFrameN, WangHash(g_uCurFrameN), g_devLightTri, g_lightTriN, g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathQueue, lightPathStreamSizeCap
, g_devLightVertices, 0);
uint activePathStreamSize = 0;
g_uLightVerticesSize = lightPathStreamSizeCap;
g_uPathQueueCur = 0;
while (g_uPathQueueCur < lightPathStreamSizeCap || activePathStreamSize > 0)
{
uint tempActivePathStreamSize = activePathStreamSize;
int assignableStreamSlot = min(lightPathStreamSizeCap - activePathStreamSize, lightPathStreamSizeCap - g_uPathQueueCur);
if (assignableStreamSlot > 0)
{
pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur
, g_uLightVerticesSize, assignableStreamSlot);
}
//readjust activePathStreamSize
activePathStreamSize += assignableStreamSlot;
g_uPathQueueCur += assignableStreamSlot;
pt_traceLight_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize
, g_devLightVertices, g_uLightVerticesSize);
g_uLightVerticesSize += activePathStreamSize;
//compact pathstream and find activePathStreamSize value
//PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated());
//activePathStreamSize = compactedStreamEndItr - g_devPathStream;
HANDLE_ERROR(cudaMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_copyif_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
(g_devTempPathStream, g_devAtomicN, g_devPathStream, activePathStreamSize, is_active());
HANDLE_KERNEL_ERROR();
cudaMemcpy(&activePathStreamSize, g_devAtomicN, sizeof(uint), cudaMemcpyDeviceToHost);
PTPathVertex** tempSwap = g_devPathStream;
g_devPathStream = g_devTempPathStream;
g_devTempPathStream = tempSwap;
}
//std::cout << "Generated light vertices size: " << g_uLightVerticesSize << std::endl;
}
if (g_uCurFrameN < 5)
{
//float time;
//cudaEvent_t start, stop;
//HANDLE_ERROR(cudaEventCreate(&start));
//HANDLE_ERROR(cudaEventCreate(&stop));
uint useQueueSize = width * height;
//HANDLE_ERROR(cudaEventRecord(start, 0));
// eye paths
pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height
, g_uCurFrameN, WangHash(g_uCurFrameN), g_devPathQueue);
//HANDLE_ERROR(cudaEventRecord(stop, 0));
//HANDLE_ERROR(cudaEventSynchronize(stop));
//HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
//std::cout << "gen path: " << time << std::endl;
//HANDLE_ERROR(cudaEventRecord(start, 0));
// trace path queue
TracePathQueue(useQueueSize);
//HANDLE_ERROR(cudaEventRecord(stop, 0));
//HANDLE_ERROR(cudaEventSynchronize(stop));
//HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
//std::cout << "trace path: " << time << std::endl;
//HANDLE_ERROR(cudaEventRecord(start, 0));
pt_applyPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> >
(g_devPathQueue, useQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarData
, g_devPositionData, g_devNormalData, g_devDiffuseData, g_devSampleResultN);
//HANDLE_ERROR(cudaEventRecord(stop, 0));
//HANDLE_ERROR(cudaEventSynchronize(stop));
//HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
//std::cout << "accum path: " << time << std::endl;
}
else
{
#ifdef PERFBREAKDOWN
float time;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
#endif
if (*g_bFilterDiffuse.GetBool())
{
cudaBilateralFilter::bilaterial_posnormemit_kernel << < renderGrid, block2 >> >
(g_devResultData, g_devPositionData, g_devNormalData, g_devDiffuseData,
width, height, *g_fFilterColorEuD.GetFloat(), *g_fFilterPosEuD.GetFloat(), *g_fFilterNormEuD.GetFloat()
, *g_fFilterDiffEuD.GetFloat(), *g_uiFilterRadius.GetUint(), g_devFilterGaussianConst, g_devFilteredResult);
}
else
{
cudaBilateralFilter::bilaterial_posnorm_kernel << < renderGrid, block2 >> >
(g_devResultData, g_devPositionData, g_devNormalData,
width, height, *g_fFilterColorEuD.GetFloat(), *g_fFilterPosEuD.GetFloat(), *g_fFilterNormEuD.GetFloat()
, *g_uiFilterRadius.GetUint(), g_devFilterGaussianConst, g_devFilteredResult);
}
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeA.attrFloat = (g_fAvgProcessTimeA.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "filter: " << time << std::endl;
HANDLE_ERROR(cudaEventRecord(start, 0));
#endif
// calculate sampling map from converged result
pt_calculateSquareError_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >
(g_devFilteredResult, g_devResultData, g_devSampleResultN, *g_uiDesiredSamplingN.GetUint(), g_devResultVarData, (uint)(width * height));
//thrust::sort(thrust::device, g_devResultVarData, g_devResultVarData + (uint)(width * height));
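// sum of the per-pixel MSE over the whole frame; each pixel's share of this total decides how many adaptive eye rays it receives below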
float sumMSE = thrust::reduce(thrust::device, g_devResultVarData, g_devResultVarData + (uint)(width * height), 0.f, thrust::plus<float>());
//float maxMSE = thrust::reduce(thrust::device, g_devResultVarData, g_devResultVarData + (uint)(width * height), 0.f, thrust::maximum<float>());
//float meanMSE = sumMSE / (width * height);
//std::cout << "sumMSE: " << sumMSE << "\n";
//std::cout << "maxMSE: " << maxMSE << "\n";
//std::cout << "meanMSE: " << meanMSE << "\n";
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeB.attrFloat = (g_fAvgProcessTimeB.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "MSE cal: " << time << std::endl;
#endif
//if (g_uCurFrameN == 1)
//{
// float* tempDiffData = new float[(uint)width * (uint)height];
// cudaMemcpy(tempDiffData, g_devResultVarData, (uint)(width * height) * sizeof(float), cudaMemcpyDeviceToHost);
// NPConfFileHelper::txtConfFile conf("adapCheat_diffData.txt");
// for (uint j = 0; j < width * height; j++)
// {
// conf.WriteRaw<float>(tempDiffData[j]);
// conf.WriteRaw("\n");
// }
// conf.SyncDataToFile();
// DELETE_ARRAY(tempDiffData);
//}
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(start, 0));
#endif
// gen adaptive eye paths
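		// The per-pixel error written by pt_calculateSquareError_kernel (normalized by
		// sumMSE) steers how many eye paths each pixel receives this frame; paths are
		// appended through the g_devAtomicN counter, capped at useQueueSize, and the
		// actual count is copied back below.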
uint useQueueSize = width * height * PATHQUEUE_MUL;
HANDLE_ERROR(cudaMemset((void*)g_devAtomicN, 0, sizeof(uint)));
pt_genAdapPathQueue_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >
(f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height
, g_uCurFrameN, WangHash(g_uCurFrameN), g_devAtomicN, g_devResultVarData, sumMSE, g_devPathQueue, useQueueSize);
cudaMemcpy(&useQueueSize, g_devAtomicN, sizeof(uint), cudaMemcpyDeviceToHost);
//std::cout << "AtomicN : " << useQueueSize << std::endl;
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeC.attrFloat = (g_fAvgProcessTimeC.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "gen path: " << time << std::endl;
#endif
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(start, 0));
#endif
TracePathQueue(useQueueSize);
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeD.attrFloat = (g_fAvgProcessTimeD.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "trace path: " << time << std::endl;
#endif
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(start, 0));
#endif
HANDLE_ERROR(cudaMemset((void*)g_devTempResultData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(cudaMemset((void*)g_devTempPositionData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(cudaMemset((void*)g_devTempNormalData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(cudaMemset((void*)g_devTempDiffuseData, 0, sizeof(float) * 3 * height * width));
HANDLE_ERROR(cudaMemset((void*)g_devTempResultN, 0, sizeof(uint) * height * width));
if (*g_enumDebugMode.GetUint() == 1)
{
pt_debugTracedPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> >
(g_devPathQueue, useQueueSize, width, height, g_devTempResultData, g_devTempResultN);
}
else
{
pt_atomicApplyPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> >
(g_devPathQueue, useQueueSize, width, height, g_devTempResultData, g_devTempResultN
, g_devTempPositionData, g_devTempNormalData, g_devTempDiffuseData);
}
pt_accumTempResultToResult_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >
(width, height, g_uCurFrameN, g_devTempResultData, g_devTempResultN
, g_devTempPositionData, g_devTempNormalData, g_devTempDiffuseData
, g_devResultData, g_devSampleResultN, g_devPositionData, g_devNormalData, g_devDiffuseData);
#ifdef PERFBREAKDOWN
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
g_fAvgProcessTimeE.attrFloat = (g_fAvgProcessTimeE.attrFloat * (g_uCurFrameN - 5) + time) / (g_uCurFrameN - 5 + 1);
//std::cout << "accum path: " << time << std::endl;
#endif
}
if (*g_enumDebugMode.GetUint() == 2 || *g_enumDebugMode.GetUint() == 3)
{
pt_applyPixelProbToResult_kernel << < renderGrid, block2 >> >(width, height, g_devFilteredResult, g_devResultVarData, (*g_enumDebugMode.GetUint() == 3) ? *g_fMinTraceProb.GetFloat() : 0.f);
}
// Copy result to host
if (*g_enumDebugMode.GetUint() == 4 || *g_enumDebugMode.GetUint() == 2 || *g_enumDebugMode.GetUint() == 3)
{
cudaMemcpy(result, g_devFilteredResult, g_resultDataSize, cudaMemcpyDeviceToHost);
}
else
{
cudaMemcpy(result, g_devResultData, g_resultDataSize, cudaMemcpyDeviceToHost);
}
return true;
}
} |
fe90d0a60d4016f32ff0a2ea52e90a8882d9dfd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "memory_helper.h"
using namespace std;
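// MemoryMonitor records the size of every allocation it hands out (per-pointer byte
// counts in cpuPoint/gpuPoint) so the total live host and device memory can be
// queried through getCpuMemory()/getGpuMemory().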
void* MemoryMonitor::cpuMalloc(int size){
cpuMemory += size;
void* p = malloc(size);
cpuPoint[p] = 1.0f * size;
//if(size >= Mb){
// printf("******************************* cpu malloc memory %fMb\n", 1.0 * size / Mb);
//}
return p;
}
void MemoryMonitor::freeCpuMemory(void* ptr)
{
if(cpuPoint.find(ptr) != cpuPoint.end()){
//if(cpuPoint[ptr] >= Mb){
// printf("+++++++++++++++++++++++++++++++ free cpu memory %fMb\n", cpuPoint[ptr] / Mb);
//}
cpuMemory -= cpuPoint[ptr];
free(ptr);
cpuPoint.erase(ptr);
}
}
hipError_t MemoryMonitor::gpuMalloc(void** devPtr, int size){
hipError_t error = hipMalloc(devPtr, size);
checkCudaErrors(error);
gpuMemory += size;
gpuPoint[*devPtr] = (float)size;
return error;
//if(size >= Mb){
// printf("******************************* gpu malloc memory %fMb\n", 1.0 * size / Mb);
//}
}
void MemoryMonitor::freeGpuMemory(void* ptr){
if(gpuPoint.find(ptr) != gpuPoint.end()){
//if(gpuPoint[ptr] >= Mb){
// printf("+++++++++++++++++++++++++++++++ free gpu memory %fMb\n", gpuPoint[ptr] / Mb);
//}
gpuMemory -= gpuPoint[ptr];
checkCudaErrors(hipFree(ptr));
gpuPoint.erase(ptr);
}
}
float MemoryMonitor::getCpuMemory() const{
return cpuMemory;
}
float MemoryMonitor::getGpuMemory() const{
return gpuMemory;
}
| fe90d0a60d4016f32ff0a2ea52e90a8882d9dfd1.cu | #include "memory_helper.h"
using namespace std;
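// MemoryMonitor records the size of every allocation it hands out (per-pointer byte
// counts in cpuPoint/gpuPoint) so the total live host and device memory can be
// queried through getCpuMemory()/getGpuMemory().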
void* MemoryMonitor::cpuMalloc(int size){
cpuMemory += size;
void* p = malloc(size);
cpuPoint[p] = 1.0f * size;
//if(size >= Mb){
// printf("******************************* cpu malloc memory %fMb\n", 1.0 * size / Mb);
//}
return p;
}
void MemoryMonitor::freeCpuMemory(void* ptr)
{
if(cpuPoint.find(ptr) != cpuPoint.end()){
//if(cpuPoint[ptr] >= Mb){
// printf("+++++++++++++++++++++++++++++++ free cpu memory %fMb\n", cpuPoint[ptr] / Mb);
//}
cpuMemory -= cpuPoint[ptr];
free(ptr);
cpuPoint.erase(ptr);
}
}
cudaError_t MemoryMonitor::gpuMalloc(void** devPtr, int size){
cudaError_t error = cudaMalloc(devPtr, size);
checkCudaErrors(error);
gpuMemory += size;
gpuPoint[*devPtr] = (float)size;
return error;
//if(size >= Mb){
// printf("******************************* gpu malloc memory %fMb\n", 1.0 * size / Mb);
//}
}
void MemoryMonitor::freeGpuMemory(void* ptr){
if(gpuPoint.find(ptr) != gpuPoint.end()){
//if(gpuPoint[ptr] >= Mb){
// printf("+++++++++++++++++++++++++++++++ free gpu memory %fMb\n", gpuPoint[ptr] / Mb);
//}
gpuMemory -= gpuPoint[ptr];
checkCudaErrors(cudaFree(ptr));
gpuPoint.erase(ptr);
}
}
float MemoryMonitor::getCpuMemory() const{
return cpuMemory;
}
float MemoryMonitor::getGpuMemory() const{
return gpuMemory;
}
|
aee615cc8d1369a392fe0055a4d39a0857781629.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Reduce::map(void)
{
// TODO: use cudnn reduce tensor
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputs[0].volume() * sizeof(DATATYPE)));
}
void Reduce::unmap(void)
{
checkCUDA(hipFree(outputs[0].data_ptr));
}
void Reduce::forward(bool block)
{
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_reduce_cost(Reduce* reduce)
{
// TODO: use cudnn reduce tensor
reduce->runtime = 0;
}
| aee615cc8d1369a392fe0055a4d39a0857781629.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Reduce::map(void)
{
// TODO: use cudnn reduce tensor
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputs[0].volume() * sizeof(DATATYPE)));
}
void Reduce::unmap(void)
{
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void Reduce::forward(bool block)
{
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_reduce_cost(Reduce* reduce)
{
// TODO: use cudnn reduce tensor
reduce->runtime = 0;
}
|
0bdbb2ed63e06969da7582fad2ab2d13d5db1dc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
typedef uint32_t kmer_t;
const kmer_t INVALID_KMER = ~0;
template <typename T>
__host__ __device__ T powi(T a, T b) {
T result = 1;
while (b > 0) {
result *= a;
b--;
}
return result;
}
template <typename T>
T div_ceil(T a, T b) {
return (a / b) + (a % b == 0 ? 0 : 1);
}
/**
* Calculates the scratch memory required to store n elements of type T.
 * Memory is aligned to 256-byte segments for good performance on the GPU.
*/
template <typename T>
size_t allocate_scratch(size_t n) {
return (n * sizeof(T) + 255) / 256 * 256;
}
/**
 * Takes memory for n elements of type T from scratch and advances
* the scratch memory pointer forward to the next available bytes.
*/
template <typename T>
T* advance_scratch(void **scratch, size_t n) {
void* current = *scratch;
size_t bytes = allocate_scratch<T>(n);
*scratch = (void*)(((char*) *scratch) + bytes);
return (T*) current;
}
__constant__ uint8_t DNA2DIGIT_MAPPING[26] = {
0, // A
255, // B
1, // C
255, // D
255, // E
255, // F
2, // G
255, // H
255, // I
255, // J
255, // K
255, // L
255, // M
255, // N
255, // O
255, // P
255, // Q
255, // R
255, // S
3, // T
255, // U
255, // V
255, // W
255, // X
255, // Y
255 // Z
};
struct DNAAlphabet {
static const size_t NUM_SYMBOLS = 4;
/**
* convert an ASCII DNA representation to its 2-bit symbol
* Based on nvbio/dna.h:78
*/
__device__ static uint8_t char_to_digit(const char c) {
if (c >= 'A' && c <= 'Z') {
return DNA2DIGIT_MAPPING[c - 'A'];
} else {
return 0xff;
}
}
};
__constant__ uint8_t PROTEIN2DIGIT_MAPPING[26] = {
0, // A
255, // B
1, // C
2, // D
3, // E
4, // F
5, // G
6, // H
7, // I
255, // J
8, // K
9, // L
10, // M
11, // N
255, // O
12, // P
13, // Q
14, // R
15, // S
16, // T
255, // U
17, // V
18, // W
    20,  // X (Unknown protein)
19, // Y
255 // Z
};
struct ProteinAlphabet {
static const size_t NUM_SYMBOLS = 21;
/**
* convert an ASCII character to a 5-bit symbol
* Based on nvbio/alphabet_inl.h:90
*/
__device__ static uint8_t char_to_digit(const char c) {
if (c >= 'A' && c <= 'Z') {
return PROTEIN2DIGIT_MAPPING[c - 'A'];
} else {
return 0xff;
}
}
};
template <typename Alphabet>
size_t scratch_build_composition_vector(int k) {
size_t k0 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 0);
size_t k1 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 1);
size_t k2 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 2);
size_t scratch_size = 0;
scratch_size += allocate_scratch<uint32_t>(k0);
scratch_size += allocate_scratch<uint32_t>(k1);
scratch_size += allocate_scratch<uint32_t>(k2);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<double>(1);
size_t cub_scratch = 0;
hipError_t err = hipcub::DeviceReduce::Sum(
(void*) NULL,
cub_scratch,
(uint32_t*) NULL,
(uint32_t*) NULL,
k0);
if (err != hipSuccess) return err;
size_t x = 0;
err = hipcub::DeviceReduce::Sum(
(void*) NULL,
x,
(double*) NULL,
(double*) NULL,
k0);
if (err != hipSuccess) return err;
if (x > cub_scratch) cub_scratch = x;
err = hipcub::DeviceSelect::If(
(void*) NULL,
x,
(uint32_t*) NULL,
(uint32_t*) NULL,
(uint32_t*) NULL,
k0,
[=] __device__ (uint32_t i) { return false; });
if (err != hipSuccess) return err;
if (x > cub_scratch) cub_scratch = x;
scratch_size += allocate_scratch<uint8_t>(cub_scratch);
return scratch_size;
}
template <int i, int j, bool flag = i < j>
struct unroll_helper {
template <typename F>
__device__ static void call(F fun) {
if (fun(i)) {
unroll_helper<i + 1, j>::call(fun);
}
}
};
template <int i, int j>
struct unroll_helper<i, j, false> {
template <typename F>
__device__ static void call(F fun) {
//
}
};
template <int N, typename F>
__device__ void unroll(F fun) {
unroll_helper<0, N>::call(fun);
}
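/**
 * Count every k-mer, (k-1)-mer and (k-2)-mer of the string in a single pass:
 * each thread starts at one character, extends its window one symbol at a time
 * through the compile-time unrolled loop, bumps the matching histogram bin with
 * an atomicAdd, and stops early at an invalid character or the end of the string.
 */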
template <typename Alphabet, int K>
hipError_t count_kmers(
hipStream_t stream,
const char* d_string,
uint32_t string_len,
uint32_t *d_k0_count,
uint32_t *d_k1_count,
uint32_t *d_k2_count
) {
auto exec = thrust::hip::par.on(stream);
thrust::for_each(
exec,
thrust::make_counting_iterator<uint32_t>(0u),
thrust::make_counting_iterator<uint32_t>(string_len),
[=] __device__ (uint32_t i) {
kmer_t w = 0;
unroll<K + 1>([=, &w](uint32_t j) {
// Compiler should remove the conditionals when unrolling
// the loop (fingers crossed!).
if (j == K - 2) atomicAdd(&d_k2_count[w], 1);
if (j == K - 1) atomicAdd(&d_k1_count[w], 1);
if (j == K - 0) atomicAdd(&d_k0_count[w], 1);
if (i + j >= string_len) return false;
uint8_t c = Alphabet::char_to_digit(d_string[i + j]);
if (c == 0xff) return false;
w = w * Alphabet::NUM_SYMBOLS + c;
return true;
});
});
return hipSuccess;
}
template <typename Alphabet>
hipError_t build_composition_vector(
hipStream_t stream,
void *d_scratch,
size_t scratch_size,
int k,
const char* d_string,
uint32_t string_len,
kmer_t *d_keys,
float *d_values,
uint32_t *num_unique,
uint32_t max_size
) {
hipError_t err = hipSuccess;
size_t k0 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 0);
size_t k1 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 1);
size_t k2 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 2);
uint32_t *d_k0_count = advance_scratch<kmer_t>(&d_scratch, k0);
uint32_t *d_k1_count = advance_scratch<kmer_t>(&d_scratch, k1);
uint32_t *d_k2_count = advance_scratch<kmer_t>(&d_scratch, k2);
uint32_t *d_n0 = advance_scratch<uint32_t>(&d_scratch, 1);
uint32_t *d_n1 = advance_scratch<uint32_t>(&d_scratch, 1);
uint32_t *d_n2 = advance_scratch<uint32_t>(&d_scratch, 1);
uint32_t *d_num_unique = advance_scratch<uint32_t>(&d_scratch, 1);
double *d_norm = advance_scratch<double>(&d_scratch, 1);
// initialize k-mer count table with zeros
auto exec = thrust::hip::par.on(stream);
thrust::fill_n(exec, d_k0_count, k0, 0);
thrust::fill_n(exec, d_k1_count, k1, 0);
thrust::fill_n(exec, d_k2_count, k2, 0);
// count k-mers of length k, k-1, and k-2
#define SPECIALIZE(K) \
if (k == K) err = count_kmers<Alphabet, K>( \
stream, d_string, string_len, d_k0_count, d_k1_count, d_k2_count)
if (k < 3 || k > 10) {
fprintf(stderr, "error: k=%d should be in range 3-10\n", k);
return hipErrorUnknown;
}
SPECIALIZE(3);
SPECIALIZE(4);
SPECIALIZE(5);
SPECIALIZE(6);
SPECIALIZE(7);
SPECIALIZE(8);
SPECIALIZE(9);
SPECIALIZE(10);
if (err != hipSuccess) return err;
#undef SPECIALIZE
// sum the number of k-mers of length k
err = hipcub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_k0_count,
d_n0,
k0,
stream);
if (err != hipSuccess) return err;
// sum the number of k-mers of length k-1
err = hipcub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_k1_count,
d_n1,
k1,
stream);
if (err != hipSuccess) return err;
// sum the number of k-mers of length k-2
err = hipcub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_k2_count,
d_n2,
k2,
stream);
if (err != hipSuccess) return err;
    // The following function implements formulas (2) and (3) from "Whole Proteome
    // Prokaryote Phylogeny Without Sequence Alignment: A K-String Composition
    // Approach" (2004) by Qi et al.
thrust::for_each(
exec,
thrust::make_counting_iterator<uint32_t>(0u),
thrust::make_counting_iterator<uint32_t>(k0),
[=] __device__ (uint32_t i) {
// full = a_1 a_2 ... a_{k-1} a_k (complete k-mer)
// prefix = a_1 a_2 ... a_{k-1} ({k-1}-mer without last character)
// suffix = a_2 ... a_{k-1} a_k ({k-1}-mer without first character)
// middle = a_2 ... a_{k-1} ({k-2}-mer without first and last char.)
kmer_t full = i;
kmer_t prefix = full / Alphabet::NUM_SYMBOLS;
kmer_t suffix = full % powi((int) Alphabet::NUM_SYMBOLS, k - 1);
kmer_t middle = suffix / Alphabet::NUM_SYMBOLS;
            // You are probably wondering what this strange do-while statement is doing
            // here. Formula (3) gives alpha = 0 if middle_count, prefix_count, or
            // suffix_count is zero, so we can read these counts in any order and break
            // as soon as any of them is zero. The counts are read in descending order
            // of locality (i.e., increasing order of cost): first middle_count, then
            // prefix_count, then suffix_count.
float alpha = 0.0;
do {
uint32_t middle_count = d_k2_count[middle];
if (middle_count == 0) break;
uint32_t prefix_count = d_k1_count[prefix];
if (prefix_count == 0) break;
uint32_t suffix_count = d_k1_count[suffix];
if (suffix_count == 0) break;
uint32_t full_count = d_k0_count[full];
uint32_t n0 = *d_n0;
uint32_t n1 = *d_n1;
uint32_t n2 = *d_n2;
// Formula (1) = count / n
double p = double(full_count) / n0;
// Formula (2) = P(prefix) * P(suffix) / P(middle)
double p0 = ((double(prefix_count) / n1) * (double(suffix_count) / n1)) / (double(middle_count) / n2);
// Formula (3) = (p - p0) / p0
alpha = float((p - p0) / p0);
} while (0);
            // Store alpha in place of the raw count by reinterpreting the float's
            // bit pattern as an int32.
d_k0_count[i] = __float_as_int(alpha);
});
// count entries where alpha != 0
err = hipcub::DeviceReduce::Sum(
d_scratch,
scratch_size,
thrust::make_transform_iterator(
d_k0_count,
[=] __device__ (uint32_t p) { return p != 0; }),
d_num_unique,
k0,
stream);
if (err != hipSuccess) return err;
// sum p**2 for all entries
err = hipcub::DeviceReduce::Sum(
d_scratch,
scratch_size,
thrust::make_transform_iterator(
d_k0_count,
[=] __device__ (uint32_t p) {
float f = __int_as_float(p);
return f * f;
}),
d_norm,
k0,
stream);
if (err != hipSuccess) return err;
err = hipMemcpyAsync(
num_unique,
d_num_unique,
sizeof(uint32_t),
hipMemcpyDeviceToHost,
stream);
if (err != hipSuccess) return err;
err = hipStreamSynchronize(stream);
if (err != hipSuccess) return err;
if (*num_unique > max_size) {
fprintf(stderr,
"error: size of composition vector (size: %d) exceeds the given maximum (size: %d)\n",
*num_unique,
max_size);
return hipErrorUnknown;
}
err = hipcub::DeviceSelect::If(
d_scratch,
scratch_size,
thrust::make_counting_iterator<uint32_t>(0u),
d_keys,
d_num_unique,
k0,
[=] __device__ (uint32_t i) { return d_k0_count[i] != 0; },
stream);
if (err != hipSuccess) return err;
thrust::for_each(
exec,
thrust::make_counting_iterator<uint32_t>(0u),
thrust::make_counting_iterator<uint32_t>(*num_unique),
[=] __device__ (uint32_t i) {
kmer_t index = d_keys[i];
float f = __int_as_float(d_k0_count[index]);
d_values[i] = f / sqrt(*d_norm);
});
err = hipStreamSynchronize(stream);
if (err != hipSuccess) return err;
thrust::fill(exec, d_keys + *num_unique, d_keys + max_size, INVALID_KMER);
thrust::fill(exec, d_values + *num_unique, d_values + max_size, 0.0f);
return hipSuccess;
}
#ifdef TUNE_THREADS_PER_BLOCK
#define THREADS_PER_BLOCK (TUNE_THREADS_PER_BLOCK)
#define ITEMS_PER_THREAD (TUNE_ITEMS_PER_THREAD)
#define USE_SMEM (TUNE_USE_SMEM)
#else
#define THREADS_PER_BLOCK (256)
#define ITEMS_PER_THREAD (1)
#define USE_SMEM (0)
#endif
#define ITEMS_PER_BLOCK (THREADS_PER_BLOCK * ITEMS_PER_THREAD)
size_t scratch_calculate_cosine_similarity(uint32_t max_vector_size) {
size_t n = 0;
size_t num_blocks = div_ceil(
max_vector_size + max_vector_size,
(uint32_t) ITEMS_PER_BLOCK);
n += allocate_scratch<double>(num_blocks);
n += allocate_scratch<uint2>(num_blocks + 1);
size_t x = 0;
hipcub::DeviceReduce::Sum(
(void*) NULL,
x,
(double*) NULL,
(double*) NULL,
num_blocks);
n += allocate_scratch<uint8_t>(x);
return n;
}
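/**
 * Merge-path partition: binary search for the split (i, j) with i + j == diag such
 * that merging left[0..i) with right[0..j) consumes exactly `diag` elements. This
 * lets every thread/block start merging at an independent, load-balanced position.
 */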
template <typename K>
uint2 __device__ __forceinline__ merge_path(
uint32_t diag,
const K *__restrict__ left,
uint32_t left_size,
const K *__restrict__ right,
uint32_t right_size
) {
uint32_t begin = diag < right_size ? 0 : diag - right_size;
uint32_t end = diag < left_size ? diag : left_size;
while (begin < end) {
uint32_t mid = (begin + end) / 2 + 1;
K a = left[mid - 1];
K b = right[diag - mid];
if (a <= b) {
begin = mid;
} else {
end = mid - 1;
}
}
int i = min(begin, left_size);
int j = min(diag - begin, right_size);
return make_uint2(i, j);
}
__global__ void set_merge_by_key_and_reduce_cosine(
const uint2 *__restrict__ ranges,
const kmer_t *__restrict__ left_keys,
const float *__restrict__ left_values,
const uint32_t left_size,
const kmer_t *__restrict__ right_keys,
const float *__restrict__ right_values,
const uint32_t right_size,
double *results
) {
typedef hipcub::BlockReduce<double, THREADS_PER_BLOCK> BlockReduce;
__shared__ union {
typename BlockReduce::TempStorage temp;
kmer_t keys[2 * (ITEMS_PER_BLOCK + 1) * !!USE_SMEM];
} shared;
int tid = threadIdx.x;
int bid = blockIdx.x;
    // Load this block's merge-path window: elements [left_begin, left_end) of the
    // left array and [right_begin, right_end) of the right array.
    uint left_begin = ranges[bid].x;
    uint right_begin = ranges[bid].y;
    uint left_end = ranges[bid + 1].x;
    uint right_end = ranges[bid + 1].y;
uint left_span = left_end - left_begin;
uint right_span = right_end - right_begin;
#if USE_SMEM
#pragma unroll
for (int i = tid; i < 2 * (ITEMS_PER_BLOCK + 2); i += THREADS_PER_BLOCK) {
kmer_t key = !0;
if (left_begin + i <= left_end) {
key = left_keys[left_begin + i];
} else {
int j = i - (left_end - left_begin + 1);
if (right_begin + j <= right_end) {
key = right_keys[right_begin + j];
}
}
shared.keys[i] = key;
}
__syncthreads();
#endif
#if USE_SMEM
uint2 mp = merge_path(
tid * ITEMS_PER_THREAD,
shared.keys,
left_span,
shared.keys + left_span,
right_span);
#else
uint2 mp = merge_path(
tid * ITEMS_PER_THREAD,
left_keys + left_begin,
left_span,
right_keys + right_begin,
right_span);
#endif
uint i = mp.x + left_begin;
uint j = mp.y + right_begin;
double result = 0.0;
#pragma unroll
for (int it = 0; it < ITEMS_PER_THREAD; it++) {
if ((i >= left_end && j >= right_end) || i >= left_size || j >= right_size) {
break;
}
kmer_t p = left_keys[i];
kmer_t q = right_keys[j];
if (p == q) {
double a = left_values[i];
double b = right_values[j];
result += a * b;
//printf("GPU found %d %d (%d == %d): %f * %f == %f\n",
// i, j, p, q, a, b, a * b);
}
if (p <= q) {
i++;
} else {
j++;
}
}
// Reduce
result = BlockReduce(shared.temp).Sum(result);
if (tid == 0) {
results[bid] = result;
}
}
hipError_t calculate_cosine_similarity(
hipStream_t stream,
void *d_scratch,
size_t scratch_size,
const kmer_t *d_left_keys,
const float *d_left_values,
const uint32_t left_size,
const kmer_t *d_right_keys,
const float *d_right_values,
const uint32_t right_size,
double *d_result
) {
hipError_t err = hipSuccess;
size_t num_blocks = div_ceil(left_size + right_size, (uint32_t) ITEMS_PER_BLOCK);
uint2 *d_ranges = advance_scratch<uint2>(&d_scratch, num_blocks);
double *d_partial_results = advance_scratch<double>(&d_scratch, num_blocks);
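    // Precompute one merge-path split per block so that each block walks an
    // equally sized window of the combined (left + right) key sequence.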
thrust::transform(
thrust::hip::par.on(stream),
thrust::make_counting_iterator<uint32_t>(0),
thrust::make_counting_iterator<uint32_t>(num_blocks + 1),
d_ranges,
[=] __device__ (uint32_t bid) {
return merge_path(
bid * ITEMS_PER_BLOCK,
d_left_keys,
left_size,
d_right_keys,
right_size);
});
hipLaunchKernelGGL(( set_merge_by_key_and_reduce_cosine), dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0, stream,
d_ranges,
d_left_keys,
d_left_values,
left_size,
d_right_keys,
d_right_values,
right_size,
d_partial_results);
err = hipGetLastError();
if (err != hipSuccess) return err;
err = hipcub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_partial_results,
d_result,
num_blocks,
stream);
if (err != hipSuccess) return err;
err = hipStreamSynchronize(stream);
if (err != hipSuccess) return err;
return hipSuccess;
}
extern "C" long estimateScratchMemory(
char *alphabet,
int k,
int max_vector_size
) {
size_t a = strcmp(alphabet, "DNA") == 0 ?
scratch_build_composition_vector<DNAAlphabet>(k) :
scratch_build_composition_vector<ProteinAlphabet>(k);
size_t b = scratch_calculate_cosine_similarity(max_vector_size);
return ::max(a, b);
}
extern "C" int buildCompositionVector(
uintptr_t stream,
uintptr_t d_temp_storage_ptr,
long temp_storage_size,
char *alphabet,
int k,
uintptr_t d_string_ptr,
long string_len,
uintptr_t d_set_keys_ptr,
uintptr_t d_set_values_ptr,
uintptr_t set_size_ptr,
int max_vector_size
) {
#define SPECIALIZE(name, A) \
if (strcmp(alphabet, name) == 0) { \
hipError_t err = build_composition_vector<A>( \
(hipStream_t) stream, \
(void*) d_temp_storage_ptr, \
temp_storage_size, \
k, \
(const char*) d_string_ptr, \
(uint32_t) string_len, \
(kmer_t*) d_set_keys_ptr, \
(float*) d_set_values_ptr, \
(uint32_t*) set_size_ptr, \
max_vector_size); \
return (int) err; \
}
SPECIALIZE("DNA", DNAAlphabet);
SPECIALIZE("protein", ProteinAlphabet);
fprintf(stderr, "error: invalid alphabet '%s'", alphabet);
return hipErrorUnknown;
}
extern "C" double compareCompositionVectors(
uintptr_t stream,
uintptr_t d_temp_storage_ptr,
long temp_storage_size,
uintptr_t d_left_keys_ptr,
uintptr_t d_left_values_ptr,
uint32_t left_size,
uintptr_t d_right_keys_ptr,
uintptr_t d_right_values_ptr,
uint32_t right_size,
uintptr_t d_output_ptr
) {
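    // Note: the similarity value itself is written to d_output_ptr on the device;
    // the value returned here is the error status from the launch.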
hipError_t err = calculate_cosine_similarity(
(hipStream_t) stream,
(void*) d_temp_storage_ptr,
temp_storage_size,
(const kmer_t*) d_left_keys_ptr,
(const float*) d_left_values_ptr,
left_size,
(const kmer_t*) d_right_keys_ptr,
(const float*) d_right_values_ptr,
right_size,
(double*) d_output_ptr);
return (int) err;
}
extern "C" float tuneCalculateCosineSimilarity(
const kmer_t *left_keys,
const float *left_values,
const uint32_t left_size,
const kmer_t *right_keys,
const float *right_values,
const uint32_t right_size,
double *result
) {
size_t scratch_size = scratch_calculate_cosine_similarity(left_size + right_size);
hipStream_t stream;
hipEvent_t event_before;
hipEvent_t event_after;
uint32_t max_size = ::max(left_size, right_size);
thrust::device_vector<double> d_result(1);
thrust::device_vector<uint8_t> d_scratch(scratch_size);
thrust::device_vector<kmer_t> d_left_keys(left_keys, left_keys + left_size);
thrust::device_vector<float> d_left_values(left_values, left_values + left_size);
thrust::device_vector<uint32_t> d_left_size(1, left_size);
thrust::device_vector<kmer_t> d_right_keys(right_keys, right_keys + right_size);
thrust::device_vector<float> d_right_values(right_values, right_values + right_size);
thrust::device_vector<uint32_t> d_right_size(1, right_size);
hipStreamCreate(&stream);
hipEventCreate(&event_before);
hipEventCreate(&event_after);
hipEventRecord(event_before, stream);
hipError_t err = calculate_cosine_similarity(
stream,
(void*) thrust::raw_pointer_cast(d_scratch.data()),
scratch_size,
thrust::raw_pointer_cast(d_left_keys.data()),
thrust::raw_pointer_cast(d_left_values.data()),
left_size,
thrust::raw_pointer_cast(d_right_keys.data()),
thrust::raw_pointer_cast(d_right_values.data()),
right_size,
thrust::raw_pointer_cast(d_result.data()));
hipEventRecord(event_after, stream);
hipStreamSynchronize(stream);
float elapsed = 0.0;
hipEventElapsedTime(&elapsed, event_before, event_after);
hipStreamDestroy(stream);
hipEventDestroy(event_before);
hipEventDestroy(event_after);
*result = d_result[0];
return elapsed;
}
| 0bdbb2ed63e06969da7582fad2ab2d13d5db1dc0.cu | #include <iostream>
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
typedef uint32_t kmer_t;
const kmer_t INVALID_KMER = ~0;
template <typename T>
__host__ __device__ T powi(T a, T b) {
T result = 1;
while (b > 0) {
result *= a;
b--;
}
return result;
}
template <typename T>
T div_ceil(T a, T b) {
return (a / b) + (a % b == 0 ? 0 : 1);
}
/**
* Calculates the scratch memory required to store n elements of type T.
 * Memory is aligned to 256-byte segments for good performance on the GPU.
*/
template <typename T>
size_t allocate_scratch(size_t n) {
return (n * sizeof(T) + 255) / 256 * 256;
}
/**
 * Takes memory for n elements of type T from scratch and advances
* the scratch memory pointer forward to the next available bytes.
*/
template <typename T>
T* advance_scratch(void **scratch, size_t n) {
void* current = *scratch;
size_t bytes = allocate_scratch<T>(n);
*scratch = (void*)(((char*) *scratch) + bytes);
return (T*) current;
}
__constant__ uint8_t DNA2DIGIT_MAPPING[26] = {
0, // A
255, // B
1, // C
255, // D
255, // E
255, // F
2, // G
255, // H
255, // I
255, // J
255, // K
255, // L
255, // M
255, // N
255, // O
255, // P
255, // Q
255, // R
255, // S
3, // T
255, // U
255, // V
255, // W
255, // X
255, // Y
255 // Z
};
struct DNAAlphabet {
static const size_t NUM_SYMBOLS = 4;
/**
* convert an ASCII DNA representation to its 2-bit symbol
* Based on nvbio/dna.h:78
*/
__device__ static uint8_t char_to_digit(const char c) {
if (c >= 'A' && c <= 'Z') {
return DNA2DIGIT_MAPPING[c - 'A'];
} else {
return 0xff;
}
}
};
__constant__ uint8_t PROTEIN2DIGIT_MAPPING[26] = {
0, // A
255, // B
1, // C
2, // D
3, // E
4, // F
5, // G
6, // H
7, // I
255, // J
8, // K
9, // L
10, // M
11, // N
255, // O
12, // P
13, // Q
14, // R
15, // S
16, // T
255, // U
17, // V
18, // W
    20,  // X (Unknown protein)
19, // Y
255 // Z
};
struct ProteinAlphabet {
static const size_t NUM_SYMBOLS = 21;
/**
* convert an ASCII character to a 5-bit symbol
* Based on nvbio/alphabet_inl.h:90
*/
__device__ static uint8_t char_to_digit(const char c) {
if (c >= 'A' && c <= 'Z') {
return PROTEIN2DIGIT_MAPPING[c - 'A'];
} else {
return 0xff;
}
}
};
template <typename Alphabet>
size_t scratch_build_composition_vector(int k) {
size_t k0 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 0);
size_t k1 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 1);
size_t k2 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 2);
size_t scratch_size = 0;
scratch_size += allocate_scratch<uint32_t>(k0);
scratch_size += allocate_scratch<uint32_t>(k1);
scratch_size += allocate_scratch<uint32_t>(k2);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<uint32_t>(1);
scratch_size += allocate_scratch<double>(1);
size_t cub_scratch = 0;
cudaError_t err = cub::DeviceReduce::Sum(
(void*) NULL,
cub_scratch,
(uint32_t*) NULL,
(uint32_t*) NULL,
k0);
if (err != cudaSuccess) return err;
size_t x = 0;
err = cub::DeviceReduce::Sum(
(void*) NULL,
x,
(double*) NULL,
(double*) NULL,
k0);
if (err != cudaSuccess) return err;
if (x > cub_scratch) cub_scratch = x;
err = cub::DeviceSelect::If(
(void*) NULL,
x,
(uint32_t*) NULL,
(uint32_t*) NULL,
(uint32_t*) NULL,
k0,
[=] __device__ (uint32_t i) { return false; });
if (err != cudaSuccess) return err;
if (x > cub_scratch) cub_scratch = x;
scratch_size += allocate_scratch<uint8_t>(cub_scratch);
return scratch_size;
}
template <int i, int j, bool flag = i < j>
struct unroll_helper {
template <typename F>
__device__ static void call(F fun) {
if (fun(i)) {
unroll_helper<i + 1, j>::call(fun);
}
}
};
template <int i, int j>
struct unroll_helper<i, j, false> {
template <typename F>
__device__ static void call(F fun) {
//
}
};
template <int N, typename F>
__device__ void unroll(F fun) {
unroll_helper<0, N>::call(fun);
}
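/**
 * Count every k-mer, (k-1)-mer and (k-2)-mer of the string in a single pass:
 * each thread starts at one character, extends its window one symbol at a time
 * through the compile-time unrolled loop, bumps the matching histogram bin with
 * an atomicAdd, and stops early at an invalid character or the end of the string.
 */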
template <typename Alphabet, int K>
cudaError_t count_kmers(
cudaStream_t stream,
const char* d_string,
uint32_t string_len,
uint32_t *d_k0_count,
uint32_t *d_k1_count,
uint32_t *d_k2_count
) {
auto exec = thrust::cuda::par.on(stream);
thrust::for_each(
exec,
thrust::make_counting_iterator<uint32_t>(0u),
thrust::make_counting_iterator<uint32_t>(string_len),
[=] __device__ (uint32_t i) {
kmer_t w = 0;
unroll<K + 1>([=, &w](uint32_t j) {
// Compiler should remove the conditionals when unrolling
// the loop (fingers crossed!).
if (j == K - 2) atomicAdd(&d_k2_count[w], 1);
if (j == K - 1) atomicAdd(&d_k1_count[w], 1);
if (j == K - 0) atomicAdd(&d_k0_count[w], 1);
if (i + j >= string_len) return false;
uint8_t c = Alphabet::char_to_digit(d_string[i + j]);
if (c == 0xff) return false;
w = w * Alphabet::NUM_SYMBOLS + c;
return true;
});
});
return cudaSuccess;
}
template <typename Alphabet>
cudaError_t build_composition_vector(
cudaStream_t stream,
void *d_scratch,
size_t scratch_size,
int k,
const char* d_string,
uint32_t string_len,
kmer_t *d_keys,
float *d_values,
uint32_t *num_unique,
uint32_t max_size
) {
cudaError_t err = cudaSuccess;
size_t k0 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 0);
size_t k1 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 1);
size_t k2 = powi(Alphabet::NUM_SYMBOLS, (size_t) k - 2);
uint32_t *d_k0_count = advance_scratch<kmer_t>(&d_scratch, k0);
uint32_t *d_k1_count = advance_scratch<kmer_t>(&d_scratch, k1);
uint32_t *d_k2_count = advance_scratch<kmer_t>(&d_scratch, k2);
uint32_t *d_n0 = advance_scratch<uint32_t>(&d_scratch, 1);
uint32_t *d_n1 = advance_scratch<uint32_t>(&d_scratch, 1);
uint32_t *d_n2 = advance_scratch<uint32_t>(&d_scratch, 1);
uint32_t *d_num_unique = advance_scratch<uint32_t>(&d_scratch, 1);
double *d_norm = advance_scratch<double>(&d_scratch, 1);
// initialize k-mer count table with zeros
auto exec = thrust::cuda::par.on(stream);
thrust::fill_n(exec, d_k0_count, k0, 0);
thrust::fill_n(exec, d_k1_count, k1, 0);
thrust::fill_n(exec, d_k2_count, k2, 0);
// count k-mers of length k, k-1, and k-2
#define SPECIALIZE(K) \
if (k == K) err = count_kmers<Alphabet, K>( \
stream, d_string, string_len, d_k0_count, d_k1_count, d_k2_count)
if (k < 3 || k > 10) {
fprintf(stderr, "error: k=%d should be in range 3-10\n", k);
return cudaErrorUnknown;
}
SPECIALIZE(3);
SPECIALIZE(4);
SPECIALIZE(5);
SPECIALIZE(6);
SPECIALIZE(7);
SPECIALIZE(8);
SPECIALIZE(9);
SPECIALIZE(10);
if (err != cudaSuccess) return err;
#undef SPECIALIZE
// sum the number of k-mers of length k
err = cub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_k0_count,
d_n0,
k0,
stream);
if (err != cudaSuccess) return err;
// sum the number of k-mers of length k-1
err = cub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_k1_count,
d_n1,
k1,
stream);
if (err != cudaSuccess) return err;
// sum the number of k-mers of length k-2
err = cub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_k2_count,
d_n2,
k2,
stream);
if (err != cudaSuccess) return err;
    // The following function implements formulas (2) and (3) from "Whole Proteome
    // Prokaryote Phylogeny Without Sequence Alignment: A K-String Composition
    // Approach" (2004) by Qi et al.
thrust::for_each(
exec,
thrust::make_counting_iterator<uint32_t>(0u),
thrust::make_counting_iterator<uint32_t>(k0),
[=] __device__ (uint32_t i) {
// full = a_1 a_2 ... a_{k-1} a_k (complete k-mer)
// prefix = a_1 a_2 ... a_{k-1} ({k-1}-mer without last character)
// suffix = a_2 ... a_{k-1} a_k ({k-1}-mer without first character)
// middle = a_2 ... a_{k-1} ({k-2}-mer without first and last char.)
kmer_t full = i;
kmer_t prefix = full / Alphabet::NUM_SYMBOLS;
kmer_t suffix = full % powi((int) Alphabet::NUM_SYMBOLS, k - 1);
kmer_t middle = suffix / Alphabet::NUM_SYMBOLS;
            // You are probably wondering what this strange do-while statement is doing
            // here. Formula (3) gives alpha = 0 if middle_count, prefix_count, or
            // suffix_count is zero, so we can read these counts in any order and break
            // as soon as any of them is zero. The counts are read in descending order
            // of locality (i.e., increasing order of cost): first middle_count, then
            // prefix_count, then suffix_count.
float alpha = 0.0;
do {
uint32_t middle_count = d_k2_count[middle];
if (middle_count == 0) break;
uint32_t prefix_count = d_k1_count[prefix];
if (prefix_count == 0) break;
uint32_t suffix_count = d_k1_count[suffix];
if (suffix_count == 0) break;
uint32_t full_count = d_k0_count[full];
uint32_t n0 = *d_n0;
uint32_t n1 = *d_n1;
uint32_t n2 = *d_n2;
// Formula (1) = count / n
double p = double(full_count) / n0;
// Formula (2) = P(prefix) * P(suffix) / P(middle)
double p0 = ((double(prefix_count) / n1) * (double(suffix_count) / n1)) / (double(middle_count) / n2);
// Formula (3) = (p - p0) / p0
alpha = float((p - p0) / p0);
} while (0);
            // Store alpha in place of the raw count by reinterpreting the float's
            // bit pattern as an int32.
d_k0_count[i] = __float_as_int(alpha);
});
// count entries where alpha != 0
err = cub::DeviceReduce::Sum(
d_scratch,
scratch_size,
thrust::make_transform_iterator(
d_k0_count,
[=] __device__ (uint32_t p) { return p != 0; }),
d_num_unique,
k0,
stream);
if (err != cudaSuccess) return err;
// sum p**2 for all entries
err = cub::DeviceReduce::Sum(
d_scratch,
scratch_size,
thrust::make_transform_iterator(
d_k0_count,
[=] __device__ (uint32_t p) {
float f = __int_as_float(p);
return f * f;
}),
d_norm,
k0,
stream);
if (err != cudaSuccess) return err;
err = cudaMemcpyAsync(
num_unique,
d_num_unique,
sizeof(uint32_t),
cudaMemcpyDeviceToHost,
stream);
if (err != cudaSuccess) return err;
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess) return err;
if (*num_unique > max_size) {
fprintf(stderr,
"error: size of composition vector (size: %d) exceeds the given maximum (size: %d)\n",
*num_unique,
max_size);
return cudaErrorUnknown;
}
err = cub::DeviceSelect::If(
d_scratch,
scratch_size,
thrust::make_counting_iterator<uint32_t>(0u),
d_keys,
d_num_unique,
k0,
[=] __device__ (uint32_t i) { return d_k0_count[i] != 0; },
stream);
if (err != cudaSuccess) return err;
thrust::for_each(
exec,
thrust::make_counting_iterator<uint32_t>(0u),
thrust::make_counting_iterator<uint32_t>(*num_unique),
[=] __device__ (uint32_t i) {
kmer_t index = d_keys[i];
float f = __int_as_float(d_k0_count[index]);
d_values[i] = f / sqrt(*d_norm);
});
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess) return err;
thrust::fill(exec, d_keys + *num_unique, d_keys + max_size, INVALID_KMER);
thrust::fill(exec, d_values + *num_unique, d_values + max_size, 0.0f);
return cudaSuccess;
}
#ifdef TUNE_THREADS_PER_BLOCK
#define THREADS_PER_BLOCK (TUNE_THREADS_PER_BLOCK)
#define ITEMS_PER_THREAD (TUNE_ITEMS_PER_THREAD)
#define USE_SMEM (TUNE_USE_SMEM)
#else
#define THREADS_PER_BLOCK (256)
#define ITEMS_PER_THREAD (1)
#define USE_SMEM (0)
#endif
#define ITEMS_PER_BLOCK (THREADS_PER_BLOCK * ITEMS_PER_THREAD)
size_t scratch_calculate_cosine_similarity(uint32_t max_vector_size) {
size_t n = 0;
size_t num_blocks = div_ceil(
max_vector_size + max_vector_size,
(uint32_t) ITEMS_PER_BLOCK);
n += allocate_scratch<double>(num_blocks);
n += allocate_scratch<uint2>(num_blocks + 1);
size_t x = 0;
cub::DeviceReduce::Sum(
(void*) NULL,
x,
(double*) NULL,
(double*) NULL,
num_blocks);
n += allocate_scratch<uint8_t>(x);
return n;
}
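/**
 * Merge-path partition: binary search for the split (i, j) with i + j == diag such
 * that merging left[0..i) with right[0..j) consumes exactly `diag` elements. This
 * lets every thread/block start merging at an independent, load-balanced position.
 */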
template <typename K>
uint2 __device__ __forceinline__ merge_path(
uint32_t diag,
const K *__restrict__ left,
uint32_t left_size,
const K *__restrict__ right,
uint32_t right_size
) {
uint32_t begin = diag < right_size ? 0 : diag - right_size;
uint32_t end = diag < left_size ? diag : left_size;
while (begin < end) {
uint32_t mid = (begin + end) / 2 + 1;
K a = left[mid - 1];
K b = right[diag - mid];
if (a <= b) {
begin = mid;
} else {
end = mid - 1;
}
}
int i = min(begin, left_size);
int j = min(diag - begin, right_size);
return make_uint2(i, j);
}
__global__ void set_merge_by_key_and_reduce_cosine(
const uint2 *__restrict__ ranges,
const kmer_t *__restrict__ left_keys,
const float *__restrict__ left_values,
const uint32_t left_size,
const kmer_t *__restrict__ right_keys,
const float *__restrict__ right_values,
const uint32_t right_size,
double *results
) {
typedef cub::BlockReduce<double, THREADS_PER_BLOCK> BlockReduce;
__shared__ union {
typename BlockReduce::TempStorage temp;
kmer_t keys[2 * (ITEMS_PER_BLOCK + 1) * !!USE_SMEM];
} shared;
int tid = threadIdx.x;
int bid = blockIdx.x;
    // Load this block's merge-path window: elements [left_begin, left_end) of the
    // left array and [right_begin, right_end) of the right array.
    uint left_begin = ranges[bid].x;
    uint right_begin = ranges[bid].y;
    uint left_end = ranges[bid + 1].x;
    uint right_end = ranges[bid + 1].y;
uint left_span = left_end - left_begin;
uint right_span = right_end - right_begin;
#if USE_SMEM
#pragma unroll
for (int i = tid; i < 2 * (ITEMS_PER_BLOCK + 2); i += THREADS_PER_BLOCK) {
kmer_t key = !0;
if (left_begin + i <= left_end) {
key = left_keys[left_begin + i];
} else {
int j = i - (left_end - left_begin + 1);
if (right_begin + j <= right_end) {
key = right_keys[right_begin + j];
}
}
shared.keys[i] = key;
}
__syncthreads();
#endif
#if USE_SMEM
uint2 mp = merge_path(
tid * ITEMS_PER_THREAD,
shared.keys,
left_span,
shared.keys + left_span,
right_span);
#else
uint2 mp = merge_path(
tid * ITEMS_PER_THREAD,
left_keys + left_begin,
left_span,
right_keys + right_begin,
right_span);
#endif
uint i = mp.x + left_begin;
uint j = mp.y + right_begin;
double result = 0.0;
#pragma unroll
for (int it = 0; it < ITEMS_PER_THREAD; it++) {
if ((i >= left_end && j >= right_end) || i >= left_size || j >= right_size) {
break;
}
kmer_t p = left_keys[i];
kmer_t q = right_keys[j];
if (p == q) {
double a = left_values[i];
double b = right_values[j];
result += a * b;
//printf("GPU found %d %d (%d == %d): %f * %f == %f\n",
// i, j, p, q, a, b, a * b);
}
if (p <= q) {
i++;
} else {
j++;
}
}
// Reduce
result = BlockReduce(shared.temp).Sum(result);
if (tid == 0) {
results[bid] = result;
}
}
cudaError_t calculate_cosine_similarity(
cudaStream_t stream,
void *d_scratch,
size_t scratch_size,
const kmer_t *d_left_keys,
const float *d_left_values,
const uint32_t left_size,
const kmer_t *d_right_keys,
const float *d_right_values,
const uint32_t right_size,
double *d_result
) {
cudaError_t err = cudaSuccess;
size_t num_blocks = div_ceil(left_size + right_size, (uint32_t) ITEMS_PER_BLOCK);
uint2 *d_ranges = advance_scratch<uint2>(&d_scratch, num_blocks);
double *d_partial_results = advance_scratch<double>(&d_scratch, num_blocks);
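    // Precompute one merge-path split per block so that each block walks an
    // equally sized window of the combined (left + right) key sequence.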
thrust::transform(
thrust::cuda::par.on(stream),
thrust::make_counting_iterator<uint32_t>(0),
thrust::make_counting_iterator<uint32_t>(num_blocks + 1),
d_ranges,
[=] __device__ (uint32_t bid) {
return merge_path(
bid * ITEMS_PER_BLOCK,
d_left_keys,
left_size,
d_right_keys,
right_size);
});
set_merge_by_key_and_reduce_cosine<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>(
d_ranges,
d_left_keys,
d_left_values,
left_size,
d_right_keys,
d_right_values,
right_size,
d_partial_results);
err = cudaGetLastError();
if (err != cudaSuccess) return err;
err = cub::DeviceReduce::Sum(
d_scratch,
scratch_size,
d_partial_results,
d_result,
num_blocks,
stream);
if (err != cudaSuccess) return err;
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess) return err;
return cudaSuccess;
}
extern "C" long estimateScratchMemory(
char *alphabet,
int k,
int max_vector_size
) {
size_t a = strcmp(alphabet, "DNA") == 0 ?
scratch_build_composition_vector<DNAAlphabet>(k) :
scratch_build_composition_vector<ProteinAlphabet>(k);
size_t b = scratch_calculate_cosine_similarity(max_vector_size);
return std::max(a, b);
}
extern "C" int buildCompositionVector(
uintptr_t stream,
uintptr_t d_temp_storage_ptr,
long temp_storage_size,
char *alphabet,
int k,
uintptr_t d_string_ptr,
long string_len,
uintptr_t d_set_keys_ptr,
uintptr_t d_set_values_ptr,
uintptr_t set_size_ptr,
int max_vector_size
) {
#define SPECIALIZE(name, A) \
if (strcmp(alphabet, name) == 0) { \
cudaError_t err = build_composition_vector<A>( \
(cudaStream_t) stream, \
(void*) d_temp_storage_ptr, \
temp_storage_size, \
k, \
(const char*) d_string_ptr, \
(uint32_t) string_len, \
(kmer_t*) d_set_keys_ptr, \
(float*) d_set_values_ptr, \
(uint32_t*) set_size_ptr, \
max_vector_size); \
return (int) err; \
}
SPECIALIZE("DNA", DNAAlphabet);
SPECIALIZE("protein", ProteinAlphabet);
fprintf(stderr, "error: invalid alphabet '%s'", alphabet);
return cudaErrorUnknown;
}
extern "C" double compareCompositionVectors(
uintptr_t stream,
uintptr_t d_temp_storage_ptr,
long temp_storage_size,
uintptr_t d_left_keys_ptr,
uintptr_t d_left_values_ptr,
uint32_t left_size,
uintptr_t d_right_keys_ptr,
uintptr_t d_right_values_ptr,
uint32_t right_size,
uintptr_t d_output_ptr
) {
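    // Note: the similarity value itself is written to d_output_ptr on the device;
    // the value returned here is the error status from the launch.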
cudaError_t err = calculate_cosine_similarity(
(cudaStream_t) stream,
(void*) d_temp_storage_ptr,
temp_storage_size,
(const kmer_t*) d_left_keys_ptr,
(const float*) d_left_values_ptr,
left_size,
(const kmer_t*) d_right_keys_ptr,
(const float*) d_right_values_ptr,
right_size,
(double*) d_output_ptr);
return (int) err;
}
extern "C" float tuneCalculateCosineSimilarity(
const kmer_t *left_keys,
const float *left_values,
const uint32_t left_size,
const kmer_t *right_keys,
const float *right_values,
const uint32_t right_size,
double *result
) {
size_t scratch_size = scratch_calculate_cosine_similarity(left_size + right_size);
cudaStream_t stream;
cudaEvent_t event_before;
cudaEvent_t event_after;
uint32_t max_size = std::max(left_size, right_size);
thrust::device_vector<double> d_result(1);
thrust::device_vector<uint8_t> d_scratch(scratch_size);
thrust::device_vector<kmer_t> d_left_keys(left_keys, left_keys + left_size);
thrust::device_vector<float> d_left_values(left_values, left_values + left_size);
thrust::device_vector<uint32_t> d_left_size(1, left_size);
thrust::device_vector<kmer_t> d_right_keys(right_keys, right_keys + right_size);
thrust::device_vector<float> d_right_values(right_values, right_values + right_size);
thrust::device_vector<uint32_t> d_right_size(1, right_size);
cudaStreamCreate(&stream);
cudaEventCreate(&event_before);
cudaEventCreate(&event_after);
cudaEventRecord(event_before, stream);
cudaError_t err = calculate_cosine_similarity(
stream,
(void*) thrust::raw_pointer_cast(d_scratch.data()),
scratch_size,
thrust::raw_pointer_cast(d_left_keys.data()),
thrust::raw_pointer_cast(d_left_values.data()),
left_size,
thrust::raw_pointer_cast(d_right_keys.data()),
thrust::raw_pointer_cast(d_right_values.data()),
right_size,
thrust::raw_pointer_cast(d_result.data()));
cudaEventRecord(event_after, stream);
cudaStreamSynchronize(stream);
float elapsed = 0.0;
cudaEventElapsedTime(&elapsed, event_before, event_after);
cudaStreamDestroy(stream);
cudaEventDestroy(event_before);
cudaEventDestroy(event_after);
*result = d_result[0];
return elapsed;
}
|
4daaa8b030149c436dfbea14fa32f5b87b754cf3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include "parse.h"
#include "sequential.h"
#include "util.cuh"
#include "kernels_hip.cuh"
#include <yaml-cpp/yaml.h>
int main(int argc, char *argv[])
{
std::string infile = "/home/vikraman/Desktop/mapf/hybrid_BC/graph/breast_coexpress.txt";
int k = 0;
bool approx = false;
int device = 0;
int max_threads_per_block, number_of_SMs;
choose_device(max_threads_per_block,number_of_SMs, device);
graph g = parse(infile);
std::cout << "Number of nodes: " << g.n << std::endl;
std::cout << "Number of edges: " << g.m << std::endl;
//If we're approximating, choose source vertices at random
std::set<int> source_vertices;
if(approx)
{
if(k > g.n || k < 1)
{
k = g.n;
}
while(source_vertices.size() < k)
{
int temp_source = rand() % g.n;
source_vertices.insert(temp_source);
}
}
hipEvent_t start,end;
float CPU_time;
std::vector<float> bc;
if(false) //Only run CPU code if verifying
{
start_clock(start,end);
bc = bc_cpu(g,source_vertices);
CPU_time = end_clock(start,end);
}
float GPU_time;
std::vector<float> bc_g;
start_clock(start,end);
bc_g = bc_gpu(g,max_threads_per_block,number_of_SMs,approx, k,source_vertices);
GPU_time = end_clock(start,end);
if(false)
{
verify(g,bc,bc_g);
}
if(false)
{
//g.print_BC_scores(bc_g,op.scorefile);
}
std::cout << std::setprecision(9);
if(false)
{
std::cout << "Time for CPU Algorithm: " << CPU_time << " s" << std::endl;
}
std::cout << "Time for GPU Algorithm: " << GPU_time << " s" << std::endl;
delete[] g.R;
delete[] g.C;
delete[] g.F;
return 0;
}
| 4daaa8b030149c436dfbea14fa32f5b87b754cf3.cu | #include <iostream>
#include <iomanip>
#include <cstdlib>
#include "parse.h"
#include "sequential.h"
#include "util.cuh"
#include "kernels.cuh"
#include <yaml-cpp/yaml.h>
int main(int argc, char *argv[])
{
std::string infile = "/home/vikraman/Desktop/mapf/hybrid_BC/graph/breast_coexpress.txt";
int k = 0;
bool approx = false;
int device = 0;
int max_threads_per_block, number_of_SMs;
choose_device(max_threads_per_block,number_of_SMs, device);
graph g = parse(infile);
std::cout << "Number of nodes: " << g.n << std::endl;
std::cout << "Number of edges: " << g.m << std::endl;
//If we're approximating, choose source vertices at random
std::set<int> source_vertices;
if(approx)
{
if(k > g.n || k < 1)
{
k = g.n;
}
while(source_vertices.size() < k)
{
int temp_source = rand() % g.n;
source_vertices.insert(temp_source);
}
}
cudaEvent_t start,end;
float CPU_time;
std::vector<float> bc;
if(false) //Only run CPU code if verifying
{
start_clock(start,end);
bc = bc_cpu(g,source_vertices);
CPU_time = end_clock(start,end);
}
float GPU_time;
std::vector<float> bc_g;
start_clock(start,end);
bc_g = bc_gpu(g,max_threads_per_block,number_of_SMs,approx, k,source_vertices);
GPU_time = end_clock(start,end);
if(false)
{
verify(g,bc,bc_g);
}
if(false)
{
//g.print_BC_scores(bc_g,op.scorefile);
}
std::cout << std::setprecision(9);
if(false)
{
std::cout << "Time for CPU Algorithm: " << CPU_time << " s" << std::endl;
}
std::cout << "Time for GPU Algorithm: " << GPU_time << " s" << std::endl;
delete[] g.R;
delete[] g.C;
delete[] g.F;
return 0;
}
|
9d5cefa98a396f728ca0284bdbbedd42834d0fa9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "non_diag_mask_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int64_t *row_data = NULL;
hipMalloc(&row_data, XSIZE*YSIZE);
const int64_t *col_data = NULL;
hipMalloc(&col_data, XSIZE*YSIZE);
bool *out_data = NULL;
hipMalloc(&out_data, XSIZE*YSIZE);
int64_t N = XSIZE*YSIZE;
int64_t k = 1;
int64_t num_diag = 1;
int64_t numel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
non_diag_mask_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, row_data,col_data,out_data,N,k,num_diag,numel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
non_diag_mask_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, row_data,col_data,out_data,N,k,num_diag,numel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
non_diag_mask_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, row_data,col_data,out_data,N,k,num_diag,numel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9d5cefa98a396f728ca0284bdbbedd42834d0fa9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "non_diag_mask_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int64_t *row_data = NULL;
cudaMalloc(&row_data, XSIZE*YSIZE);
const int64_t *col_data = NULL;
cudaMalloc(&col_data, XSIZE*YSIZE);
bool *out_data = NULL;
cudaMalloc(&out_data, XSIZE*YSIZE);
int64_t N = XSIZE*YSIZE;
int64_t k = 1;
int64_t num_diag = 1;
int64_t numel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
non_diag_mask_kernel<<<gridBlock,threadBlock>>>(row_data,col_data,out_data,N,k,num_diag,numel);
cudaDeviceSynchronize();
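        // Ten untimed warm-up launches, then 1000 timed launches; the total time in
        // microseconds is printed for each (block shape, matrix size) configuration.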
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
non_diag_mask_kernel<<<gridBlock,threadBlock>>>(row_data,col_data,out_data,N,k,num_diag,numel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
non_diag_mask_kernel<<<gridBlock,threadBlock>>>(row_data,col_data,out_data,N,k,num_diag,numel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ab7a206e3a29cd9096ca2230928fd21116b7fc08.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <GL/glew.h>
#include <GL/gl.h>
#include <hip/hip_runtime_api.h>
#include <cuda_gl_interop.h>
#include "RayCasting.h"
#include "RayCastingKernel.cu"
#include "logger.h"
RayCasting::RayCasting( float a , float b , float c , float _m )
: width(0) , height(0) , step(0) , d_m(NULL)
{
e.a = a;
e.b = b;
e.c = c;
e.m =_m;
float i[16] = { 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1 };
memcpy( m , i , sizeof(float)*16 );
}
RayCasting::~RayCasting()
{
if( d_m ) {
hipFree( d_m );
CUT_CHECK_ERROR("RayCasting::~RayCasting::hipFree");
}
}
void RayCasting::translate( float x , float y , float z )
{
float t[16] = { 1, 0, 0, x,
0, 1, 0, y,
0, 0, 1, z,
0, 0, 0, 1 };
matmul4( m , t , m);
}
void RayCasting::scale( float x , float y , float z )
{
float s[16] = { x, 0, 0, 0,
0, y, 0, 0,
0, 0, z, 0,
0, 0, 0, 1 };
matmul4( m , s , m );
}
void RayCasting::rotate( float a , float x , float y , float z )
{
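	// Build the axis-angle (Rodrigues) rotation matrix for angle a about the
	// (assumed unit-length) axis (x, y, z) and compose it with the current matrix m.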
float c = cos( a );
float s = sin( a );
float xx = x*x;
float yy = y*y;
float zz = z*z;
float r[16] = { xx+(1-xx)*c , x*y*(1-c)-z*s , x*z*(1-c)+y*s , 0 ,
x*y*(1-c)+z*s , yy+(1-yy)*c , y*z*(1-c)-x*s , 0 ,
x*z*(1-c)-y*s , y*z*(1-c)+x*s , zz+(1-zz)*c , 0 ,
0 , 0 , 0 , 1 };
matmul4( m , r , m );
}
void RayCasting::resize( int w , int h )
{
if( !d_m ) {
hipMalloc( (void**)&d_m , sizeof(float)*16 );
CUT_CHECK_ERROR("RayCasting::RayCasting::hipMalloc");
}
width = w; height = h;
GLubyte*d_ub;
hipGLMapBufferObject__( (void**)&d_ub , pbo.pbo );
CUT_CHECK_ERROR("RayCasting::init::hipGLMapBufferObject__");
hipMemset( d_ub , 0 , sizeof(GLubyte)*w*h*3 );
CUT_CHECK_ERROR("RayCasting::init::hipMemset");
hipGLUnmapBufferObject( pbo.pbo );
CUT_CHECK_ERROR("RayCasting::init::hipGLUnmapBufferObject");
}
bool RayCasting::render_frame( bool next )
{
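	// Progressive refinement: each call shades a 2^step x 2^step grid of sample
	// quads; when `next` is set the resolution doubles, and the function returns
	// true once the quads cover every pixel of the viewport.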
unsigned int quads = pow(2,step);
if( next && (quads < width || quads < height) )
++step;
dim3 threads;
while( (threads = ::ceil( (float)width / (float)(quads=pow(2,step)))).x >= 512 ) ++step;
unsigned int qw = quads , qh = quads;
while( qw > width ) qw >>= 1;
while( qh > height) qh >>= 1;
qw <<= 1 ; qh <<= 1;
dim3 blocks = dim3( quads , qh );
GLubyte*d_ub;
hipGLMapBufferObject__( (void**)&d_ub , pbo.pbo );
CUT_CHECK_ERROR("RayCasting::init::hipGLMapBufferObject__");
log_printf(DBG,"width %d\theight %d\n",width,height);
log_printf(DBG,"thr: %d\tblk: %d %d\n",threads.x,blocks.x,blocks.y);
/* for( int i=0 ; i<16 ; i++ ) printf("%f%c",m[i],i%4-3?' ':'\n');*/
/* printf("\n");*/
hipMemcpy( (void**)d_m , (void**)m , sizeof(float)*16 , hipMemcpyHostToDevice );
CUT_CHECK_ERROR("RayCasting::render_frame::hipMemcpy");
hipMemset( d_ub , 0 , sizeof(GLubyte)*width*height*3 );
CUT_CHECK_ERROR("RayCasting::render_frame::hipMemset");
hipLaunchKernelGGL(( render_elipsoid), dim3(blocks) , dim3(threads) , 0, 0, d_ub , ::ceil( (float)width / (float)quads ) , ::ceil( (float)height / (float)quads ), width , height , e , d_m );
CUT_CHECK_ERROR("RayCasting::render_frame::render_elipsoid");
hipGLUnmapBufferObject( pbo.pbo );
CUT_CHECK_ERROR("RayCasting::render_frame::hipGLUnmapBufferObject");
if( quads < width || quads < height )
return false;
return true;
}
| ab7a206e3a29cd9096ca2230928fd21116b7fc08.cu | #include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <GL/glew.h>
#include <GL/gl.h>
#include <cuda_runtime_api.h>
#include <cuda_gl_interop.h>
#include "RayCasting.h"
#include "RayCastingKernel.cu"
#include "logger.h"
RayCasting::RayCasting( float a , float b , float c , float _m )
: width(0) , height(0) , step(0) , d_m(NULL)
{
e.a = a;
e.b = b;
e.c = c;
e.m =_m;
float i[16] = { 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1 };
memcpy( m , i , sizeof(float)*16 );
}
RayCasting::~RayCasting()
{
if( d_m ) {
cudaFree( d_m );
CUT_CHECK_ERROR("RayCasting::~RayCasting::cudaFree");
}
}
void RayCasting::translate( float x , float y , float z )
{
float t[16] = { 1, 0, 0, x,
0, 1, 0, y,
0, 0, 1, z,
0, 0, 0, 1 };
matmul4( m , t , m);
}
void RayCasting::scale( float x , float y , float z )
{
float s[16] = { x, 0, 0, 0,
0, y, 0, 0,
0, 0, z, 0,
0, 0, 0, 1 };
matmul4( m , s , m );
}
void RayCasting::rotate( float a , float x , float y , float z )
{
float c = cos( a );
float s = sin( a );
float xx = x*x;
float yy = y*y;
float zz = z*z;
float r[16] = { xx+(1-xx)*c , x*y*(1-c)-z*s , x*z*(1-c)+y*s , 0 ,
x*y*(1-c)+z*s , yy+(1-yy)*c , y*z*(1-c)-x*s , 0 ,
x*z*(1-c)-y*s , y*z*(1-c)+x*s , zz+(1-zz)*c , 0 ,
0 , 0 , 0 , 1 };
matmul4( m , r , m );
}
void RayCasting::resize( int w , int h )
{
if( !d_m ) {
cudaMalloc( (void**)&d_m , sizeof(float)*16 );
CUT_CHECK_ERROR("RayCasting::RayCasting::cudaMalloc");
}
width = w; height = h;
GLubyte*d_ub;
cudaGLMapBufferObject( (void**)&d_ub , pbo.pbo );
CUT_CHECK_ERROR("RayCasting::init::cudaGLMapBufferObject");
cudaMemset( d_ub , 0 , sizeof(GLubyte)*w*h*3 );
CUT_CHECK_ERROR("RayCasting::init::cudaMemset");
cudaGLUnmapBufferObject( pbo.pbo );
CUT_CHECK_ERROR("RayCasting::init::cudaGLUnmapBufferObject");
}
bool RayCasting::render_frame( bool next )
{
unsigned int quads = pow(2,step);
if( next && (quads < width || quads < height) )
++step;
dim3 threads;
while( (threads = std::ceil( (float)width / (float)(quads=pow(2,step)))).x >= 512 ) ++step;
unsigned int qw = quads , qh = quads;
while( qw > width ) qw >>= 1;
while( qh > height) qh >>= 1;
qw <<= 1 ; qh <<= 1;
dim3 blocks = dim3( quads , qh );
GLubyte*d_ub;
cudaGLMapBufferObject( (void**)&d_ub , pbo.pbo );
CUT_CHECK_ERROR("RayCasting::init::cudaGLMapBufferObject");
log_printf(DBG,"width %d\theight %d\n",width,height);
log_printf(DBG,"thr: %d\tblk: %d %d\n",threads.x,blocks.x,blocks.y);
/* for( int i=0 ; i<16 ; i++ ) printf("%f%c",m[i],i%4-3?' ':'\n');*/
/* printf("\n");*/
cudaMemcpy( (void**)d_m , (void**)m , sizeof(float)*16 , cudaMemcpyHostToDevice );
CUT_CHECK_ERROR("RayCasting::render_frame::cudaMemcpy");
cudaMemset( d_ub , 0 , sizeof(GLubyte)*width*height*3 );
CUT_CHECK_ERROR("RayCasting::render_frame::cudaMemset");
render_elipsoid<<< blocks , threads >>>( d_ub , std::ceil( (float)width / (float)quads ) , std::ceil( (float)height / (float)quads ), width , height , e , d_m );
CUT_CHECK_ERROR("RayCasting::render_frame::render_elipsoid");
cudaGLUnmapBufferObject( pbo.pbo );
CUT_CHECK_ERROR("RayCasting::render_frame::cudaGLUnmapBufferObject");
if( quads < width || quads < height )
return false;
return true;
}
|
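The RayCasting pair above is representative of how every row in this dump was produced: the .hip column is a mechanical rewrite of the .cu column, with triple-chevron launches turned into hipLaunchKernelGGL, cuda* runtime calls renamed to their hip* equivalents, and the GL-interop call renamed to hipGLMapBufferObject__ (note the trailing underscores the tool emitted). The sketch below distills that mapping; it uses a hypothetical saxpy kernel that does not appear in any row of this dataset, with the original CUDA forms kept in comments.

// Illustrative sketch only (hypothetical saxpy kernel, not taken from the dataset rows above or below).
#include <hip/hip_runtime.h>
__global__ void saxpy(int n, float a, const float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // kernel body is identical in CUDA and HIP
    if (i < n) y[i] = a * x[i] + y[i];
}
int main()
{
    const int n = 1 << 20;
    float *x = NULL, *y = NULL;
    hipMalloc((void **)&x, n * sizeof(float));       // was: cudaMalloc((void **)&x, ...)
    hipMalloc((void **)&y, n * sizeof(float));       // was: cudaMalloc((void **)&y, ...)
    // was: saxpy<<<(n + 255) / 256, 256>>>(n, 2.0f, x, y);
    hipLaunchKernelGGL(saxpy, dim3((n + 255) / 256), dim3(256), 0, 0, n, 2.0f, x, y);
    hipDeviceSynchronize();                          // was: cudaDeviceSynchronize()
    hipFree(x);                                      // was: cudaFree(x)
    hipFree(y);                                      // was: cudaFree(y)
    return 0;
}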
65621ca985b7d92d9bc92feeb29d24bae892977e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transform.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// extern "C" void doTransform(struct work_entry *ori_queue, struct work_entry *result_queue, int count);
// call corresponding transform functions
__device__ uint16_t transCall(char cmd, uint16_t key){
uint16_t result;
if(cmd == 'A'){
result = transformA(key);
}
else if(cmd == 'B'){
result = transformB(key);
}
else if(cmd == 'C'){
result = transformC(key);
}
else if(cmd == 'D'){
result = transformD(key);
}
return result;
}
// kernel - producer
__global__ void transProducer(struct work_entry *work_queue, int count){
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < count){
work_queue[tid].key = transCall(work_queue[tid].cmd, work_queue[tid].key);
}
}
// kernel - consumer
__global__ void transConsumer(struct work_entry *work_queue, int count){
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < count){
work_queue[tid].key = transCall(work_queue[tid].cmd, work_queue[tid].key);
}
}
extern "C" void doTransform(struct work_entry *ori_queue, struct work_entry *result_queue, int count){
// pointers for host and device memory
struct work_entry *d_queue;
// thread hierarchy
int num_threads = NUM_THREAD;
int num_blocks = count/num_threads + 1;
time_t p_time, c_time, p_begin, p_end, c_begin, c_end;
// allocate memory
size_t memSize;
memSize = count * sizeof(struct work_entry);
hipMalloc((void**)&d_queue, memSize);
time(&p_begin);
// copy data
hipMemcpy(d_queue, ori_queue, memSize, hipMemcpyHostToDevice);
// launch kernel
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_threads);
// do transformation and copy results
hipLaunchKernelGGL(( transProducer), dim3(dimGrid), dim3(dimBlock), 0, 0, d_queue, count);
hipMemcpy(ori_queue, d_queue, memSize, hipMemcpyDeviceToHost);
time(&p_end);
p_time = p_end - p_begin;
time(&c_begin);
hipLaunchKernelGGL(( transConsumer), dim3(dimGrid), dim3(dimBlock), 0, 0, d_queue, count);
hipMemcpy(result_queue, d_queue, memSize, hipMemcpyDeviceToHost);
time(&c_end);
c_time = c_end - c_begin;
fprintf(stderr, "total producer time: %d\ntotal consumer time: %d\n", p_time, c_time);
hipFree(d_queue);
} | 65621ca985b7d92d9bc92feeb29d24bae892977e.cu | #include "transform.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// extern "C" void doTransform(struct work_entry *ori_queue, struct work_entry *result_queue, int count);
// call corresponding transform functions
__device__ uint16_t transCall(char cmd, uint16_t key){
uint16_t result;
if(cmd == 'A'){
result = transformA(key);
}
else if(cmd == 'B'){
result = transformB(key);
}
else if(cmd == 'C'){
result = transformC(key);
}
else if(cmd == 'D'){
result = transformD(key);
}
return result;
}
// kernel - producer
__global__ void transProducer(struct work_entry *work_queue, int count){
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < count){
work_queue[tid].key = transCall(work_queue[tid].cmd, work_queue[tid].key);
}
}
// kernel - consumer
__global__ void transConsumer(struct work_entry *work_queue, int count){
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < count){
work_queue[tid].key = transCall(work_queue[tid].cmd, work_queue[tid].key);
}
}
extern "C" void doTransform(struct work_entry *ori_queue, struct work_entry *result_queue, int count){
// pointers for host and device memory
struct work_entry *d_queue;
// thread hierarchy
int num_threads = NUM_THREAD;
int num_blocks = count/num_threads + 1;
time_t p_time, c_time, p_begin, p_end, c_begin, c_end;
// allocate memory
size_t memSize;
memSize = count * sizeof(struct work_entry);
cudaMalloc((void**)&d_queue, memSize);
time(&p_begin);
// copy data
cudaMemcpy(d_queue, ori_queue, memSize, cudaMemcpyHostToDevice);
// launch kernel
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_threads);
// do transformation and copy results
transProducer<<<dimGrid, dimBlock>>>(d_queue, count);
cudaMemcpy(ori_queue, d_queue, memSize, cudaMemcpyDeviceToHost);
time(&p_end);
p_time = p_end - p_begin;
time(&c_begin);
transConsumer<<<dimGrid, dimBlock>>>(d_queue, count);
cudaMemcpy(result_queue, d_queue, memSize, cudaMemcpyDeviceToHost);
time(&c_end);
c_time = c_end - c_begin;
fprintf(stderr, "total producer time: %d\ntotal consumer time: %d\n", p_time, c_time);
cudaFree(d_queue);
} |
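One detail worth noting in the transform pair above: the launch configuration is computed as count/num_threads + 1, which rounds up but also launches a fully idle extra block whenever count is an exact multiple of NUM_THREAD; the tid < count guard in both kernels keeps that harmless. A hedged sketch of the usual ceil-division idiom follows; ceil_div is a name introduced here for illustration and is not declared in transform.h.

// Illustrative sketch only: exact grid sizing via ceil-division (ceil_div is a hypothetical helper).
static inline int ceil_div(int a, int b) { return (a + b - 1) / b; }
// Usage with the same quantities as doTransform() above:
//   int num_blocks = ceil_div(count, NUM_THREAD);   // no trailing idle block when count % NUM_THREAD == 0
//   dim3 dimGrid(num_blocks);
//   dim3 dimBlock(NUM_THREAD);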
9b6135b3ac9b22ab732cef07dd194c6c1b81be03.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if((nlhs != 0) || (nrhs != 1)) { mexErrMsgTxt("Form: GPU_test(GPU tag / GPU_Type / ImogenArray)"); }
int returnCode = CHECK_CUDA_ERROR("entering GPU_test");
if(returnCode != SUCCESSFUL) return;
MGArray thetag;
returnCode = MGA_accessMatlabArrays(prhs, 0, 0, &thetag);
if(returnCode != SUCCESSFUL) {
CHECK_IMOGEN_ERROR(returnCode);
return;
}
printf("Complete contents of the MGArray structure generated by this tag:\n");
printf("|-Boundary information\n");
printf("| |-addExteriorHalo = %i\n", thetag.addExteriorHalo);
printf("| |-circularBoundaryBits = %i\n", thetag.circularBoundaryBits);
printf("| |-haloSize = %i\n", thetag.haloSize);
printf("|-External facing parameters\n");
printf("| |-currentPermutation = [%i %i %i]\n", thetag.currentPermutation[0], thetag.currentPermutation[1], thetag.currentPermutation[2]);
printf("| |-dim = [%i %i %i]\n", thetag.dim[0], thetag.dim[1], thetag.dim[2]);
printf("| |-numSlabs = %i\n", thetag.numSlabs);
printf("| |-numel = %li\n", thetag.numel);
printf("| |-permtag = %i\n", thetag.permtag);
printf("|-Internal parameters\n");
printf("| |-matlabClassHandle=%#lx\n", (unsigned long int)(thetag.matlabClassHandle));
printf("| |-mlClassHandleIndex=%i\n", thetag.mlClassHandleIndex);
printf("|-Parallel parameters\n");
printf("| |-This system configured to use max of %i GPUs\n", MAX_GPUS_USED);
printf("| |-partitionDir = %i\n", thetag.partitionDir);
printf("| |-nGPUs = %i\n", thetag.nGPUs);
int pc;
int sub[6];
for(pc = 0; pc < thetag.nGPUs; pc++) {
printf("| |-partition %i information\n", pc);
printf("| | |-deviceID = %i\n", thetag.deviceID[pc]);
printf("| | |-devicePtr = %#lx\n", (unsigned long int)thetag.devicePtr[pc]);
printf("| | |-partNumel = %i\n", thetag.partNumel[pc]);
printf("| | |-slabPitch = %li\n", thetag.slabPitch[pc]);
calcPartitionExtent(&thetag, pc, &sub[0]);
printf("| | |-calcPartitionExtent returns [%i %i %i %i %i %i]\n", sub[0], sub[1], sub[2], sub[3], sub[4], sub[5]);
}
CHECK_IMOGEN_ERROR(returnCode);
return;
}
| 9b6135b3ac9b22ab732cef07dd194c6c1b81be03.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if((nlhs != 0) || (nrhs != 1)) { mexErrMsgTxt("Form: GPU_test(GPU tag / GPU_Type / ImogenArray)"); }
int returnCode = CHECK_CUDA_ERROR("entering GPU_test");
if(returnCode != SUCCESSFUL) return;
MGArray thetag;
returnCode = MGA_accessMatlabArrays(prhs, 0, 0, &thetag);
if(returnCode != SUCCESSFUL) {
CHECK_IMOGEN_ERROR(returnCode);
return;
}
printf("Complete contents of the MGArray structure generated by this tag:\n");
printf("|-Boundary information\n");
printf("| |-addExteriorHalo = %i\n", thetag.addExteriorHalo);
printf("| |-circularBoundaryBits = %i\n", thetag.circularBoundaryBits);
printf("| |-haloSize = %i\n", thetag.haloSize);
printf("|-External facing parameters\n");
printf("| |-currentPermutation = [%i %i %i]\n", thetag.currentPermutation[0], thetag.currentPermutation[1], thetag.currentPermutation[2]);
printf("| |-dim = [%i %i %i]\n", thetag.dim[0], thetag.dim[1], thetag.dim[2]);
printf("| |-numSlabs = %i\n", thetag.numSlabs);
printf("| |-numel = %li\n", thetag.numel);
printf("| |-permtag = %i\n", thetag.permtag);
printf("|-Internal parameters\n");
printf("| |-matlabClassHandle=%#lx\n", (unsigned long int)(thetag.matlabClassHandle));
printf("| |-mlClassHandleIndex=%i\n", thetag.mlClassHandleIndex);
printf("|-Parallel parameters\n");
printf("| |-This system configured to use max of %i GPUs\n", MAX_GPUS_USED);
printf("| |-partitionDir = %i\n", thetag.partitionDir);
printf("| |-nGPUs = %i\n", thetag.nGPUs);
int pc;
int sub[6];
for(pc = 0; pc < thetag.nGPUs; pc++) {
printf("| |-partition %i information\n", pc);
printf("| | |-deviceID = %i\n", thetag.deviceID[pc]);
printf("| | |-devicePtr = %#lx\n", (unsigned long int)thetag.devicePtr[pc]);
printf("| | |-partNumel = %i\n", thetag.partNumel[pc]);
printf("| | |-slabPitch = %li\n", thetag.slabPitch[pc]);
calcPartitionExtent(&thetag, pc, &sub[0]);
printf("| | |-calcPartitionExtent returns [%i %i %i %i %i %i]\n", sub[0], sub[1], sub[2], sub[3], sub[4], sub[5]);
}
CHECK_IMOGEN_ERROR(returnCode);
return;
}
|
7aae30726eb6bb34910b8c52299bcd3e6ad99722.hip | // !!! This is a file automatically generated by hipify!!!
/**
* syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 128
#define M 128
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 12435
#define BETA 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*N + j] = ((DATA_TYPE) i*j + 2) / N;
}
for (j = 0; j < M; j++)
{
A[i*N + j] = ((DATA_TYPE) i*j) / N;
B[i*N + j] = ((DATA_TYPE) i*j + 1) / N;
}
}
}
void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j, k;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*N + j] *= BETA;
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
for (k = 0; k < M; k++)
{
C[i*N + j] += ALPHA * A[i*M + k] * B[j*M + k];
C[i*N + j] += ALPHA * B[i*M + k] * A[j*M + k];
}
}
}
}
void compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu)
{
int i,j,fail;
fail = 0;
// Compare C with D
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
if (percentDiff(C[i*N + j], C_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
//hipDeviceProp_t deviceProp;
//hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
//hipSetDevice( GPU_DEVICE );
}
__global__ void syr2k_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < N) && (j < N))
{
c[i * N + j] *= BETA;
int k;
for(k = 0; k < M; k++)
{
c[i * N + j] += ALPHA * a[i * M + k] * b[j * M + k] + ALPHA * b[i * M + k] * a[j * M + k];
}
}
}
void syr2kCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * M);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * M);
hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * N * N);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * M, hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * M, hipMemcpyHostToDevice);
hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_X) ), (size_t)(ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_Y) )));
t_start = rtclock();
hipLaunchKernelGGL(( syr2k_kernel), dim3(grid),dim3(block), 0, 0, A_gpu,B_gpu,C_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(C_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_outputFromGpu;
A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C_outputFromGpu = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
init_arrays(A, B, C);
GPU_argv_init();
syr2kCuda(A, B, C, C_outputFromGpu);
t_start = rtclock();
syr2k(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_outputFromGpu);
free(A);
free(B);
free(C);
free(C_outputFromGpu);
return 0;
}
| 7aae30726eb6bb34910b8c52299bcd3e6ad99722.cu | /**
* syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 128
#define M 128
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 12435
#define BETA 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*N + j] = ((DATA_TYPE) i*j + 2) / N;
}
for (j = 0; j < M; j++)
{
A[i*N + j] = ((DATA_TYPE) i*j) / N;
B[i*N + j] = ((DATA_TYPE) i*j + 1) / N;
}
}
}
void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j, k;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*N + j] *= BETA;
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
for (k = 0; k < M; k++)
{
C[i*N + j] += ALPHA * A[i*M + k] * B[j*M + k];
C[i*N + j] += ALPHA * B[i*M + k] * A[j*M + k];
}
}
}
}
void compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu)
{
int i,j,fail;
fail = 0;
// Compare C with D
for (i=0; i<N; i++)
{
for (j=0; j<N; j++)
{
if (percentDiff(C[i*N + j], C_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
//cudaDeviceProp deviceProp;
//cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
//cudaSetDevice( GPU_DEVICE );
}
__global__ void syr2k_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < N) && (j < N))
{
c[i * N + j] *= BETA;
int k;
for(k = 0; k < M; k++)
{
c[i * N + j] += ALPHA * a[i * M + k] * b[j * M + k] + ALPHA * b[i * M + k] * a[j * M + k];
}
}
}
void syr2kCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * M);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * M);
cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * N * N);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * M, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * M, cudaMemcpyHostToDevice);
cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_X) ), (size_t)(ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_Y) )));
t_start = rtclock();
syr2k_kernel<<<grid,block>>>(A_gpu,B_gpu,C_gpu);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_outputFromGpu;
A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C_outputFromGpu = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
init_arrays(A, B, C);
GPU_argv_init();
syr2kCuda(A, B, C, C_outputFromGpu);
t_start = rtclock();
syr2k(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_outputFromGpu);
free(A);
free(B);
free(C);
free(C_outputFromGpu);
return 0;
}
|
1cf289ef4c2a544c56db07be1d9fdd5c724c2bef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/emulation.hpp>
#include <iostream>
#include <stdio.h>
namespace cv { namespace gpu { namespace device
{
namespace ccl
{
enum
{
WARP_SIZE = 32,
WARP_LOG = 5,
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
STA_SIZE_MERGE_Y = 4,
STA_SIZE_MERGE_X = 32,
TPB_X = 1,
TPB_Y = 4,
TILE_COLS = CTA_SIZE_X * TPB_X,
TILE_ROWS = CTA_SIZE_Y * TPB_Y
};
template<typename T> struct IntervalsTraits
{
typedef T elem_type;
};
template<> struct IntervalsTraits<unsigned char>
{
typedef int dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<uchar3>
{
typedef int3 dist_type;
enum {ch = 3};
};
template<> struct IntervalsTraits<uchar4>
{
typedef int4 dist_type;
enum {ch = 4};
};
template<> struct IntervalsTraits<unsigned short>
{
typedef int dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<ushort3>
{
typedef int3 dist_type;
enum {ch = 3};
};
template<> struct IntervalsTraits<ushort4>
{
typedef int4 dist_type;
enum {ch = 4};
};
template<> struct IntervalsTraits<float>
{
typedef float dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<int>
{
typedef int dist_type;
enum {ch = 1};
};
typedef unsigned char component;
enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 };
template<typename T, int CH> struct InInterval {};
template<typename T> struct InInterval<T, 1>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo(-_lo.x), hi(_hi.x) {};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo <= d && d <= hi;
}
};
template<typename T> struct InInterval<T, 3>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
: lo (VecTraits<T>::make(-_lo.x, -_lo.y, -_lo.z)), hi (VecTraits<T>::make(_hi.x, _hi.y, _hi.z)){};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z;
}
};
template<typename T> struct InInterval<T, 4>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
                : lo (VecTraits<T>::make(-_lo.x, -_lo.y, -_lo.z, -_lo.w)), hi (VecTraits<T>::make(_hi.x, _hi.y, _hi.z, _hi.w)){};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z &&
lo.w <= d.w && d.w <= hi.w;
}
};
template<typename T, typename F>
__global__ void computeConnectivity(const DevMem2D_<T> image, DevMem2D components, F connected)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= image.cols || y >= image.rows) return;
T intensity = image(y, x);
component c = 0;
if ( x > 0 && connected(intensity, image(y, x - 1)))
c |= LEFT;
if ( y > 0 && connected(intensity, image(y - 1, x)))
c |= UP;
            if ( x + 1 < image.cols && connected(intensity, image(y, x + 1)))
                c |= RIGHT;
            if ( y + 1 < image.rows && connected(intensity, image(y + 1, x)))
                c |= DOWN;
components(y, x) = c;
}
template< typename T>
void computeEdges(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y));
typedef InInterval<typename IntervalsTraits<T>::dist_type, IntervalsTraits<T>::ch> Int_t;
Int_t inInt(lo, hi);
hipLaunchKernelGGL(( computeConnectivity<T, Int_t>), dim3(grid), dim3(block), 0, stream, static_cast<const DevMem2D_<T> >(image), edges, inInt);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void computeEdges<uchar> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<uchar3> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<uchar4> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<ushort> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<ushort3>(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<ushort4>(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<int> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
template void computeEdges<float> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, hipStream_t stream);
__global__ void lableTiles(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * TILE_COLS;
int y = threadIdx.y + blockIdx.y * TILE_ROWS;
if (x >= edges.cols || y >= edges.rows) return;
//currently x is 1
int bounds = ((y + TPB_Y) < edges.rows);
__shared__ int labelsTile[TILE_ROWS][TILE_COLS];
__shared__ int edgesTile[TILE_ROWS][TILE_COLS];
int new_labels[TPB_Y][TPB_X];
int old_labels[TPB_Y][TPB_X];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j);
if (!xloc) c &= ~LEFT;
if (!yloc) c &= ~UP;
if (xloc == TILE_COLS -1) c &= ~RIGHT;
if (yloc == TILE_ROWS -1) c &= ~DOWN;
new_labels[i][j] = yloc * TILE_COLS + xloc;
edgesTile[yloc][xloc] = c;
}
for (int k = 0; ;++k)
{
//1. backup
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
old_labels[i][j] = new_labels[i][j];
labelsTile[yloc][xloc] = new_labels[i][j];
}
__syncthreads();
//2. compare local arrays
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edgesTile[yloc][xloc];
int label = new_labels[i][j];
if (c & UP)
label = ::min(label, labelsTile[yloc - 1][xloc]);
if (c & DOWN)
label = ::min(label, labelsTile[yloc + 1][xloc]);
if (c & LEFT)
label = ::min(label, labelsTile[yloc][xloc - 1]);
if (c & RIGHT)
label = ::min(label, labelsTile[yloc][xloc + 1]);
new_labels[i][j] = label;
}
__syncthreads();
//3. determine: Is any value changed?
int changed = 0;
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
if (new_labels[i][j] < old_labels[i][j])
{
changed = 1;
Emulation::smem::atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]);
}
}
changed = Emulation::syncthreadsOr(changed);
if (!changed)
break;
//4. Compact paths
const int *labels = &labelsTile[0][0];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
while( labels[label] < label ) label = labels[label];
new_labels[i][j] = label;
}
__syncthreads();
}
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
int yloc = label / TILE_COLS;
int xloc = label - yloc * TILE_COLS;
xloc += blockIdx.x * TILE_COLS;
yloc += blockIdx.y * TILE_ROWS;
label = yloc * edges.cols + xloc;
// do it for x too.
if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label;
}
}
__device__ __forceinline__ int root(const DevMem2Di& comps, int label)
{
while(1)
{
int y = label / comps.cols;
int x = label - y * comps.cols;
int parent = comps(y, x);
if (label == parent) break;
label = parent;
}
return label;
}
__device__ __forceinline__ void isConnected(DevMem2Di& comps, int l1, int l2, bool& changed)
{
int r1 = root(comps, l1);
int r2 = root(comps, l2);
if (r1 == r2) return;
int mi = ::min(r1, r2);
int ma = ::max(r1, r2);
int y = ma / comps.cols;
int x = ma - y * comps.cols;
atomicMin(&comps.ptr(y)[x], mi);
changed = true;
}
__global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX,
const DevMem2D edges, DevMem2Di comps, const int yIncomplete, int xIncomplete)
{
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int stride = blockDim.y * blockDim.x;
int ybegin = blockIdx.y * (tilesNumY * tileSizeY);
int yend = ybegin + tilesNumY * tileSizeY;
if (blockIdx.y == gridDim.y - 1)
{
yend -= yIncomplete * tileSizeY;
yend -= tileSizeY;
tileSizeY = (edges.rows % tileSizeY);
yend += tileSizeY;
}
int xbegin = blockIdx.x * tilesNumX * tileSizeX;
int xend = xbegin + tilesNumX * tileSizeX;
if (blockIdx.x == gridDim.x - 1)
{
if (xIncomplete) yend = ybegin;
xend -= xIncomplete * tileSizeX;
xend -= tileSizeX;
tileSizeX = (edges.cols % tileSizeX);
xend += tileSizeX;
}
if (blockIdx.y == (gridDim.y - 1) && yIncomplete)
{
xend = xbegin;
}
int tasksV = (tilesNumX - 1) * (yend - ybegin);
int tasksH = (tilesNumY - 1) * (xend - xbegin);
int total = tasksH + tasksV;
bool changed;
do
{
changed = false;
for (int taskIdx = tid; taskIdx < total; taskIdx += stride)
{
if (taskIdx < tasksH)
{
int indexH = taskIdx;
int row = indexH / (xend - xbegin);
int col = indexH - row * (xend - xbegin);
int y = ybegin + (row + 1) * tileSizeY;
int x = xbegin + col;
component e = edges( x, y);
if (e & UP)
{
int lc = comps(y,x);
int lu = comps(y - 1, x);
isConnected(comps, lc, lu, changed);
}
}
else
{
int indexV = taskIdx - tasksH;
int col = indexV / (yend - ybegin);
int row = indexV - col * (yend - ybegin);
int x = xbegin + (col + 1) * tileSizeX;
int y = ybegin + row;
component e = edges(x, y);
if (e & LEFT)
{
int lc = comps(y, x);
int ll = comps(y, x - 1);
isConnected(comps, lc, ll, changed);
}
}
}
} while (Emulation::syncthreadsOr(changed));
}
__global__ void flatten(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x < comps.cols && y < comps.rows)
comps(y, x) = root(comps, comps(y, x));
}
enum {CC_NO_COMPACT = 0, CC_COMPACT_LABELS = 1};
void labelComponents(const DevMem2D& edges, DevMem2Di comps, int flags, hipStream_t stream)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS));
hipLaunchKernelGGL(( lableTiles), dim3(grid), dim3(block), 0, stream, edges, comps);
cudaSafeCall( hipGetLastError() );
int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS;
while (grid.x > 1 || grid.y > 1)
{
dim3 mergeGrid(ceilf(grid.x / 2.0), ceilf(grid.y / 2.0));
dim3 mergeBlock(STA_SIZE_MERGE_X, STA_SIZE_MERGE_Y);
// debug log
// std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl;
hipLaunchKernelGGL(( crossMerge), dim3(mergeGrid), dim3(mergeBlock), 0, stream, 2, 2, tileSizeY, tileSizeX, edges, comps, ceilf(grid.y / 2.0) - grid.y / 2, ceilf(grid.x / 2.0) - grid.x / 2);
tileSizeX <<= 1;
tileSizeY <<= 1;
grid = mergeGrid;
cudaSafeCall( hipGetLastError() );
}
grid.x = divUp(edges.cols, block.x);
grid.y = divUp(edges.rows, block.y);
hipLaunchKernelGGL(( flatten), dim3(grid), dim3(block), 0, stream, edges, comps);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
} } } | 1cf289ef4c2a544c56db07be1d9fdd5c724c2bef.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/emulation.hpp>
#include <iostream>
#include <stdio.h>
namespace cv { namespace gpu { namespace device
{
namespace ccl
{
enum
{
WARP_SIZE = 32,
WARP_LOG = 5,
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
STA_SIZE_MERGE_Y = 4,
STA_SIZE_MERGE_X = 32,
TPB_X = 1,
TPB_Y = 4,
TILE_COLS = CTA_SIZE_X * TPB_X,
TILE_ROWS = CTA_SIZE_Y * TPB_Y
};
template<typename T> struct IntervalsTraits
{
typedef T elem_type;
};
template<> struct IntervalsTraits<unsigned char>
{
typedef int dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<uchar3>
{
typedef int3 dist_type;
enum {ch = 3};
};
template<> struct IntervalsTraits<uchar4>
{
typedef int4 dist_type;
enum {ch = 4};
};
template<> struct IntervalsTraits<unsigned short>
{
typedef int dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<ushort3>
{
typedef int3 dist_type;
enum {ch = 3};
};
template<> struct IntervalsTraits<ushort4>
{
typedef int4 dist_type;
enum {ch = 4};
};
template<> struct IntervalsTraits<float>
{
typedef float dist_type;
enum {ch = 1};
};
template<> struct IntervalsTraits<int>
{
typedef int dist_type;
enum {ch = 1};
};
typedef unsigned char component;
enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 };
template<typename T, int CH> struct InInterval {};
template<typename T> struct InInterval<T, 1>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo(-_lo.x), hi(_hi.x) {};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo <= d && d <= hi;
}
};
template<typename T> struct InInterval<T, 3>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
: lo (VecTraits<T>::make(-_lo.x, -_lo.y, -_lo.z)), hi (VecTraits<T>::make(_hi.x, _hi.y, _hi.z)){};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z;
}
};
template<typename T> struct InInterval<T, 4>
{
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
                : lo (VecTraits<T>::make(-_lo.x, -_lo.y, -_lo.z, -_lo.w)), hi (VecTraits<T>::make(_hi.x, _hi.y, _hi.z, _hi.w)){};
T lo, hi;
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
{
I d = a - b;
return lo.x <= d.x && d.x <= hi.x &&
lo.y <= d.y && d.y <= hi.y &&
lo.z <= d.z && d.z <= hi.z &&
lo.w <= d.w && d.w <= hi.w;
}
};
template<typename T, typename F>
__global__ void computeConnectivity(const DevMem2D_<T> image, DevMem2D components, F connected)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= image.cols || y >= image.rows) return;
T intensity = image(y, x);
component c = 0;
if ( x > 0 && connected(intensity, image(y, x - 1)))
c |= LEFT;
if ( y > 0 && connected(intensity, image(y - 1, x)))
c |= UP;
if ( x - 1 < image.cols && connected(intensity, image(y, x + 1)))
c |= RIGHT;
if ( y - 1 < image.rows && connected(intensity, image(y + 1, x)))
c |= DOWN;
components(y, x) = c;
}
template< typename T>
void computeEdges(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y));
typedef InInterval<typename IntervalsTraits<T>::dist_type, IntervalsTraits<T>::ch> Int_t;
Int_t inInt(lo, hi);
computeConnectivity<T, Int_t><<<grid, block, 0, stream>>>(static_cast<const DevMem2D_<T> >(image), edges, inInt);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void computeEdges<uchar> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<uchar3> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<uchar4> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<ushort> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<ushort3>(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<ushort4>(const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<int> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
template void computeEdges<float> (const DevMem2D& image, DevMem2D edges, const float4& lo, const float4& hi, cudaStream_t stream);
__global__ void lableTiles(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * TILE_COLS;
int y = threadIdx.y + blockIdx.y * TILE_ROWS;
if (x >= edges.cols || y >= edges.rows) return;
//currently x is 1
int bounds = ((y + TPB_Y) < edges.rows);
__shared__ int labelsTile[TILE_ROWS][TILE_COLS];
__shared__ int edgesTile[TILE_ROWS][TILE_COLS];
int new_labels[TPB_Y][TPB_X];
int old_labels[TPB_Y][TPB_X];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j);
if (!xloc) c &= ~LEFT;
if (!yloc) c &= ~UP;
if (xloc == TILE_COLS -1) c &= ~RIGHT;
if (yloc == TILE_ROWS -1) c &= ~DOWN;
new_labels[i][j] = yloc * TILE_COLS + xloc;
edgesTile[yloc][xloc] = c;
}
for (int k = 0; ;++k)
{
//1. backup
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
old_labels[i][j] = new_labels[i][j];
labelsTile[yloc][xloc] = new_labels[i][j];
}
__syncthreads();
//2. compare local arrays
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int yloc = threadIdx.y + CTA_SIZE_Y * i;
int xloc = threadIdx.x + CTA_SIZE_X * j;
component c = edgesTile[yloc][xloc];
int label = new_labels[i][j];
if (c & UP)
label = ::min(label, labelsTile[yloc - 1][xloc]);
if (c & DOWN)
label = ::min(label, labelsTile[yloc + 1][xloc]);
if (c & LEFT)
label = ::min(label, labelsTile[yloc][xloc - 1]);
if (c & RIGHT)
label = ::min(label, labelsTile[yloc][xloc + 1]);
new_labels[i][j] = label;
}
__syncthreads();
//3. determine: Is any value changed?
int changed = 0;
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
if (new_labels[i][j] < old_labels[i][j])
{
changed = 1;
Emulation::smem::atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]);
}
}
changed = Emulation::syncthreadsOr(changed);
if (!changed)
break;
//4. Compact paths
const int *labels = &labelsTile[0][0];
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
while( labels[label] < label ) label = labels[label];
new_labels[i][j] = label;
}
__syncthreads();
}
#pragma unroll
for (int i = 0; i < TPB_Y; ++i)
#pragma unroll
for (int j = 0; j < TPB_X; ++j)
{
int label = new_labels[i][j];
int yloc = label / TILE_COLS;
int xloc = label - yloc * TILE_COLS;
xloc += blockIdx.x * TILE_COLS;
yloc += blockIdx.y * TILE_ROWS;
label = yloc * edges.cols + xloc;
// do it for x too.
if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label;
}
}
__device__ __forceinline__ int root(const DevMem2Di& comps, int label)
{
while(1)
{
int y = label / comps.cols;
int x = label - y * comps.cols;
int parent = comps(y, x);
if (label == parent) break;
label = parent;
}
return label;
}
__device__ __forceinline__ void isConnected(DevMem2Di& comps, int l1, int l2, bool& changed)
{
int r1 = root(comps, l1);
int r2 = root(comps, l2);
if (r1 == r2) return;
int mi = ::min(r1, r2);
int ma = ::max(r1, r2);
int y = ma / comps.cols;
int x = ma - y * comps.cols;
atomicMin(&comps.ptr(y)[x], mi);
changed = true;
}
__global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX,
const DevMem2D edges, DevMem2Di comps, const int yIncomplete, int xIncomplete)
{
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int stride = blockDim.y * blockDim.x;
int ybegin = blockIdx.y * (tilesNumY * tileSizeY);
int yend = ybegin + tilesNumY * tileSizeY;
if (blockIdx.y == gridDim.y - 1)
{
yend -= yIncomplete * tileSizeY;
yend -= tileSizeY;
tileSizeY = (edges.rows % tileSizeY);
yend += tileSizeY;
}
int xbegin = blockIdx.x * tilesNumX * tileSizeX;
int xend = xbegin + tilesNumX * tileSizeX;
if (blockIdx.x == gridDim.x - 1)
{
if (xIncomplete) yend = ybegin;
xend -= xIncomplete * tileSizeX;
xend -= tileSizeX;
tileSizeX = (edges.cols % tileSizeX);
xend += tileSizeX;
}
if (blockIdx.y == (gridDim.y - 1) && yIncomplete)
{
xend = xbegin;
}
int tasksV = (tilesNumX - 1) * (yend - ybegin);
int tasksH = (tilesNumY - 1) * (xend - xbegin);
int total = tasksH + tasksV;
bool changed;
do
{
changed = false;
for (int taskIdx = tid; taskIdx < total; taskIdx += stride)
{
if (taskIdx < tasksH)
{
int indexH = taskIdx;
int row = indexH / (xend - xbegin);
int col = indexH - row * (xend - xbegin);
int y = ybegin + (row + 1) * tileSizeY;
int x = xbegin + col;
component e = edges( x, y);
if (e & UP)
{
int lc = comps(y,x);
int lu = comps(y - 1, x);
isConnected(comps, lc, lu, changed);
}
}
else
{
int indexV = taskIdx - tasksH;
int col = indexV / (yend - ybegin);
int row = indexV - col * (yend - ybegin);
int x = xbegin + (col + 1) * tileSizeX;
int y = ybegin + row;
component e = edges(x, y);
if (e & LEFT)
{
int lc = comps(y, x);
int ll = comps(y, x - 1);
isConnected(comps, lc, ll, changed);
}
}
}
} while (Emulation::syncthreadsOr(changed));
}
__global__ void flatten(const DevMem2D edges, DevMem2Di comps)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x < comps.cols && y < comps.rows)
comps(y, x) = root(comps, comps(y, x));
}
enum {CC_NO_COMPACT = 0, CC_COMPACT_LABELS = 1};
void labelComponents(const DevMem2D& edges, DevMem2Di comps, int flags, cudaStream_t stream)
{
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS));
lableTiles<<<grid, block, 0, stream>>>(edges, comps);
cudaSafeCall( cudaGetLastError() );
int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS;
while (grid.x > 1 || grid.y > 1)
{
dim3 mergeGrid(ceilf(grid.x / 2.0), ceilf(grid.y / 2.0));
dim3 mergeBlock(STA_SIZE_MERGE_X, STA_SIZE_MERGE_Y);
// debug log
// std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl;
crossMerge<<<mergeGrid, mergeBlock, 0, stream>>>(2, 2, tileSizeY, tileSizeX, edges, comps, ceilf(grid.y / 2.0) - grid.y / 2, ceilf(grid.x / 2.0) - grid.x / 2);
tileSizeX <<= 1;
tileSizeY <<= 1;
grid = mergeGrid;
cudaSafeCall( cudaGetLastError() );
}
grid.x = divUp(edges.cols, block.x);
grid.y = divUp(edges.rows, block.y);
flatten<<<grid, block, 0, stream>>>(edges, comps);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
} } } |
d54197261a23c43f156595139cc0f35f7dde1c8f.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020 XGBoost contributors
*/
#include <memory>
#include <type_traits>
#include <algorithm>
#include "../common/hist_util.cuh"
#include "simple_batch_iterator.h"
#include "iterative_device_dmatrix.h"
#include "sparse_page_source.h"
#include "ellpack_page.cuh"
#include "proxy_dmatrix.h"
#include "device_adapter_hip.cuh"
namespace xgboost {
namespace data {
template <typename Fn>
decltype(auto) Dispatch(DMatrixProxy const* proxy, Fn fn) {
if (proxy->Adapter().type() == typeid(std::shared_ptr<CupyAdapter>)) {
auto value = dmlc::get<std::shared_ptr<CupyAdapter>>(
proxy->Adapter())->Value();
return fn(value);
} else if (proxy->Adapter().type() == typeid(std::shared_ptr<CudfAdapter>)) {
auto value = dmlc::get<std::shared_ptr<CudfAdapter>>(
proxy->Adapter())->Value();
return fn(value);
} else {
LOG(FATAL) << "Unknown type: " << proxy->Adapter().type().name();
auto value = dmlc::get<std::shared_ptr<CudfAdapter>>(
proxy->Adapter())->Value();
return fn(value);
}
}
void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missing, int nthread) {
// A handle passed to external iterator.
auto handle = static_cast<std::shared_ptr<DMatrix>*>(proxy_);
CHECK(handle);
DMatrixProxy* proxy = static_cast<DMatrixProxy*>(handle->get());
CHECK(proxy);
// The external iterator
auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{
iter_handle, reset_, next_};
dh::XGBCachingDeviceAllocator<char> alloc;
auto num_rows = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumRows(); });
};
auto num_cols = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumCols(); });
};
size_t row_stride = 0;
size_t nnz = 0;
// Sketch for all batches.
iter.Reset();
std::vector<common::SketchContainer> sketch_containers;
size_t batches = 0;
size_t accumulated_rows = 0;
bst_feature_t cols = 0;
int32_t device = -1;
while (iter.Next()) {
device = proxy->DeviceIdx();
dh::safe_cuda(hipSetDevice(device));
if (cols == 0) {
cols = num_cols();
} else {
CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns.";
}
sketch_containers.emplace_back(batch_param_.max_bin, num_cols(), num_rows(), device);
auto* p_sketch = &sketch_containers.back();
proxy->Info().weights_.SetDevice(device);
Dispatch(proxy, [&](auto const &value) {
common::AdapterDeviceSketchWeighted(value, batch_param_.max_bin,
proxy->Info(), missing, p_sketch);
});
auto batch_rows = num_rows();
accumulated_rows += batch_rows;
dh::caching_device_vector<size_t> row_counts(batch_rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
row_stride = ::max(row_stride, Dispatch(proxy, [=](auto const &value) {
return GetRowCounts(value, row_counts_span,
device, missing);
}));
nnz += thrust::reduce(thrust::hip::par(alloc), row_counts.begin(),
row_counts.end());
batches++;
}
common::SketchContainer final_sketch(batch_param_.max_bin, cols, accumulated_rows, device);
for (auto const& sketch : sketch_containers) {
final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data());
final_sketch.FixError();
}
sketch_containers.clear();
sketch_containers.shrink_to_fit();
common::HistogramCuts cuts;
final_sketch.MakeCuts(&cuts);
this->info_.num_col_ = cols;
this->info_.num_row_ = accumulated_rows;
this->info_.num_nonzero_ = nnz;
// Construct the final ellpack page.
page_.reset(new EllpackPage);
*(page_->Impl()) = EllpackPageImpl(proxy->DeviceIdx(), cuts, this->IsDense(),
row_stride, accumulated_rows);
size_t offset = 0;
iter.Reset();
while (iter.Next()) {
auto device = proxy->DeviceIdx();
dh::safe_cuda(hipSetDevice(device));
auto rows = num_rows();
dh::caching_device_vector<size_t> row_counts(rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, device, missing);
});
auto is_dense = this->IsDense();
auto new_impl = Dispatch(proxy, [&](auto const &value) {
return EllpackPageImpl(value, missing, device, is_dense, nthread,
row_counts_span, row_stride, rows, cols, cuts);
});
size_t num_elements = page_->Impl()->Copy(device, &new_impl, offset);
offset += num_elements;
proxy->Info().num_row_ = num_rows();
proxy->Info().num_col_ = cols;
if (batches != 1) {
this->info_.Extend(std::move(proxy->Info()), false);
}
}
if (batches == 1) {
this->info_ = std::move(proxy->Info());
CHECK_EQ(proxy->Info().labels_.Size(), 0);
}
iter.Reset();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
CHECK(page_);
auto begin_iter =
BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_.get()));
return BatchSet<EllpackPage>(begin_iter);
}
} // namespace data
} // namespace xgboost
| d54197261a23c43f156595139cc0f35f7dde1c8f.cu | /*!
* Copyright 2020 XGBoost contributors
*/
#include <memory>
#include <type_traits>
#include <algorithm>
#include "../common/hist_util.cuh"
#include "simple_batch_iterator.h"
#include "iterative_device_dmatrix.h"
#include "sparse_page_source.h"
#include "ellpack_page.cuh"
#include "proxy_dmatrix.h"
#include "device_adapter.cuh"
namespace xgboost {
namespace data {
template <typename Fn>
decltype(auto) Dispatch(DMatrixProxy const* proxy, Fn fn) {
if (proxy->Adapter().type() == typeid(std::shared_ptr<CupyAdapter>)) {
auto value = dmlc::get<std::shared_ptr<CupyAdapter>>(
proxy->Adapter())->Value();
return fn(value);
} else if (proxy->Adapter().type() == typeid(std::shared_ptr<CudfAdapter>)) {
auto value = dmlc::get<std::shared_ptr<CudfAdapter>>(
proxy->Adapter())->Value();
return fn(value);
} else {
LOG(FATAL) << "Unknown type: " << proxy->Adapter().type().name();
auto value = dmlc::get<std::shared_ptr<CudfAdapter>>(
proxy->Adapter())->Value();
return fn(value);
}
}
void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missing, int nthread) {
// A handle passed to external iterator.
auto handle = static_cast<std::shared_ptr<DMatrix>*>(proxy_);
CHECK(handle);
DMatrixProxy* proxy = static_cast<DMatrixProxy*>(handle->get());
CHECK(proxy);
// The external iterator
auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{
iter_handle, reset_, next_};
dh::XGBCachingDeviceAllocator<char> alloc;
auto num_rows = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumRows(); });
};
auto num_cols = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumCols(); });
};
size_t row_stride = 0;
size_t nnz = 0;
// Sketch for all batches.
iter.Reset();
std::vector<common::SketchContainer> sketch_containers;
size_t batches = 0;
size_t accumulated_rows = 0;
bst_feature_t cols = 0;
int32_t device = -1;
while (iter.Next()) {
device = proxy->DeviceIdx();
dh::safe_cuda(cudaSetDevice(device));
if (cols == 0) {
cols = num_cols();
} else {
CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns.";
}
sketch_containers.emplace_back(batch_param_.max_bin, num_cols(), num_rows(), device);
auto* p_sketch = &sketch_containers.back();
proxy->Info().weights_.SetDevice(device);
Dispatch(proxy, [&](auto const &value) {
common::AdapterDeviceSketchWeighted(value, batch_param_.max_bin,
proxy->Info(), missing, p_sketch);
});
auto batch_rows = num_rows();
accumulated_rows += batch_rows;
dh::caching_device_vector<size_t> row_counts(batch_rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
row_stride = std::max(row_stride, Dispatch(proxy, [=](auto const &value) {
return GetRowCounts(value, row_counts_span,
device, missing);
}));
nnz += thrust::reduce(thrust::cuda::par(alloc), row_counts.begin(),
row_counts.end());
batches++;
}
common::SketchContainer final_sketch(batch_param_.max_bin, cols, accumulated_rows, device);
for (auto const& sketch : sketch_containers) {
final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data());
final_sketch.FixError();
}
sketch_containers.clear();
sketch_containers.shrink_to_fit();
common::HistogramCuts cuts;
final_sketch.MakeCuts(&cuts);
this->info_.num_col_ = cols;
this->info_.num_row_ = accumulated_rows;
this->info_.num_nonzero_ = nnz;
// Construct the final ellpack page.
page_.reset(new EllpackPage);
*(page_->Impl()) = EllpackPageImpl(proxy->DeviceIdx(), cuts, this->IsDense(),
row_stride, accumulated_rows);
size_t offset = 0;
iter.Reset();
while (iter.Next()) {
auto device = proxy->DeviceIdx();
dh::safe_cuda(cudaSetDevice(device));
auto rows = num_rows();
dh::caching_device_vector<size_t> row_counts(rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, device, missing);
});
auto is_dense = this->IsDense();
auto new_impl = Dispatch(proxy, [&](auto const &value) {
return EllpackPageImpl(value, missing, device, is_dense, nthread,
row_counts_span, row_stride, rows, cols, cuts);
});
size_t num_elements = page_->Impl()->Copy(device, &new_impl, offset);
offset += num_elements;
proxy->Info().num_row_ = num_rows();
proxy->Info().num_col_ = cols;
if (batches != 1) {
this->info_.Extend(std::move(proxy->Info()), false);
}
}
if (batches == 1) {
this->info_ = std::move(proxy->Info());
CHECK_EQ(proxy->Info().labels_.Size(), 0);
}
iter.Reset();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
CHECK(page_);
auto begin_iter =
BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_.get()));
return BatchSet<EllpackPage>(begin_iter);
}
} // namespace data
} // namespace xgboost
|
bc1a4c0c86a26a29ad22617d8b2ebceb8af2ef69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include "rtree.h"
__host__ __device__
int overlap(RTree_Rect *R, RTree_Rect * S)
{
register RTree_Rect *r = R, *s = S;
assert(r && s);
if ( r->left > s->right || r->right < s->left
|| r->top > s->bottom || r->bottom < s->top )
{
//printf("overlap R: %llu, %llu, %lu, %lu, S: %llu, %llu, %lu, %lu\n",
// r->left, r->right, r->top, r->bottom, s->left, s->right, s->top, s->bottom);
return 0;
}
else
return 1;
}
__host__ __device__
int contains(RTree_Rect *R, RTree_Point *P)
{
register RTree_Rect *r = R;
register RTree_Point *p = P;
assert(r && p);
//printf("point: %llu, %lu, Rect: %llu, %llu, %lu, %lu\n",
// p->x, p->y, r->left, r->right, r->top, r->bottom);
if (p->x < r->right && p->x > r->left
&& p->y < r->bottom && p->y > r->top)
return 1;
else
return 0;
}
__host__ __device__
inline void init_boundary(RTree_Rect *bbox)
{
bbox->top = ULONG_MAX;
bbox->bottom = 0;
bbox->left = ULLONG_MAX;
bbox->right = 0;
}
__host__ __device__
inline void update_boundary(RTree_Rect *bbox, RTree_Rect *node_bbx)
{
bbox->top = min(bbox->top, node_bbx->top);
bbox->bottom = max(bbox->bottom, node_bbx->bottom);
bbox->left = min(bbox->left, node_bbx->left);
bbox->right = max(bbox->right, node_bbx->right);
//printf("---node bbox: %llu, %llu, update: %llu, %llu\n",
// node_bbx->left, node_bbx->right, bbox->left, bbox->right);
}
__host__ __device__
inline void c_update_boundary(RTree_Rect *bbox, RTree_Point *p)
{
bbox->top = min(p->y, bbox->top);
bbox->bottom = max(p->y, bbox->bottom);
bbox->left = min(p->x, bbox->left);
bbox->right = max(p->x, bbox->right);
//printf("x: %llu, bbox: %lu, %lu, %llu, %llu\n", p->x, bbox->top, bbox->bottom, bbox->left, bbox->right);
}
__host__ __device__
inline size_t get_node_length (
const size_t i,
const size_t len_level,
const size_t previous_level_len,
const size_t node_size)
{
const size_t n = node_size;
const size_t len = previous_level_len;
const size_t final_i = len_level -1;
// return len % n when i is the last node of the level and there is a remainder, otherwise return n
return ((i != final_i || len % n == 0) *n) + ((i == final_i && len % n != 0) * (len % n));
}
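// Worked example of the branchless formula above: with previous_level_len = 10
// and node_size = 4 the level holds 3 nodes, and get_node_length returns
// 4, 4, 2 for i = 0, 1, 2; only the last node receives the remainder.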
// points are on device and sorted by x
void cuda_sort(RTree_Points *sorted)
{
uint64 *X = sorted->X;
unsigned long *Y = sorted->Y;
int *ID = sorted->ID;
// sort by x
auto tbegin = thrust::make_zip_iterator(thrust::make_tuple(Y, ID));
auto tend = thrust::make_zip_iterator(thrust::make_tuple(Y+sorted->length, ID+sorted->length));
thrust::sort_by_key(thrust::device, X, X+sorted->length, tbegin);
}
RTree cuda_create_rtree(RTree_Points points)
{
cuda_sort(&points);
RTree_Leaf *leaves = cuda_create_leaves( &points );
const size_t len_leaf = DIV_CEIL(points.length, RTREE_NODE_SIZE);
// build rtree from bottom
RTree_Node *level_previous = (RTree_Node*) leaves;
size_t len_previous = len_leaf;
size_t depth = 1; // leaf level: 0
size_t num_nodes = len_leaf;
while (len_previous > RTREE_NODE_SIZE)
{
level_previous = cuda_create_level(level_previous, len_previous, depth);
num_nodes += level_previous->num;
len_previous = DIV_CEIL(len_previous, RTREE_NODE_SIZE);
++depth;
}
// tackle the root node
RTree_Node *root = new RTree_Node();
init_boundary(&root->bbox);
root->num = len_previous;
root->children = level_previous;
num_nodes += root->num;
for (size_t i = 0, end = len_previous; i != end; ++i)
update_boundary(&root->bbox, &root->children[i].bbox);
++depth;
root->depth = depth;
RTree tree = {depth, root};
return tree;
}
__global__
void create_level_kernel
(
RTree_Node *next_level,
RTree_Node *nodes,
RTree_Node *real_nodes,
const size_t len,
size_t depth
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t next_level_len = DIV_CEIL(len, RTREE_NODE_SIZE);
if (i >= next_level_len) return; // skip the final block remainder
RTree_Node *n = &next_level[i];
init_boundary(&n->bbox);
n->num = get_node_length(i, next_level_len, len, RTREE_NODE_SIZE);
n->children = &real_nodes[i * RTREE_NODE_SIZE];
n->depth = depth;
//printf("level num: %d, ---num: %lu\n", n->num, next_level_len);
#pragma unroll
for (size_t j = 0, jend = n->num; j != jend; ++j)
{
update_boundary(&n->bbox, &nodes[i * RTREE_NODE_SIZE + j].bbox);
//printf("after set node bbox: %lu, %lu, %llu, %llu\n",
// n->bbox.top, n->bbox.bottom, n->bbox.left, n->bbox.right);
}
}
RTree_Node* cuda_create_level(RTree_Node *nodes, const size_t len, size_t depth)
{
const size_t THREADS_PER_BLOCK = 512;
const size_t next_level_len = DIV_CEIL(len, RTREE_NODE_SIZE);
RTree_Node *d_nodes;
RTree_Node *d_next_level;
hipMalloc( (void**) &d_nodes, len * sizeof(RTree_Node) );
hipMalloc( (void**) &d_next_level, next_level_len * sizeof(RTree_Node) );
hipMemcpy(d_nodes, nodes, len * sizeof(RTree_Node), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( create_level_kernel), dim3((next_level_len + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0,
d_next_level, d_nodes, nodes, len, depth);
RTree_Node *next_level = new RTree_Node[next_level_len];
hipMemcpy(next_level, d_next_level, next_level_len * sizeof(RTree_Node), hipMemcpyDeviceToHost);
hipFree(d_next_level);
hipFree(d_nodes);
return next_level;
}
__global__
void create_leaves_kernel
(
RTree_Leaf *leaves,
RTree_Point *points,
RTree_Point *h_points,
uint64 *X,
unsigned long *Y,
int *ID,
const size_t len
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t len_leaf = DIV_CEIL(len, RTREE_NODE_SIZE);
if (i >= len_leaf) return; // skip the final block remainder
// tackle leaf points
RTree_Leaf *l = &leaves[i];
init_boundary(&l->bbox);
l->num = get_node_length(i, len_leaf, len, RTREE_NODE_SIZE);
l->depth = 0;
l->points = &h_points[i * RTREE_NODE_SIZE]; // occupy position
// compute the leaf MBR from its points
#pragma unroll
for (size_t j = 0, jend = l->num; j != jend; ++j)
{
// *** use pointer, not value ***/
RTree_Point *p = &points[i* RTREE_NODE_SIZE + j];
p->x = X[i * RTREE_NODE_SIZE + j];
p->y = Y[i * RTREE_NODE_SIZE + j];
p->id = ID[i * RTREE_NODE_SIZE + j];
//printf("----------id: %d, j: %lu\n", p->id, j);
c_update_boundary(&l->bbox, p);
}
}
RTree_Leaf* cuda_create_leaves(RTree_Points *sorted)
{
const size_t THREADS_PER_BLOCK = 512;
const size_t len = sorted->length;
const size_t num_leaf = DIV_CEIL(len, RTREE_NODE_SIZE);
RTree_Leaf *d_leaves;
RTree_Point *d_points;
hipMalloc( (void**) &d_leaves, num_leaf * sizeof(RTree_Leaf) );
hipMalloc( (void**) &d_points, len * sizeof(RTree_Point) );
// points on host will be passed to kernel and only occupy the position
RTree_Point *points = new RTree_Point[len];
hipLaunchKernelGGL(( create_leaves_kernel), dim3((num_leaf + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0,
d_leaves, d_points, points, sorted->X, sorted->Y, sorted->ID, len);
RTree_Leaf *leaves = new RTree_Leaf[num_leaf];
// copy points from device to host
hipMemcpy(leaves, d_leaves, num_leaf * sizeof(RTree_Leaf), hipMemcpyDeviceToHost);
hipMemcpy(points, d_points, len * sizeof(RTree_Point), hipMemcpyDeviceToHost);
hipFree(d_leaves);
hipFree(d_points);
return leaves;
}
int cpu_search(RTree_Node *N, RTree_Rect *rect, std::vector<int> &points)
{
register RTree_Node *n = N;
register RTree_Rect *r = rect;
register int hit_count = 0;
register int i;
assert(n);
assert(n->num);
assert(r);
//printf("depth: %lu, bbox: %llu, %llu, %lu, %lu\t rect: %llu, %lu\n", n->depth, n->bbox.left, n->bbox.right,
// n->bbox.top, n->bbox.bottom, r->left, r->top );
if (n->depth > 0)
{
for (i = 0; i < n->num; i++)
{
printf("depth: %lu, bbox: %llu, %llu, %lu, %lu\t rect: %llu, %lu\t num: %lu\n",
n->depth, n->children[i].bbox.left, n->children[i].bbox.right,
n->children[i].bbox.top, n->children[i].bbox.bottom, r->left, r->top
, n->children[i].num );
if ( overlap(r, &n->children[i].bbox) )
{
hit_count += cpu_search(&n->children[i], rect, points);
}
}
}
else // this is a leaf node
{
if ( n->num && overlap(r, &n->bbox) )
{
//printf("---%llu, %llu, %lu, %lu\n", n->bbox.left, n->bbox.right, n->bbox.top, n->bbox.bottom);
RTree_Leaf *l = (RTree_Leaf*) n;
for (i = 0; i < n->num; i++)
{
// determine whether the point lies inside rect
if ( contains(r, &l->points[i] ) )
{
hit_count++;
// add the id only if the result does not already contain this point
if ( std::find(points.begin(), points.end(), l->points[i].id) == points.end() )
points.push_back(l->points[i].id);
printf("%d trajectory is hit, %llu, %lu\n", l->points[i].id, l->points[i].x, l->points[i].y);
}
}
}
}
return hit_count;
}
template< int MAX_THREADS_PER_BLOCK >
__global__
void search_kernel(
CUDA_RTree_Node * d_nodes,
int * d_edges,
RTree_Rect * d_rects,
bool * d_search_front,
RTree_Rect * rects,
int * results,
int num_nodes)
{
// shared memory to store the query rectangles
extern __shared__ RTree_Rect rmem[];
// Address of shared memory
RTree_Rect *s_rect = (RTree_Rect *) &rmem[blockIdx.x];
// each thread represents one node
int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
// whether the query rectangle overlaps the MBR of the frontier node
bool flag = false;
if ( overlap(&d_rects[tid], s_rect) ) flag = true;
// node is in frontier and its MBR overlaps query rectangle
if (tid < num_nodes && d_search_front[tid] && flag)
{
// remove it from frontier
d_search_front[tid] = false;
// reach Leaf level
if (d_nodes[tid].starting == -1)
{
results[tid] = 1;
return ;
}
// put its children to the next search_front
for (int i = d_nodes[tid].starting; i < (d_nodes[tid].num_edges + d_nodes[tid].starting); i++)
{
int id = d_edges[i];
d_search_front[id] = true;
}
}
hipLaunchKernelGGL(( search_kernel<MAX_THREADS_PER_BLOCK>), dim3(10), dim3(20), 0, 0,
d_nodes, d_edges, d_rects, d_search_front, rects, results, num_nodes);
}
void fill_edges(RTree_Node *N, CUDA_RTree_Node *h_nodes, int *h_edges, RTree_Rect *h_rects, int& node_id)
{
register RTree_Node * n = N;
if (node_id == 0)
{
h_nodes[node_id].starting = 0; // initialize root node
for (int i = h_nodes[0].starting; i < (h_nodes[0].starting + n->num); i++)
{
// starting index of child in array
if (i == 0)
h_edges[i] = RTREE_NODE_SIZE;
else
h_edges[i] = n->num;
}
}
else
{
if (n->depth > 0) // set nodes
{
h_nodes[node_id].starting = h_nodes[node_id-1].starting + h_nodes[node_id-1].num_edges;
for (int i = h_nodes[node_id].starting; i < (h_nodes[node_id].starting + n->num); i++)
{
// starting index of child in array
h_edges[i] = h_edges[i-1] + h_nodes[node_id-1].num_edges;
}
}
else // set Leaf node
{
h_nodes[node_id].starting = -1;
}
}
h_nodes[node_id].num_edges = n->num;
h_rects[node_id] = n->bbox;
// recursively fill edges
for (int i = 0; i < n->num; i++)
{
fill_edges(&n->children[i], h_nodes, h_edges, h_rects, ++node_id);
}
}
RTree_Points cuda_search(RTree *tree, std::vector<RTree_Rect> rect_vec)
{
CUDA_RTree_Node * h_nodes = (CUDA_RTree_Node *) malloc(tree->num * sizeof(CUDA_RTree_Node));
int * h_edges = (int *) malloc(tree->num * sizeof(int) * RTREE_NODE_SIZE);
RTree_Rect * h_rects = (RTree_Rect *) malloc(tree->num * sizeof(RTree_Rect));
int node_id = 0;
printf("tree node number: %lu-----\n", tree->num);
// copy data from cpu to gpu
fill_edges(tree->root, h_nodes, h_edges, h_rects, node_id);
for (int i = 0; i < tree->num; i++)
{
printf("starting of node: %d is %d\n", i, h_nodes[i].starting);
}
// allocate n blocks to deal with n query rectangles
RTree_Points points;
return points;
}
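// A minimal usage sketch, not part of the original source: build an R-tree from a
// few device-resident points and run a host-side window query through cpu_search.
// It assumes rtree.h provides the uint64 typedef, std::vector and the
// RTree_Points/RTree_Rect/RTree members used above; the coordinate values below
// are arbitrary.
static void rtree_usage_sketch()
{
    const size_t n = 8;
    std::vector<uint64> x = {1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<unsigned long> y = {1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<int> id = {0, 1, 2, 3, 4, 5, 6, 7};
    // copy the coordinates to the device; cuda_create_rtree sorts and reads them there
    thrust::device_vector<uint64> d_x(x);
    thrust::device_vector<unsigned long> d_y(y);
    thrust::device_vector<int> d_id(id);
    RTree_Points points;
    points.X = d_x.data().get();
    points.Y = d_y.data().get();
    points.ID = d_id.data().get();
    points.length = n;
    RTree tree = cuda_create_rtree(points);
    // query a small window and collect the ids of the points that fall inside it
    RTree_Rect query;
    query.left = 2; query.right = 6; query.top = 2; query.bottom = 6;
    std::vector<int> hits;
    cpu_search(tree.root, &query, hits);
}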
| bc1a4c0c86a26a29ad22617d8b2ebceb8af2ef69.cu | #include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include "rtree.h"
__host__ __device__
int overlap(RTree_Rect *R, RTree_Rect * S)
{
register RTree_Rect *r = R, *s = S;
assert(r && s);
if ( r->left > s->right || r->right < s->left
|| r->top > s->bottom || r->bottom < s->top )
{
//printf("overlap R: %llu, %llu, %lu, %lu, S: %llu, %llu, %lu, %lu\n",
// r->left, r->right, r->top, r->bottom, s->left, s->right, s->top, s->bottom);
return 0;
}
else
return 1;
}
__host__ __device__
int contains(RTree_Rect *R, RTree_Point *P)
{
register RTree_Rect *r = R;
register RTree_Point *p = P;
assert(r && p);
//printf("point: %llu, %lu, Rect: %llu, %llu, %lu, %lu\n",
// p->x, p->y, r->left, r->right, r->top, r->bottom);
if (p->x < r->right && p->x > r->left
&& p->y < r->bottom && p->y > r->top)
return 1;
else
return 0;
}
__host__ __device__
inline void init_boundary(RTree_Rect *bbox)
{
bbox->top = ULONG_MAX;
bbox->bottom = 0;
bbox->left = ULLONG_MAX;
bbox->right = 0;
}
__host__ __device__
inline void update_boundary(RTree_Rect *bbox, RTree_Rect *node_bbx)
{
bbox->top = min(bbox->top, node_bbx->top);
bbox->bottom = max(bbox->bottom, node_bbx->bottom);
bbox->left = min(bbox->left, node_bbx->left);
bbox->right = max(bbox->right, node_bbx->right);
//printf("---node bbox: %llu, %llu, update: %llu, %llu\n",
// node_bbx->left, node_bbx->right, bbox->left, bbox->right);
}
__host__ __device__
inline void c_update_boundary(RTree_Rect *bbox, RTree_Point *p)
{
bbox->top = min(p->y, bbox->top);
bbox->bottom = max(p->y, bbox->bottom);
bbox->left = min(p->x, bbox->left);
bbox->right = max(p->x, bbox->right);
//printf("x: %llu, bbox: %lu, %lu, %llu, %llu\n", p->x, bbox->top, bbox->bottom, bbox->left, bbox->right);
}
__host__ __device__
inline size_t get_node_length (
const size_t i,
const size_t len_level,
const size_t previous_level_len,
const size_t node_size)
{
const size_t n = node_size;
const size_t len = previous_level_len;
const size_t final_i = len_level -1;
// return len % n when i is the last node of the level and there is a remainder, otherwise return n
return ((i != final_i || len % n == 0) *n) + ((i == final_i && len % n != 0) * (len % n));
}
// points are on device and sorted by x
void cuda_sort(RTree_Points *sorted)
{
uint64 *X = sorted->X;
unsigned long *Y = sorted->Y;
int *ID = sorted->ID;
// sort by x
auto tbegin = thrust::make_zip_iterator(thrust::make_tuple(Y, ID));
auto tend = thrust::make_zip_iterator(thrust::make_tuple(Y+sorted->length, ID+sorted->length));
thrust::sort_by_key(thrust::device, X, X+sorted->length, tbegin);
}
RTree cuda_create_rtree(RTree_Points points)
{
cuda_sort(&points);
RTree_Leaf *leaves = cuda_create_leaves( &points );
const size_t len_leaf = DIV_CEIL(points.length, RTREE_NODE_SIZE);
// build rtree from bottom
RTree_Node *level_previous = (RTree_Node*) leaves;
size_t len_previous = len_leaf;
size_t depth = 1; // leaf level: 0
size_t num_nodes = len_leaf;
while (len_previous > RTREE_NODE_SIZE)
{
level_previous = cuda_create_level(level_previous, len_previous, depth);
num_nodes += level_previous->num;
len_previous = DIV_CEIL(len_previous, RTREE_NODE_SIZE);
++depth;
}
// tackle the root node
RTree_Node *root = new RTree_Node();
init_boundary(&root->bbox);
root->num = len_previous;
root->children = level_previous;
num_nodes += root->num;
for (size_t i = 0, end = len_previous; i != end; ++i)
update_boundary(&root->bbox, &root->children[i].bbox);
++depth;
root->depth = depth;
RTree tree = {depth, root};
return tree;
}
__global__
void create_level_kernel
(
RTree_Node *next_level,
RTree_Node *nodes,
RTree_Node *real_nodes,
const size_t len,
size_t depth
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t next_level_len = DIV_CEIL(len, RTREE_NODE_SIZE);
if (i >= next_level_len) return; // skip the final block remainder
RTree_Node *n = &next_level[i];
init_boundary(&n->bbox);
n->num = get_node_length(i, next_level_len, len, RTREE_NODE_SIZE);
n->children = &real_nodes[i * RTREE_NODE_SIZE];
n->depth = depth;
//printf("level num: %d, ---num: %lu\n", n->num, next_level_len);
#pragma unroll
for (size_t j = 0, jend = n->num; j != jend; ++j)
{
update_boundary(&n->bbox, &nodes[i * RTREE_NODE_SIZE + j].bbox);
//printf("after set node bbox: %lu, %lu, %llu, %llu\n",
// n->bbox.top, n->bbox.bottom, n->bbox.left, n->bbox.right);
}
}
RTree_Node* cuda_create_level(RTree_Node *nodes, const size_t len, size_t depth)
{
const size_t THREADS_PER_BLOCK = 512;
const size_t next_level_len = DIV_CEIL(len, RTREE_NODE_SIZE);
RTree_Node *d_nodes;
RTree_Node *d_next_level;
cudaMalloc( (void**) &d_nodes, len * sizeof(RTree_Node) );
cudaMalloc( (void**) &d_next_level, next_level_len * sizeof(RTree_Node) );
cudaMemcpy(d_nodes, nodes, len * sizeof(RTree_Node), cudaMemcpyHostToDevice);
create_level_kernel<<< (next_level_len + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>
(d_next_level, d_nodes, nodes, len, depth);
RTree_Node *next_level = new RTree_Node[next_level_len];
cudaMemcpy(next_level, d_next_level, next_level_len * sizeof(RTree_Node), cudaMemcpyDeviceToHost);
cudaFree(d_next_level);
cudaFree(d_nodes);
return next_level;
}
__global__
void create_leaves_kernel
(
RTree_Leaf *leaves,
RTree_Point *points,
RTree_Point *h_points,
uint64 *X,
unsigned long *Y,
int *ID,
const size_t len
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t len_leaf = DIV_CEIL(len, RTREE_NODE_SIZE);
if (i >= len_leaf) return; // skip the final block remainder
// tackle leaf points
RTree_Leaf *l = &leaves[i];
init_boundary(&l->bbox);
l->num = get_node_length(i, len_leaf, len, RTREE_NODE_SIZE);
l->depth = 0;
l->points = &h_points[i * RTREE_NODE_SIZE]; // occupy position
// compute the leaf MBR from its points
#pragma unroll
for (size_t j = 0, jend = l->num; j != jend; ++j)
{
// *** use pointer, not value ***/
RTree_Point *p = &points[i* RTREE_NODE_SIZE + j];
p->x = X[i * RTREE_NODE_SIZE + j];
p->y = Y[i * RTREE_NODE_SIZE + j];
p->id = ID[i * RTREE_NODE_SIZE + j];
//printf("----------id: %d, j: %lu\n", p->id, j);
c_update_boundary(&l->bbox, p);
}
}
RTree_Leaf* cuda_create_leaves(RTree_Points *sorted)
{
const size_t THREADS_PER_BLOCK = 512;
const size_t len = sorted->length;
const size_t num_leaf = DIV_CEIL(len, RTREE_NODE_SIZE);
RTree_Leaf *d_leaves;
RTree_Point *d_points;
cudaMalloc( (void**) &d_leaves, num_leaf * sizeof(RTree_Leaf) );
cudaMalloc( (void**) &d_points, len * sizeof(RTree_Point) );
// points on host will be passed to kernel and only occupy the position
RTree_Point *points = new RTree_Point[len];
create_leaves_kernel<<< (num_leaf + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>
(d_leaves, d_points, points, sorted->X, sorted->Y, sorted->ID, len);
RTree_Leaf *leaves = new RTree_Leaf[num_leaf];
// copy points from device to host
cudaMemcpy(leaves, d_leaves, num_leaf * sizeof(RTree_Leaf), cudaMemcpyDeviceToHost);
cudaMemcpy(points, d_points, len * sizeof(RTree_Point), cudaMemcpyDeviceToHost);
cudaFree(d_leaves);
cudaFree(d_points);
return leaves;
}
int cpu_search(RTree_Node *N, RTree_Rect *rect, std::vector<int> &points)
{
register RTree_Node *n = N;
register RTree_Rect *r = rect;
register int hit_count = 0;
register int i;
assert(n);
assert(n->num);
assert(r);
//printf("depth: %lu, bbox: %llu, %llu, %lu, %lu\t rect: %llu, %lu\n", n->depth, n->bbox.left, n->bbox.right,
// n->bbox.top, n->bbox.bottom, r->left, r->top );
if (n->depth > 0)
{
for (i = 0; i < n->num; i++)
{
printf("depth: %lu, bbox: %llu, %llu, %lu, %lu\t rect: %llu, %lu\t num: %lu\n",
n->depth, n->children[i].bbox.left, n->children[i].bbox.right,
n->children[i].bbox.top, n->children[i].bbox.bottom, r->left, r->top
, n->children[i].num );
if ( overlap(r, &n->children[i].bbox) )
{
hit_count += cpu_search(&n->children[i], rect, points);
}
}
}
else // this is a leaf node
{
if ( n->num && overlap(r, &n->bbox) )
{
//printf("---%llu, %llu, %lu, %lu\n", n->bbox.left, n->bbox.right, n->bbox.top, n->bbox.bottom);
RTree_Leaf *l = (RTree_Leaf*) n;
for (i = 0; i < n->num; i++)
{
// determine whether the point lies inside rect
if ( contains(r, &l->points[i] ) )
{
hit_count++;
// add the id only if the result does not already contain this point
if ( std::find(points.begin(), points.end(), l->points[i].id) == points.end() )
points.push_back(l->points[i].id);
printf("%d trajectory is hit, %llu, %lu\n", l->points[i].id, l->points[i].x, l->points[i].y);
}
}
}
}
return hit_count;
}
template< int MAX_THREADS_PER_BLOCK >
__global__
void search_kernel(
CUDA_RTree_Node * d_nodes,
int * d_edges,
RTree_Rect * d_rects,
bool * d_search_front,
RTree_Rect * rects,
int * results,
int num_nodes)
{
// shared memory to store the query rectangles
extern __shared__ RTree_Rect rmem[];
// Address of shared memory
RTree_Rect *s_rect = (RTree_Rect *) &rmem[blockIdx.x];
// each thread represents one node
int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
// whether the query rectangle overlaps the MBR of the frontier node
bool flag = false;
if ( overlap(&d_rects[tid], s_rect) ) flag = true;
// node is in frontier and its MBR overlaps query rectangle
if (tid < num_nodes && d_search_front[tid] && flag)
{
// remove it from frontier
d_search_front[tid] = false;
// reach Leaf level
if (d_nodes[tid].starting == -1)
{
results[tid] = 1;
return ;
}
// put its children to the next search_front
for (int i = d_nodes[tid].starting; i < (d_nodes[tid].num_edges + d_nodes[tid].starting); i++)
{
int id = d_edges[i];
d_search_front[id] = true;
}
}
search_kernel<MAX_THREADS_PER_BLOCK><<<10, 20>>>
(d_nodes, d_edges, d_rects, d_search_front, rects, results, num_nodes);
}
void fill_edges(RTree_Node *N, CUDA_RTree_Node *h_nodes, int *h_edges, RTree_Rect *h_rects, int& node_id)
{
register RTree_Node * n = N;
if (node_id == 0)
{
h_nodes[node_id].starting = 0; // initialize root node
for (int i = h_nodes[0].starting; i < (h_nodes[0].starting + n->num); i++)
{
// starting index of child in array
if (i == 0)
h_edges[i] = RTREE_NODE_SIZE;
else
h_edges[i] = n->num;
}
}
else
{
if (n->depth > 0) // set nodes
{
h_nodes[node_id].starting = h_nodes[node_id-1].starting + h_nodes[node_id-1].num_edges;
for (int i = h_nodes[node_id].starting; i < (h_nodes[node_id].starting + n->num); i++)
{
// starting index of child in array
h_edges[i] = h_edges[i-1] + h_nodes[node_id-1].num_edges;
}
}
else // set Leaf node
{
h_nodes[node_id].starting = -1;
}
}
h_nodes[node_id].num_edges = n->num;
h_rects[node_id] = n->bbox;
// recursively fill edges
for (int i = 0; i < n->num; i++)
{
fill_edges(&n->children[i], h_nodes, h_edges, h_rects, ++node_id);
}
}
RTree_Points cuda_search(RTree *tree, std::vector<RTree_Rect> rect_vec)
{
CUDA_RTree_Node * h_nodes = (CUDA_RTree_Node *) malloc(tree->num * sizeof(CUDA_RTree_Node));
int * h_edges = (int *) malloc(tree->num * sizeof(int) * RTREE_NODE_SIZE);
RTree_Rect * h_rects = (RTree_Rect *) malloc(tree->num * sizeof(RTree_Rect));
int node_id = 0;
printf("tree node number: %lu-----\n", tree->num);
// copy data from cpu to gpu
fill_edges(tree->root, h_nodes, h_edges, h_rects, node_id);
for (int i = 0; i < tree->num; i++)
{
printf("starting of node: %d is %d\n", i, h_nodes[i].starting);
}
// allocate n blocks to deal with n query rectangles
RTree_Points points;
return points;
}
|
b2c568d16797d905a699d18e549178f5c7d554d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include "CudaWrappers.h"
#include "CudaDeviceDataMan.h"
#include "DepthCamera.h"
__global__ void findCorrespondecesKernel(const Point4fMap2D input_vertices,
const Vector4fMap2D input_normals,
const Point4fMap2D target_vertices,
const Vector4fMap2D target_normals,
const CameraParams depth_camera_params,
const Mat44 cur_transform,
const Mat44 last_transform_inv,
Point4fMap2D corres_vertices,
float dist_thres,
float norm_sin_thres
)
{ // precondition: corres_vertices has been cleared
// find the correspondence of each input point in the target view
const unsigned x = blockDim.x*blockIdx.x + threadIdx.x;
const unsigned y = blockDim.y*blockIdx.y + threadIdx.y;
const unsigned cols=input_vertices.cols();
const unsigned rows=input_vertices.rows();
if (x >= cols||y>=rows)return;
float4 input_v_data = input_vertices.at(x, y);
float4 input_n_data = input_normals.at(x, y);
if (isZero(input_n_data) ) return;
float4 v_input_g = cur_transform* input_v_data ;
float4 n_input_g =cur_transform* input_n_data;
float4 v_cp = last_transform_inv*v_input_g;
int2 screen_pos= DepthCamera::projectSkeletonToScreen(make_float3(v_cp.x,v_cp.y,v_cp.z),depth_camera_params);
if (screen_pos.x < 0 || screen_pos.x >= cols || screen_pos.y < 0 || screen_pos.y >= rows)
{
return;
}
float4 n_target_g = target_normals.at(screen_pos.x,screen_pos.y);
if (isZero(n_target_g))return;
float4 v_target_g = target_vertices.at(screen_pos.x, screen_pos.y);
float4 delta_g=v_target_g - v_input_g;
float d = norm(make_float3(delta_g.x,delta_g.y,delta_g.z));
float d_normal_sin = norm(cross(make_float3(n_target_g.x,n_target_g.y,n_target_g.z), make_float3(n_input_g.x,n_input_g.y,n_input_g.z)));
if (d <dist_thres&&d_normal_sin<norm_sin_thres)
{
float4 pre_corrs_v=corres_vertices.at(screen_pos.x, screen_pos.y);
if(false==isZero(pre_corrs_v))
{ //overlap corrs
float4 delta_pre_g=pre_corrs_v-v_target_g;
float pre_d=norm(make_float3(delta_pre_g.x,delta_pre_g.y,delta_pre_g.z));
if(pre_d<d)
{
// atomicAdd(corrs_count, 1);
return;//use the previous corrs
}
}
corres_vertices.at(screen_pos.x, screen_pos.y)= v_input_g;
}
}
void cudaProjectionMapFindCorrs(unsigned pyramid_level,
const Mat44& cur_transform,
const Mat44& last_transform_inv,
const CameraParams& depth_camera_params,
float dist_thres,
float norm_sin_thres)
{
Point4fMap2D input_v=CudaDeviceDataMan::instance()->new_vertices_pyramid[pyramid_level];
Vector4fMap2D input_n=CudaDeviceDataMan::instance()->new_normals_pyramid[pyramid_level];
Point4fMap2D target_v=CudaDeviceDataMan::instance()->model_vertices_pyramid[pyramid_level];
Vector4fMap2D target_n=CudaDeviceDataMan::instance()->model_normals_pyramid[pyramid_level];
Point4fMap2D corrs_v=CudaDeviceDataMan::instance()->corrs_vertices_pyramid[pyramid_level];
corrs_v.clearData();
// unsigned * corrs_count=CudaDeviceDataMan::instance()->debug_count;
// hipMemset((void*)corrs_count,0,sizeof(unsigned));
const dim3 blockSize(BLOCK_SIZE_2D_X, BLOCK_SIZE_2D_Y);
const dim3 gridSize(divUp(input_v.cols(), BLOCK_SIZE_2D_X), divUp(input_v.rows(), BLOCK_SIZE_2D_Y));
hipLaunchKernelGGL(( findCorrespondecesKernel), dim3(gridSize),dim3(blockSize), 0, 0, input_v,
input_n,
target_v,
target_n,
depth_camera_params,
cur_transform,
last_transform_inv,
corrs_v,
dist_thres,
norm_sin_thres
);
hipDeviceSynchronize();
// int corrs_count_cpu;
// hipMemcpy(&corrs_count_cpu,corrs_count,sizeof(unsigned),hipMemcpyDeviceToHost);
// cout<<"overlap corrs find count: "<<corrs_count_cpu<<endl;
}
| b2c568d16797d905a699d18e549178f5c7d554d0.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include "CudaWrappers.h"
#include "CudaDeviceDataMan.h"
#include "DepthCamera.h"
__global__ void findCorrespondecesKernel(const Point4fMap2D input_vertices,
const Vector4fMap2D input_normals,
const Point4fMap2D target_vertices,
const Vector4fMap2D target_normals,
const CameraParams depth_camera_params,
const Mat44 cur_transform,
const Mat44 last_transform_inv,
Point4fMap2D corres_vertices,
float dist_thres,
float norm_sin_thres
)
{ // precondition: corres_vertices has been cleared
// find the correspondence of each input point in the target view
const unsigned x = blockDim.x*blockIdx.x + threadIdx.x;
const unsigned y = blockDim.y*blockIdx.y + threadIdx.y;
const unsigned cols=input_vertices.cols();
const unsigned rows=input_vertices.rows();
if (x >= cols||y>=rows)return;
float4 input_v_data = input_vertices.at(x, y);
float4 input_n_data = input_normals.at(x, y);
if (isZero(input_n_data) ) return;
float4 v_input_g = cur_transform* input_v_data ;
float4 n_input_g =cur_transform* input_n_data;
float4 v_cp = last_transform_inv*v_input_g;
int2 screen_pos= DepthCamera::projectSkeletonToScreen(make_float3(v_cp.x,v_cp.y,v_cp.z),depth_camera_params);
if (screen_pos.x < 0 || screen_pos.x >= cols || screen_pos.y < 0 || screen_pos.y >= rows)
{
return;
}
float4 n_target_g = target_normals.at(screen_pos.x,screen_pos.y);
if (isZero(n_target_g))return;
float4 v_target_g = target_vertices.at(screen_pos.x, screen_pos.y);
float4 delta_g=v_target_g - v_input_g;
float d = norm(make_float3(delta_g.x,delta_g.y,delta_g.z));
float d_normal_sin = norm(cross(make_float3(n_target_g.x,n_target_g.y,n_target_g.z), make_float3(n_input_g.x,n_input_g.y,n_input_g.z)));
if (d <dist_thres&&d_normal_sin<norm_sin_thres)
{
float4 pre_corrs_v=corres_vertices.at(screen_pos.x, screen_pos.y);
if(false==isZero(pre_corrs_v))
{ //overlap corrs
float4 delta_pre_g=pre_corrs_v-v_target_g;
float pre_d=norm(make_float3(delta_pre_g.x,delta_pre_g.y,delta_pre_g.z));
if(pre_d<d)
{
// atomicAdd(corrs_count, 1);
return;//use the previous corrs
}
}
corres_vertices.at(screen_pos.x, screen_pos.y)= v_input_g;
}
}
void cudaProjectionMapFindCorrs(unsigned pyramid_level,
const Mat44& cur_transform,
const Mat44& last_transform_inv,
const CameraParams& depth_camera_params,
float dist_thres,
float norm_sin_thres)
{
Point4fMap2D input_v=CudaDeviceDataMan::instance()->new_vertices_pyramid[pyramid_level];
Vector4fMap2D input_n=CudaDeviceDataMan::instance()->new_normals_pyramid[pyramid_level];
Point4fMap2D target_v=CudaDeviceDataMan::instance()->model_vertices_pyramid[pyramid_level];
Vector4fMap2D target_n=CudaDeviceDataMan::instance()->model_normals_pyramid[pyramid_level];
Point4fMap2D corrs_v=CudaDeviceDataMan::instance()->corrs_vertices_pyramid[pyramid_level];
corrs_v.clearData();
// unsigned * corrs_count=CudaDeviceDataMan::instance()->debug_count;
// cudaMemset((void*)corrs_count,0,sizeof(unsigned));
const dim3 blockSize(BLOCK_SIZE_2D_X, BLOCK_SIZE_2D_Y);
const dim3 gridSize(divUp(input_v.cols(), BLOCK_SIZE_2D_X), divUp(input_v.rows(), BLOCK_SIZE_2D_Y));
findCorrespondecesKernel<<<gridSize,blockSize>>>(input_v,
input_n,
target_v,
target_n,
depth_camera_params,
cur_transform,
last_transform_inv,
corrs_v,
dist_thres,
norm_sin_thres
);
cudaDeviceSynchronize();
// int corrs_count_cpu;
// cudaMemcpy(&corrs_count_cpu,corrs_count,sizeof(unsigned),cudaMemcpyDeviceToHost);
// cout<<"overlap corrs find count: "<<corrs_count_cpu<<endl;
}
|
8a86df607fb284955a3495755a339b4ff203180e.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA programming
// Exercise n. 01
#include <errno.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
// Prototype
__host__ void print_dev_prop(hipDeviceProp_t dev_prop);
int main(void)
{
// Number of CUDA-capable devices attached to this system
int dev_cnt;
hipGetDeviceCount(&dev_cnt);
// Calculate the theoretical peak bandwidth for each device
for(int i = 0; i < dev_cnt; i++)
{
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop, i);
printf("Device Number: %d\n", i);
print_dev_prop(dev_prop);
}
}
// Print device properties
__host__ void print_dev_prop(hipDeviceProp_t dev_prop)
{
printf(" Major revision number: %d\n", dev_prop.major);
printf(" Minor revision number: %d\n", dev_prop.minor);
printf(" Name: %s\n", dev_prop.name);
printf(" Total global memory: %zu\n", dev_prop.totalGlobalMem);
printf(" Total shared memory per block: %zu\n", dev_prop.sharedMemPerBlock);
printf(" Total registers per block: %d\n", dev_prop.regsPerBlock);
printf(" Warp size: %d\n", dev_prop.warpSize);
printf(" Maximum memory pitch: %zu\n", dev_prop.memPitch);
printf(" Maximum threads per block: %d\n", dev_prop.maxThreadsPerBlock);
for(int i = 0; i < 3; ++i)
printf(" Maximum block dimension #%02d: %d\n", i, dev_prop.maxThreadsDim[i]);
for(int i = 0; i < 3; ++i)
printf(" Maximum grid dimension #%02d: %d\n", i, dev_prop.maxGridSize[i]);
printf(" Clock rate: %d\n", dev_prop.clockRate);
printf(" Memory Bus Width (bits): %d\n", dev_prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * dev_prop.memoryClockRate * (dev_prop.memoryBusWidth / 8) / 1.0e6);
printf(" Total constant memory: %zu\n", dev_prop.totalConstMem);
printf(" Texture alignment: %zu\n", dev_prop.textureAlignment);
printf(" Concurrent copy and execution: %s\n", (dev_prop.deviceOverlap ? "Yes" : "No"));
printf(" Number of multiprocessors: %d\n", dev_prop.multiProcessorCount);
printf(" Kernel execution timeout: %s\n", (dev_prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
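// A small follow-up sketch, not part of the original exercise: select the device
// with the highest theoretical peak bandwidth (same formula as printed above) and
// make it the current device.
__host__ int select_fastest_device(void)
{
    int dev_cnt = 0;
    hipGetDeviceCount(&dev_cnt);
    int best = 0;
    double best_bw = -1.0;
    for (int i = 0; i < dev_cnt; i++)
    {
        hipDeviceProp_t dev_prop;
        hipGetDeviceProperties(&dev_prop, i);
        double bw = 2.0 * dev_prop.memoryClockRate * (dev_prop.memoryBusWidth / 8) / 1.0e6;
        if (bw > best_bw)
        {
            best_bw = bw;
            best = i;
        }
    }
    hipSetDevice(best);
    return best;
}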
| 8a86df607fb284955a3495755a339b4ff203180e.cu | // CUDA programming
// Exercise n. 01
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
// Prototype
__host__ void print_dev_prop(cudaDeviceProp dev_prop);
int main(void)
{
// Number of CUDA-capable devices attached to this system
int dev_cnt;
cudaGetDeviceCount(&dev_cnt);
// Calculate the theoretical peak bandwidth for each device
for(int i = 0; i < dev_cnt; i++)
{
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, i);
printf("Device Number: %d\n", i);
print_dev_prop(dev_prop);
}
}
// Print device properties
__host__ void print_dev_prop(cudaDeviceProp dev_prop)
{
printf(" Major revision number: %d\n", dev_prop.major);
printf(" Minor revision number: %d\n", dev_prop.minor);
printf(" Name: %s\n", dev_prop.name);
printf(" Total global memory: %zu\n", dev_prop.totalGlobalMem);
printf(" Total shared memory per block: %zu\n", dev_prop.sharedMemPerBlock);
printf(" Total registers per block: %d\n", dev_prop.regsPerBlock);
printf(" Warp size: %d\n", dev_prop.warpSize);
printf(" Maximum memory pitch: %zu\n", dev_prop.memPitch);
printf(" Maximum threads per block: %d\n", dev_prop.maxThreadsPerBlock);
for(int i = 0; i < 3; ++i)
printf(" Maximum block dimension #%02d: %d\n", i, dev_prop.maxThreadsDim[i]);
for(int i = 0; i < 3; ++i)
printf(" Maximum grid dimension #%02d: %d\n", i, dev_prop.maxGridSize[i]);
printf(" Clock rate: %d\n", dev_prop.clockRate);
printf(" Memory Bus Width (bits): %d\n", dev_prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * dev_prop.memoryClockRate * (dev_prop.memoryBusWidth / 8) / 1.0e6);
printf(" Total constant memory: %zu\n", dev_prop.totalConstMem);
printf(" Texture alignment: %zu\n", dev_prop.textureAlignment);
printf(" Concurrent copy and execution: %s\n", (dev_prop.deviceOverlap ? "Yes" : "No"));
printf(" Number of multiprocessors: %d\n", dev_prop.multiProcessorCount);
printf(" Kernel execution timeout: %s\n", (dev_prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
|
823a3c1548294a3757a6adfa8a9b1341245bfddb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <chrono>
#include <hiprand/hiprand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include <../include/timer.cuh>
#include "../include/musket.cuh"
#include "../include/spfb32768_0.cuh"
const double PI = 3.141592653589793;
//Float2::Float2() : x(), y() {}
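// FIR_map_index_in_place_array_functor is the FIR stage of the polyphase filter
// bank: each output element accumulates `taps` input samples spaced `channels`
// apart, weighted by the corresponding entries of the taps*channels coefficient
// bank, and stores the result in the real part of a float2.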
struct FIR_map_index_in_place_array_functor{
FIR_map_index_in_place_array_functor(const mkt::DArray<float>& _input, const mkt::DArray<float>& _coeff) : input(_input), coeff(_coeff){}
~FIR_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 a){
float2 newa; newa.x = 0.0f;newa.y=0.0f;
if(((Index) <= ((channels) * (spectra)))){
for(int j = 0; ((j) < (taps)); j++){
newa.x += (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
input.get_data_local(((Index) + ((j) * (channels)))) * coeff.get_data_local(((Index%(taps*channels)) + ((j) * (channels))))
);
}
}
return (newa);
}
void init(int device){
input.init(device);
coeff.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int taps;
int channels;
int spectra;
mkt::DeviceArray<float> input;
mkt::DeviceArray<float> coeff;
};
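// Fetch_map_index_in_place_array_functor gathers each element's butterfly partner
// for FFT stage `counter`: index i is paired with i XOR 2^(log2size - 1 - counter),
// the index that differs only in the bit handled by this stage (for a 16-point
// transform, stage 0 pairs 0 with 8, 1 with 9, and so on).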
struct Fetch_map_index_in_place_array_functor{
Fetch_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_output) : c_output(_c_output){}
~Fetch_map_index_in_place_array_functor() {}
__device__
auto operator()(int i, float2 Ti){
return // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_output.get_data_local((i ^ (int) __powf(2, (((log2size) - 1) - (counter)))))
;
}
void init(int device){
c_output.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
mkt::DeviceArray<float2> c_output;
};
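// Combine_map_index_in_place_array_functor performs the butterfly combine for
// stage `counter`: b2 is the bit-reversed value of the top counter+1 bits of
// Index, the twiddle factor is exp(-i * 2*pi/Problemsize * (b2 << (log2size -
// counter - 1))), and one operand is scaled by that twiddle before the local
// element and the partner value read from c_input_double are summed.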
struct Combine_map_index_in_place_array_functor{
Combine_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_input_double) : c_input_double(_c_input_double){}
~Combine_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 Ai){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
int b = Index >> (log2size - counter - 1);
int b2 = 0;
for(int l = 0;l <= counter;l++) {
b2 = (b & 1) ? 2 * b2 + 1 : 2 * b2;
b >>= 1;
}
double temp = 2.0 * pi / Problemsize * (b2 << (log2size - counter - 1));
float2 intermediateresult;
intermediateresult.x = __cosf(temp);
intermediateresult.y = -__sinf(temp);
if(((Index) == __powf(2, (((log2size) - 1) - (counter))))){
float2 mult_res;
mult_res.x = (((intermediateresult).x * (Ai).x) - ((intermediateresult).y * (Ai).y));
mult_res.y = (((intermediateresult).x * (Ai).y) + ((intermediateresult).y * (Ai).x));
float2 add_res;
add_res.x = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
+ (mult_res).x);
add_res.y = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
+ (mult_res).y);
newa = (add_res);
}
else {
float2 mult_res2;
mult_res2.x = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
) - ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
));
mult_res2.y = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
) + ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
));
float2 add_res2;
add_res2.x = ((Ai).x + (mult_res2).x);
add_res2.y = ((Ai).y + (mult_res2).y);
newa = (add_res2);
}
return (newa);
}
void init(int device){
c_input_double.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
double pi;
int Problemsize;
mkt::DeviceArray<float2> c_input_double;
};
int main(int argc, char** argv) {
mkt::init();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
GpuTimer timer;
double allocation = 0.0,fill = 0.0, rest = 0.0, rest2 = 0.0;
timer.Start();
mkt::DArray<float> input(0, 268533760, 268533760, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
// mkt::DArray<float> input_double(0, 268435456, 268435456, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_input_double(0, 268435456, 268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_output(0, 268435456, 268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float> coeff(0, 131072, 131072, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
timer.Stop();
allocation += timer.Elapsed();
// timer.Start();
srand(1);
for (int n = 0; n < 268533760; n++) {
input[n] = (rand() / (float)RAND_MAX);
}
for (int n = 0; n < 131072; n++) {
coeff[n] = (rand() / (float)RAND_MAX);
}
timer.Start();
input.update_devices();
coeff.update_devices();
timer.Stop();
fill += timer.Elapsed();
timer.Start();
FIR_map_index_in_place_array_functor fIR_map_index_in_place_array_functor{input, coeff};
// Float_to_float2_map_index_in_place_array_functor float_to_float2_map_index_in_place_array_functor{input_double};
Fetch_map_index_in_place_array_functor fetch_map_index_in_place_array_functor{c_output};
Combine_map_index_in_place_array_functor combine_map_index_in_place_array_functor{c_input_double};
timer.Stop();
rest += timer.Elapsed();
double fir_time = 0.0, fft_time = 0.0, R2C_time = 0.0;
int ntaps = 4;
int nchans = 32768;
int nspectra = 8192;
int log2size = 15;
timer.Start();
fIR_map_index_in_place_array_functor.taps = (ntaps);fIR_map_index_in_place_array_functor.channels = (nchans);fIR_map_index_in_place_array_functor.spectra = (nspectra);
mkt::map_index_in_place<float2, FIR_map_index_in_place_array_functor>(c_input_double, fIR_map_index_in_place_array_functor);
timer.Stop();
fir_time += timer.Elapsed();
timer.Start();
//mkt::map_index_in_place<float2, Float_to_float2_map_index_in_place_array_functor>(c_output, float_to_float2_map_index_in_place_array_functor);
timer.Stop();
R2C_time += timer.Elapsed();
for(int j = 0; ((j) < (log2size)); j++){
double fetch_init = 0.0, fetch_exec = 0.0, combine_init = 0.0, combine_exec = 0.0;
timer.Start();
fetch_map_index_in_place_array_functor.counter = (j);
fetch_map_index_in_place_array_functor.log2size = (log2size);
timer.Stop();
fetch_init += timer.Elapsed();
timer.Start();
mkt::map_index_in_place<float2, Fetch_map_index_in_place_array_functor>(c_input_double, fetch_map_index_in_place_array_functor);
timer.Stop();
fetch_exec += timer.Elapsed();
timer.Start();
combine_map_index_in_place_array_functor.counter = (j);combine_map_index_in_place_array_functor.log2size = (log2size);combine_map_index_in_place_array_functor.pi = (PI);combine_map_index_in_place_array_functor.Problemsize = 16;
timer.Stop();
combine_init += timer.Elapsed();
timer.Start();
mkt::map_index_in_place<float2, Combine_map_index_in_place_array_functor>(c_output, combine_map_index_in_place_array_functor);
timer.Stop();
combine_exec += timer.Elapsed();
printf("\n %d, %f,%f,%f,%f", j, fetch_init, fetch_exec, combine_init, combine_exec);
}
//timer.Stop();
//fft_time += timer.Elapsed();
timer.Start();
c_output.update_self();
timer.Stop();
double out = 0.0;
out += timer.Elapsed();
//printf("\n%.5f;%.5f;%.5f;%f;%f;%f,%f", fir_time, fft_time, R2C_time, allocation, fill, rest,out);
return EXIT_SUCCESS;
}
| 823a3c1548294a3757a6adfa8a9b1341245bfddb.cu | #include <cuda.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <chrono>
#include <curand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include <../include/timer.cuh>
#include "../include/musket.cuh"
#include "../include/spfb32768_0.cuh"
const double PI = 3.141592653589793;
//Float2::Float2() : x(), y() {}
struct FIR_map_index_in_place_array_functor{
FIR_map_index_in_place_array_functor(const mkt::DArray<float>& _input, const mkt::DArray<float>& _coeff) : input(_input), coeff(_coeff){}
~FIR_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 a){
float2 newa; newa.x = 0.0f;newa.y=0.0f;
if(((Index) <= ((channels) * (spectra)))){
for(int j = 0; ((j) < (taps)); j++){
newa.x += (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
input.get_data_local(((Index) + ((j) * (channels)))) * coeff.get_data_local(((Index%(taps*channels)) + ((j) * (channels))))
);
}
}
return (newa);
}
void init(int device){
input.init(device);
coeff.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int taps;
int channels;
int spectra;
mkt::DeviceArray<float> input;
mkt::DeviceArray<float> coeff;
};
struct Fetch_map_index_in_place_array_functor{
Fetch_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_output) : c_output(_c_output){}
~Fetch_map_index_in_place_array_functor() {}
__device__
auto operator()(int i, float2 Ti){
return // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_output.get_data_local((i ^ (int) __powf(2, (((log2size) - 1) - (counter)))))
;
}
void init(int device){
c_output.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
mkt::DeviceArray<float2> c_output;
};
struct Combine_map_index_in_place_array_functor{
Combine_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_input_double) : c_input_double(_c_input_double){}
~Combine_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 Ai){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
int b = Index >> (log2size - counter - 1);
int b2 = 0;
for(int l = 0;l <= counter;l++) {
b2 = (b & 1) ? 2 * b2 + 1 : 2 * b2;
b >>= 1;
}
double temp = 2.0 * pi / Problemsize * (b2 << (log2size - counter - 1));
float2 intermediateresult;
intermediateresult.x = __cosf(temp);
intermediateresult.y = -__sinf(temp);
if(((Index) == __powf(2, (((log2size) - 1) - (counter))))){
float2 mult_res;
mult_res.x = (((intermediateresult).x * (Ai).x) - ((intermediateresult).y * (Ai).y));
mult_res.y = (((intermediateresult).x * (Ai).y) + ((intermediateresult).y * (Ai).x));
float2 add_res;
add_res.x = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
+ (mult_res).x);
add_res.y = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
+ (mult_res).y);
newa = (add_res);
}
else {
float2 mult_res2;
mult_res2.x = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
) - ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
));
mult_res2.y = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
) + ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
));
float2 add_res2;
add_res2.x = ((Ai).x + (mult_res2).x);
add_res2.y = ((Ai).y + (mult_res2).y);
newa = (add_res2);
}
return (newa);
}
void init(int device){
c_input_double.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
double pi;
int Problemsize;
mkt::DeviceArray<float2> c_input_double;
};
int main(int argc, char** argv) {
mkt::init();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
GpuTimer timer;
double allocation = 0.0,fill = 0.0, rest = 0.0, rest2 = 0.0;
timer.Start();
mkt::DArray<float> input(0, 268533760, 268533760, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
// mkt::DArray<float> input_double(0, 268435456, 268435456, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_input_double(0, 268435456, 268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_output(0, 268435456, 268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float> coeff(0, 131072, 131072, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
timer.Stop();
allocation += timer.Elapsed();
// timer.Start();
srand(1);
for (int n = 0; n < 268533760; n++) {
input[n] = (rand() / (float)RAND_MAX);
}
for (int n = 0; n < 131072; n++) {
coeff[n] = (rand() / (float)RAND_MAX);
}
timer.Start();
input.update_devices();
coeff.update_devices();
timer.Stop();
fill += timer.Elapsed();
timer.Start();
FIR_map_index_in_place_array_functor fIR_map_index_in_place_array_functor{input, coeff};
// Float_to_float2_map_index_in_place_array_functor float_to_float2_map_index_in_place_array_functor{input_double};
Fetch_map_index_in_place_array_functor fetch_map_index_in_place_array_functor{c_output};
Combine_map_index_in_place_array_functor combine_map_index_in_place_array_functor{c_input_double};
timer.Stop();
rest += timer.Elapsed();
double fir_time = 0.0, fft_time = 0.0, R2C_time = 0.0;
int ntaps = 4;
int nchans = 32768;
int nspectra = 8192;
int log2size = 15;
timer.Start();
fIR_map_index_in_place_array_functor.taps = (ntaps);fIR_map_index_in_place_array_functor.channels = (nchans);fIR_map_index_in_place_array_functor.spectra = (nspectra);
mkt::map_index_in_place<float2, FIR_map_index_in_place_array_functor>(c_input_double, fIR_map_index_in_place_array_functor);
timer.Stop();
fir_time += timer.Elapsed();
timer.Start();
//mkt::map_index_in_place<float2, Float_to_float2_map_index_in_place_array_functor>(c_output, float_to_float2_map_index_in_place_array_functor);
timer.Stop();
R2C_time += timer.Elapsed();
for(int j = 0; ((j) < (log2size)); j++){
double fetch_init = 0.0, fetch_exec = 0.0, combine_init = 0.0, combine_exec = 0.0;
timer.Start();
fetch_map_index_in_place_array_functor.counter = (j);
fetch_map_index_in_place_array_functor.log2size = (log2size);
timer.Stop();
fetch_init += timer.Elapsed();
timer.Start();
mkt::map_index_in_place<float2, Fetch_map_index_in_place_array_functor>(c_input_double, fetch_map_index_in_place_array_functor);
timer.Stop();
fetch_exec += timer.Elapsed();
timer.Start();
combine_map_index_in_place_array_functor.counter = (j);combine_map_index_in_place_array_functor.log2size = (log2size);combine_map_index_in_place_array_functor.pi = (PI);combine_map_index_in_place_array_functor.Problemsize = 16;
timer.Stop();
combine_init += timer.Elapsed();
timer.Start();
mkt::map_index_in_place<float2, Combine_map_index_in_place_array_functor>(c_output, combine_map_index_in_place_array_functor);
timer.Stop();
combine_exec += timer.Elapsed();
printf("\n %d, %f,%f,%f,%f", j, fetch_init, fetch_exec, combine_init, combine_exec);
}
//timer.Stop();
//fft_time += timer.Elapsed();
timer.Start();
c_output.update_self();
timer.Stop();
double out = 0.0;
out += timer.Elapsed();
//printf("\n%.5f;%.5f;%.5f;%f;%f;%f,%f", fir_time, fft_time, R2C_time, allocation, fill, rest,out);
return EXIT_SUCCESS;
}
|
fe5829f9d6062fa56b88bead1d167e6ff638b99e.hip | // !!! This is a file automatically generated by hipify!!!
// random generator includes
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/xor_combine_engine.h>
#include <thrust/random.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <boost/math/tools/roots.hpp>
#include <thrust/tuple.h>
#include <thrust/iterator/counting_iterator.h>
#include <map>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <math.h>
#include <string>
#include <boost/math/tools/roots.hpp>
#include <thrust/tuple.h>
// load float 6 datastructure
// #include "STE_DataStructures.cuh"
// load tfsTableData datastructure
// #include "STE_TFS.cuh"
// load ibs header
#include "STE_IBS.cuh"
// Numerical Recipes: computes Carlson's elliptic integral of the second kind,
// RD(x, y, z). x and y must be nonnegative, and at most one can be zero; z must
// be positive. TINY must be at least twice the negative 2/3 power of the machine
// overflow limit, and BIG at most 0.1*ERRTOL times the negative 2/3 power of the
// machine underflow limit.
__host__ __device__ float rd_s(float3 in) {
float ERRTOL = 0.05;
// float TINY = 1.0e-25;
// float BIG = 4.5e21;
float C1 = 3.0/14.0;
float C2 = 1.0/6.0;
float C3 = 9.0/22.0;
float C4 = 3.0/26.0;
float C5 = 0.25 * C3;
float C6 = 1.5*C4;
float xt = in.x;
float yt = in.y;
float zt = in.z;
float sum = 0.0;
float fac = 1.0;
int iter = 0;
float sqrtx, sqrty, sqrtz, alamb, ave, delx, dely, delz, maxi ;
do
{
iter++;
sqrtx = sqrt(xt);
sqrty = sqrt(yt);
sqrtz = sqrt(zt);
alamb = sqrtx * (sqrty + sqrtz) + sqrty * sqrtz;
sum = sum + fac / (sqrtz * (zt + alamb));
fac = 0.25 * fac;
xt = 0.25 * (xt + alamb);
yt = 0.25 * (yt + alamb);
zt = 0.25 * (zt + alamb);
ave = 0.2 * (xt + yt + 3.0 * zt);
delx = (ave - xt) / ave;
dely = (ave - yt) / ave;
delz = (ave - zt) / ave;
maxi = abs(delx);
if (abs(dely) > maxi)
maxi = abs(dely);
if (abs(delz) > maxi)
maxi = abs(delz);
}
while (maxi > ERRTOL);
float ea = delx * dely;
float eb = delz * delz;
float ec = ea - eb;
float ed = ea - 6.0 * eb;
float ee = ed + ec + ec;
float rd_s = 3.0 * sum + fac * (1.0 + ed *(-C1+C5*ed-C6*delz*ee)+delz*(C2*ee+delz*(-C3*ec+delz*C4*ea)))/(ave*sqrt(ave));
return rd_s;
};
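// Host-side sanity check for rd_s, a sketch that is not part of the original
// source: for equal arguments Carlson's integral reduces to RD(x,x,x) = x^(-3/2),
// so rd_s({4,4,4}) should come out close to 4^(-1.5) = 0.125.
__host__ void rd_s_sanity_check()
{
    float approx = rd_s(make_float3(4.0f, 4.0f, 4.0f));
    std::cout << "rd_s(4,4,4) = " << approx << " (expected ~0.125)" << std::endl;
}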
__host__ __device__ float fmohl(float a, float b, float q, int fmohlNumPoints) {
float result;
float sum = 0;
float du = 1.0/fmohlNumPoints;
float u, cp, cq, dsum;
for(int k=0;k<fmohlNumPoints;k++)
{
u = k*du;
cp = sqrt(a * a + (1 - a * a) * u * u);
cq = sqrt(b * b + (1 - b * b) * u * u);
dsum = 2*log(q*(1/cp+1/cq)/2) - CUDA_EULER_F;
dsum *= (1-3*u*u)/(cp*cq);
if (k==0)
dsum = dsum / 2;
if (k==fmohlNumPoints)
dsum = dsum / 2;
sum += dsum;
}
result = 8 * CUDA_PI_F * du * sum;
return result;
};
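// ibsPiwinskiSmooth evaluates the Piwinski intra-beam-scattering growth rates in
// the smooth-lattice approximation: ring-averaged beta functions come in through
// params and the average horizontal dispersion is approximated by
// acceleratorLength / (2*pi*gammaTransition^2). The returned float3 packs the
// horizontal, vertical and longitudinal rates (alfax0, alfay0, alfap0).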
__host__ __device__ float3 ibsPiwinskiSmooth(ibsparameters& params){
float d;
float xdisp = params.acceleratorLength / (2 * CUDA_PI_F * pow(params.gammaTransition,2));
float rmsx = sqrt(params.emitx * params.betx);
float rmsy = sqrt(params.emity * params.bety);
float sigh2inv = 1.0 / pow(params.dponp,2) + pow((xdisp / rmsx),2);
float sigh = 1.0 / sqrt(sigh2inv);
float atop = pow(params.ParticleRadius,2) * CUDA_C_F * params.numberRealParticles;
float coeff = params.emitx * params.emity * params.sigs * params.dponp;
float abot = 64.0 * pow(CUDA_PI_F,2) * pow(params.betar,3) * pow(params.gamma0,4) * coeff;
// ca is Piwinski's A
float ca = atop/abot;
float a = sigh * params.betx / (params.gamma0 * rmsx);
float b = sigh * params.bety / (params.gamma0 * rmsy);
if (rmsx <=rmsy)
d = rmsx;
else
d = rmsy;
float q = sigh * params.betar * sqrt(2 * d / params.ParticleRadius);
float fmohlp = fmohl(a,b,q,params.fmohlNumPoints);
float fmohlx = fmohl(1/a,b/a,q/a,params.fmohlNumPoints);
float fmohly = fmohl(1/b,a/b,q/b,params.fmohlNumPoints);
float alfap0 = ca * fmohlp * pow((sigh/params.dponp),2);
float alfax0 = ca * (fmohlx + fmohlp * pow((xdisp * sigh / rmsx),2));
float alfay0 = ca * fmohly;
return make_float3(alfax0, alfay0, alfap0);
};
struct ibsPiwinskiLattice{
ibsparameters params;
ibsPiwinskiLattice(ibsparameters& params) : params(params) {}
// __host__ __device__
// radiationIntegrals operator()(tfsTableData& tfsAcceleratorElement, radiationIntegralsParameters& radiationIntParameters) const
__host__ __device__
float3 operator()(tfsTableData& tfsrow) const
{
float d;
float atop = pow(params.ParticleRadius,2) * CUDA_C_F * params.numberRealParticles;
float coeff = params.emitx * params.emity * params.sigs * params.dponp;
float abot = 64.0 * pow(CUDA_PI_F,2) * pow(params.betar,3) * pow(params.gamma0,4) * coeff;
// ca is Piwinski's A
float ca = atop/abot;
float rmsx = sqrt(params.emitx * tfsrow.betx);
float rmsy = sqrt(params.emity * tfsrow.bety);
if (rmsx <= rmsy)
d = rmsx;
else
d = rmsy;
float sigh2inv = 1.0 / pow(params.dponp,2) + pow((tfsrow.dx / rmsx),2);
float sigh = 1.0 / sqrt(sigh2inv);
float a = sigh * tfsrow.betx / (params.gamma0 * rmsx);
float b = sigh * tfsrow.bety / (params.gamma0 * rmsy);
float q = sigh * params.betar * sqrt(2 * d / params.ParticleRadius);
float fmohlp = fmohl(a,b,q,params.fmohlNumPoints);
float fmohlx = fmohl(1/a,b/a,q/a,params.fmohlNumPoints);
float fmohly = fmohl(1/b,a/b,q/b,params.fmohlNumPoints);
float alfap0 = ca * fmohlp * pow((sigh/params.dponp),2) * tfsrow.l / params.acceleratorLength;
float alfax0 = ca * (fmohlx + fmohlp * pow((tfsrow.dx * sigh / rmsx),2)) * tfsrow.l / params.acceleratorLength;
float alfay0 = ca * fmohly * tfsrow.l / params.acceleratorLength;
return make_float3(alfax0, alfay0, alfap0);
};
};
struct ibsmodPiwinskiLattice{
ibsparameters params;
ibsmodPiwinskiLattice(ibsparameters& params) : params(params) {}
// __host__ __device__
// radiationIntegrals operator()(tfsTableData& tfsAcceleratorElement, radiationIntegralsParameters& radiationIntParameters) const
__host__ __device__
float3 operator()(tfsTableData& tfsrow) const
{
float d;
float atop = pow(params.ParticleRadius,2) * CUDA_C_F * params.numberRealParticles;
float coeff = params.emitx * params.emity * params.sigs * params.dponp;
float abot = 64.0 * pow(CUDA_PI_F,2) * pow(params.betar,3) * pow(params.gamma0,4) * coeff;
// ca is Piwinski's A
float ca = atop/abot;
float H = (pow(tfsrow.dx,2)+ pow((tfsrow.betx * tfsrow.dpx - 0.5 * tfsrow.dx * (-2 * tfsrow.alfx)),2)) / tfsrow.betx;
float rmsx = sqrt(params.emitx * tfsrow.betx);
float rmsy = sqrt(params.emity * tfsrow.bety);
if (rmsx <= rmsy)
d = rmsx;
else
d = rmsy;
float sigh2inv = 1.0 / pow(params.dponp,2) + (H / params.emitx);
float sigh = 1.0 / sqrt(sigh2inv);
float a = sigh * tfsrow.betx / (params.gamma0 * rmsx);
float b = sigh * tfsrow.bety / (params.gamma0 * rmsy);
float q = sigh * params.betar * sqrt(2 * d / params.ParticleRadius);
float fmohlp = fmohl(a,b,q,params.fmohlNumPoints);
float fmohlx = fmohl(1/a,b/a,q/a,params.fmohlNumPoints);
float fmohly = fmohl(1/b,a/b,q/b,params.fmohlNumPoints);
float alfap0 = ca * fmohlp * pow((sigh/params.dponp),2) * tfsrow.l / params.acceleratorLength;
float alfax0 = ca * (fmohlx + fmohlp * pow((tfsrow.dx * sigh / rmsx),2)) * tfsrow.l / params.acceleratorLength;
float alfay0 = ca * fmohly * tfsrow.l / params.acceleratorLength;
return make_float3(alfax0, alfay0, alfap0);
};
};
struct ibsnagaitsev{
ibsparameters params;
ibsnagaitsev(ibsparameters& params) : params(params) {}
__host__ __device__
float3 operator()(tfsTableData& tfsrow) const
{
float phi = tfsrow.dpx + (tfsrow.alfx * (tfsrow.dx/tfsrow.betx));
float axx = tfsrow.betx / params.emitx;
float ayy = tfsrow.bety / params.emity;
float sigmax = sqrt( pow(tfsrow.dx,2) * pow(params.dponp,2) + params.emitx * tfsrow.betx);
float sigmay = sqrt(params.emity * tfsrow.bety);
float as = axx * (pow(tfsrow.dx,2)/pow(tfsrow.betx,2) + pow(phi,2)) + (1/(pow(params.dponp,2)));
float a1 = 0.5 * (axx + pow(params.gamma0,2) * as);
float a2 = 0.5 * (axx - pow(params.gamma0,2) * as);
float b1 = sqrt(pow(a2,2) + pow(params.gamma0,2) * pow(axx,2) * pow(phi,2));
float lambda1 = ayy;
float lambda2 = a1 + b1;
float lambda3 = a1 - b1;
float R1 = (1/lambda1) * rd_s(make_float3(1./lambda2,1./lambda3,1./lambda1));
float R2 = (1/lambda2) * rd_s(make_float3(1./lambda3,1./lambda1,1./lambda2));
float R3 = 3*sqrt((lambda1*lambda2)/lambda3)-(lambda1/lambda3)*R1-(lambda2/lambda3)*R2;
float sp = (pow(params.gamma0,2)/2.0) * ( 2.0*R1 - R2*( 1.0 - 3.0*a2/b1 ) - R3*( 1.0 + 3.0*a2/b1 ));
float sx = 0.50 * (2.0*R1 - R2*(1.0 + 3.0*a2/b1) -R3*(1.0 - 3.0*a2/b1));
float sxp=(3.0 * pow(params.gamma0,2)* pow(phi,2)*axx)/b1*(R3-R2);
float alfapp = sp/(sigmax*sigmay);
float alfaxx = (tfsrow.betx/(sigmax*sigmay)) * (sx+sxp+sp*(pow(tfsrow.dx,2)/pow(tfsrow.betx,2) + pow(phi,2)));
float alfayy = (tfsrow.bety/(sigmax*sigmay)) * (-2.0*R1+R2+R3);
float alfap0 = alfapp * tfsrow.l / params.acceleratorLength;
float alfax0 = alfaxx * tfsrow.l / params.acceleratorLength;
float alfay0 = alfayy * tfsrow.l / params.acceleratorLength;
return make_float3(alfax0, alfay0, alfap0);
};
};
/* ******************** */
/* class implementation */
/* ******************** */
STE_IBS::STE_IBS( ibsparameters& params, thrust::device_vector<float6> bunch , thrust::device_vector<tfsTableData> tfsdata){
// update input parameters
params.dponp = (CalcRMS( bunch, params.numberMacroParticles )).delta;
// make a histogram for the distribution longitudinally
histogramTime = ParticleTimesToHistogram(bunch,params.nbins,params.tauhat);
// calculate the ibs growth coefficients used in the ibs routine
ibsGrowthRates = CalculateIBSGrowthRates( params , params.methodIBS , params.phiSynchronous , tfsdata );
ibscoeff = CalcIbsCoeff( params , params.methodIBS , params.phiSynchronous, ibsGrowthRates );
// combine the histogram and coefficients into a vector used in the ibs calculations
// sqrt ( longitudinal coeff * cumul histogram ) - quantity representing the particle line density impact on momentum changes
sqrthistogram = HistogramToSQRTofCumul(histogramTime,ibscoeff.w);
}
template <typename Vector1, typename Vector2>
void STE_IBS::dense_histogram( const Vector1& input , Vector2& histogram , int nbins , float tauhat ){
typedef typename Vector1::value_type ValueType; // input value type
typedef typename Vector2::value_type IndexType; // histogram index type
// copy input data (could be skipped if input is allowed to be modified)
thrust::device_vector<ValueType> data(input);
// print the initial data
// print_vector("initial data", data);
// sort data to bring equal elements together
thrust::sort(data.begin(), data.end());
// print the sorted data
// print_vector("sorted data", data);
// number of histogram bins is equal to the maximum value plus one
IndexType num_bins = nbins+1;
// resize histogram storage
histogram.resize(num_bins);
// find the end of each bin of values
thrust::counting_iterator<IndexType> search_begin(0);
// thrust::device_vector<float> gc_d(nbins+1);
// thrust::fill(gc_d.begin(),gc_d.end(), tauhat / nbins);
// thrust::upper_bound(data.begin(), data.end(),
// gc_d.begin(), gc_d.begin() + num_bins,
// histogram.begin());
thrust::upper_bound(data.begin(), data.end(),
search_begin, search_begin + num_bins,
histogram.begin());
// print the cumulative histogram
// print_vector("cumulative histogram", histogram);
// compute the histogram by taking differences of the cumulative histogram
thrust::adjacent_difference(histogram.begin(), histogram.end(),
histogram.begin());
// print the histogram
// print_vector("histogram", histogram);
};
// takes the integers produced by ParticleTimesToInteger and bins them into a histogram
// necessary for collision and intra beam scattering routines
thrust::device_vector<int> STE_IBS::ParticleTimesToHistogram(thrust::device_vector<float6> data, int nbins, float tauhat){
int n = data.size();
thrust::device_vector<int> timecomponent(n);
thrust::device_vector<int> histogram;
// load the integer time components in a device vector
thrust::transform(data.begin(),data.end(),thrust::make_constant_iterator(make_float2(tauhat,nbins)),timecomponent.begin(),ParticleTimesToInteger());
// binning the times
dense_histogram(timecomponent,histogram,nbins,tauhat);
return histogram;
};
float6 STE_IBS::CalcRMS(thrust::device_vector<float6> distribution, int numberMacroParticles){
float6 sum;
sum.x= 0.0;
sum.px=0.0;
sum.y=0.0;
sum.py=0.0;
sum.t=0.0;
sum.delta=0.0;
float6 average = thrust::reduce(distribution.begin(),distribution.end(),sum,addFloat6());
average.x = - average.x / numberMacroParticles;
average.px = - average.px / numberMacroParticles;
average.y = - average.y / numberMacroParticles;
average.py = - average.py / numberMacroParticles;
average.t = - average.t / numberMacroParticles;
average.delta = - average.delta / numberMacroParticles;
// add the negated averages, i.e. subtract the per-coordinate mean (time shift -> t_synchronous)
thrust::transform(distribution.begin(), distribution.end(),thrust::make_constant_iterator(average),distribution.begin(),addFloat6());
// square
thrust::transform(distribution.begin(),distribution.end(),distribution.begin(),squareFunctor<float6> ());
sum.x= 0.0;
sum.px=0.0;
sum.y=0.0;
sum.py=0.0;
sum.t=0.0;
sum.delta=0.0;
// sum squares
float6 MS = thrust::reduce(distribution.begin(),distribution.end(),sum,addFloat6());
MS.x /= numberMacroParticles;
MS.px /= numberMacroParticles;
MS.y /= numberMacroParticles;
MS.py /= numberMacroParticles;
MS.t /= numberMacroParticles;
MS.delta /= numberMacroParticles;
MS.x = sqrt(MS.x);
MS.px = sqrt(MS.px);
MS.y = sqrt(MS.y);
MS.py = sqrt(MS.py);
MS.t = sqrt(MS.t);
MS.delta = sqrt(MS.delta);
return MS;
};
float3 STE_IBS::CalculateIBSGrowthRates(ibsparameters& params, int method, float tsynchro, thrust::device_vector<tfsTableData> tfsdata){
float3 ibsgrowthrates;
int m = tfsdata.size();
thrust::device_vector<float3> ibsgrowthratestfs(m);
// std::cout << "method = " << method << std::endl;
// float rmsx = sqrt(params.emitx * params.betx);
// float rmsy = sqrt(params.emity * params.bety);
// get growth rates according to selected ibs method
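// method: 0 = Piwinski smooth lattice, 1 = Piwinski element-by-element,
// 2 = modified Piwinski (dispersion invariant H), 3 = Nagaitsev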
switch(method)
{
case 0: {
ibsgrowthrates = ibsPiwinskiSmooth(params);
break;
}
case 1:{
std::cout << "method ok " << std::endl;
thrust::transform(tfsdata.begin(),tfsdata.end(),ibsgrowthratestfs.begin(),ibsPiwinskiLattice(params));
ibsgrowthrates = thrust::reduce(ibsgrowthratestfs.begin(),ibsgrowthratestfs.end(),make_float3(0.0,0.0,0.0),addFloat3());
break;
}
case 2:{
thrust::transform(tfsdata.begin(),tfsdata.end(),ibsgrowthratestfs.begin(),ibsmodPiwinskiLattice(params));
ibsgrowthrates = thrust::reduce(ibsgrowthratestfs.begin(),ibsgrowthratestfs.end(),make_float3(0.0,0.0,0.0),addFloat3());
break;
}
case 3: {
thrust::transform(tfsdata.begin(),tfsdata.end(),ibsgrowthratestfs.begin(),ibsnagaitsev(params));
ibsgrowthrates = thrust::reduce(ibsgrowthratestfs.begin(),ibsgrowthratestfs.end(),make_float3(0.0,0.0,0.0),addFloat3());
float nom = 0.5 * (params.numberRealParticles * pow(params.ParticleRadius,2) * CUDA_C_F * params.coulomblog);
float denom = (12.0 * CUDA_PI_F * pow(params.betar,3) * pow(params.gamma0,5) * params.sigs);
ibsgrowthrates.x = ibsgrowthrates.x / params.emitx * nom / denom;
ibsgrowthrates.y = ibsgrowthrates.y / params.emity * nom / denom;
ibsgrowthrates.z = ibsgrowthrates.z / pow(params.dponp,2) * nom / denom;
break;
}
};
// uncomment for debugging
// std::cout << "fracibstot " << params.fracibstot << std::endl;
// std::cout << "real part " << params.numberRealParticles << std::endl;
// std::cout << "timeRatio " << params.timeratio << std::endl;
// std::cout << "growth rate z " << ibsgrowthrates.z << std::endl;
// std::cout << "numberMacroParticles " << params.numberMacroParticles << std::endl;
float alfap = 2 * params.fracibstot * params.numberRealParticles * params.timeratio * ibsgrowthrates.z / params.numberMacroParticles;
float alfax = 2 * params.fracibstot * params.numberRealParticles * params.timeratio * ibsgrowthrates.x / params.numberMacroParticles;
float alfay = 2 * params.fracibstot * params.numberRealParticles * params.timeratio * ibsgrowthrates.y / params.numberMacroParticles;
return make_float3(alfax,alfay,alfap);
};
float3 STE_IBS::getIBSLifeTimes( float3 ibsGrowthRates )
{
return make_float3( ibsGrowthRates.x / (2 * 1) , ibsGrowthRates.y / (2 * 1) , ibsGrowthRates.z / (2 * 1) );
};
__host__ float4 STE_IBS::CalcIbsCoeff(ibsparameters& params, int method, float tsynchro, float3 ibsGrowthRates )
{
float coeffs,coeffx, coeffy;
float alphaAverage;
float coeffMulT;
float alfax = ibsGrowthRates.x;
float alfay = ibsGrowthRates.y;
float alfap = ibsGrowthRates.z;
float dtsamp2 = 2 * params.tauhat / params.nbins;
float rmsdelta = params.dponp;
float sigs = params.sigs;
float rmsx = sqrt(params.emitx * params.betx);
float rmsy = sqrt(params.emity * params.bety);
// debugging
// cout << "alfap " << alfap << endl << endl;
if (alfap > 0.0f)
coeffs = sqrt(6 * alfap * params.trev) * rmsdelta;
else
coeffs = 0.0f;
// coupling
if (params.ibsCoupling == 0.0)
{
if (alfay > 0.0)
coeffy = sqrt(6 * alfay * params.trev) * rmsy;
else
coeffy = 0.0f;
if (alfax > 0.0)
coeffx = sqrt(6 * alfax * params.trev) * rmsx;
else
coeffx = 0.0f;
}
else
{
// alphaAverage
alphaAverage = 0.5 * (alfax + alfay);
if (alphaAverage > 0.0)
{
coeffx = sqrt(6 * alphaAverage * params.trev) * rmsx;
coeffy = sqrt(6 * alphaAverage * params.trev) * rmsy;
}
else
{
coeffx = 0.0f;
coeffy = 0.0f;
}
// end if alphaAverage
}
// end if ibs coupling
coeffMulT = sigs* 2* sqrt(CUDA_PI_F)/(params.numberRealParticles * dtsamp2 * CUDA_C_F);
return make_float4(coeffx,coeffy,coeffs, coeffMulT);
};
thrust::device_vector<float> STE_IBS::HistogramToSQRTofCumul(thrust::device_vector<int> inputHistogram, float coeff)
{
int n = inputHistogram.size();
thrust::device_vector<float> vcoeff(n);
// thrust::device_vector<float> vcoeff2;
//thrust::device_vector<float> cumul(n);
// thrust::device_vector<int> outputhistogram;
// fill constant vector
thrust::fill(vcoeff.begin(),vcoeff.end(),coeff);
// multiply with constant
thrust::transform(inputHistogram.begin(),inputHistogram.end(),vcoeff.begin(),vcoeff.begin(),thrust::multiplies<float>());
// this was wrong, no cumul taken in original code
// cumulative sum
//thrust::inclusive_scan(vcoeff.begin(),vcoeff.end(),cumul.begin());
// take sqrt
//thrust::for_each(cumul.begin(),cumul.end(),sqrtFloatFunctor());
thrust::for_each(vcoeff.begin(),vcoeff.end(),sqrtFloatFunctor());
return vcoeff;
};
void STE_IBS::update( ibsparameters& params, thrust::device_vector<float6> bunch , thrust::device_vector<tfsTableData> tfsdata)
{
histogramTime = ParticleTimesToHistogram( bunch , params.nbins , params.tauhat );
// std::cout << "before : "<< ibsGrowthRates.x << " " << ibsGrowthRates.y << " " << ibsGrowthRates.z << std::endl;
ibsGrowthRates = CalculateIBSGrowthRates( params , params.methodIBS , params.phiSynchronous , tfsdata );
// std::cout << "after : " << ibsGrowthRates.x << " " << ibsGrowthRates.y << " " << ibsGrowthRates.z << std::endl;
ibscoeff = CalcIbsCoeff( params , params.methodIBS , params.phiSynchronous , ibsGrowthRates );
sqrthistogram = HistogramToSQRTofCumul(histogramTime,ibscoeff.w);
}
float4 STE_IBS::getIBScoeff()
{
return ibscoeff;
}
thrust::device_vector<int> STE_IBS::getTimeHistogram()
{
return histogramTime;
}
thrust::device_vector<float> STE_IBS::getSqrtHistogram()
{
return sqrthistogram;
}
float3 STE_IBS::getIBSGrowthRates(){
return ibsGrowthRates;
} | fe5829f9d6062fa56b88bead1d167e6ff638b99e.cu | // random generator includes
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/xor_combine_engine.h>
#include <thrust/random.h>
#include <curand_kernel.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <boost/math/tools/roots.hpp>
#include <thrust/tuple.h>
#include <thrust/iterator/counting_iterator.h>
#include <map>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <math.h>
#include <string>
#include <boost/math/tools/roots.hpp>
#include <thrust/tuple.h>
// load float 6 datastructure
// #include "STE_DataStructures.cuh"
// load tfsTableData datastructure
// #include "STE_TFS.cuh"
// load ibs header
#include "STE_IBS.cuh"
// numerical recipes
// !Computes Carlson elliptic integral of the second kind, RD(x, y, z). x and y must be
// !nonnegative, and at most one can be zero. z must be positive. TINY must be at least twice
// !the negative 2/3 power of the machine overflow limit. BIG must be at most 0.1×ERRTOL
// !times the negative 2/3 power of the machine underflow limit.
__host__ __device__ float rd_s(float3 in) {
float ERRTOL = 0.05;
// float TINY = 1.0e-25;
// float BIG = 4.5e21;
float C1 = 3.0/14.0;
float C2 = 1.0/6.0;
float C3 = 9.0/22.0;
float C4 = 3.0/26.0;
float C5 = 0.25 * C3;
float C6 = 1.5*C4;
float xt = in.x;
float yt = in.y;
float zt = in.z;
float sum = 0.0;
float fac = 1.0;
int iter = 0;
float sqrtx, sqrty, sqrtz, alamb, ave, delx, dely, delz, maxi ;
do
{
iter++;
sqrtx = sqrt(xt);
sqrty = sqrt(yt);
sqrtz = sqrt(zt);
alamb = sqrtx * (sqrty + sqrtz) + sqrty * sqrtz;
sum = sum + fac / (sqrtz * (zt + alamb));
fac = 0.25 * fac;
xt = 0.25 * (xt + alamb);
yt = 0.25 * (yt + alamb);
zt = 0.25 * (zt + alamb);
ave = 0.2 * (xt + yt + 3.0 * zt);
delx = (ave - xt) / ave;
dely = (ave - yt) / ave;
delz = (ave - zt) / ave;
maxi = abs(delx);
if (abs(dely) > maxi)
maxi = abs(dely);
if (abs(delz) > maxi)
maxi = abs(delz);
}
while (maxi > ERRTOL);
float ea = delx * dely;
float eb = delz * delz;
float ec = ea - eb;
float ed = ea - 6.0 * eb;
float ee = ed + ec + ec;
float rd_s = 3.0 * sum + fac * (1.0 + ed *(-C1+C5*ed-C6*delz*ee)+delz*(C2*ee+delz*(-C3*ec+delz*C4*ea)))/(ave*sqrt(ave));
return rd_s;
};
__host__ __device__ float fmohl(float a, float b, float q, int fmohlNumPoints) {
float result;
float sum = 0;
float du = 1.0/fmohlNumPoints;
float u, cp, cq, dsum;
for(int k=0;k<fmohlNumPoints;k++)
{
u = k*du;
cp = sqrt(a * a + (1 - a * a) * u * u);
cq = sqrt(b * b + (1 - b * b) * u * u);
dsum = 2*log(q*(1/cp+1/cq)/2) - CUDA_EULER_F;
dsum *= (1-3*u*u)/(cp*cq);
if (k==0)
dsum = dsum / 2;
if (k==fmohlNumPoints)
dsum = dsum / 2;
sum += dsum;
}
result = 8 * CUDA_PI_F * du * sum;
return result;
};
__host__ __device__ float3 ibsPiwinskiSmooth(ibsparameters& params){
float d;
float xdisp = params.acceleratorLength / (2 * CUDA_PI_F * pow(params.gammaTransition,2));
float rmsx = sqrt(params.emitx * params.betx);
float rmsy = sqrt(params.emity * params.bety);
float sigh2inv = 1.0 / pow(params.dponp,2) + pow((xdisp / rmsx),2);
float sigh = 1.0 / sqrt(sigh2inv);
float atop = pow(params.ParticleRadius,2) * CUDA_C_F * params.numberRealParticles;
float coeff = params.emitx * params.emity * params.sigs * params.dponp;
float abot = 64.0 * pow(CUDA_PI_F,2) * pow(params.betar,3) * pow(params.gamma0,4) * coeff;
// ca is Piwinski's A
float ca = atop/abot;
float a = sigh * params.betx / (params.gamma0 * rmsx);
float b = sigh * params.bety / (params.gamma0 * rmsy);
if (rmsx <=rmsy)
d = rmsx;
else
d = rmsy;
float q = sigh * params.betar * sqrt(2 * d / params.ParticleRadius);
float fmohlp = fmohl(a,b,q,params.fmohlNumPoints);
float fmohlx = fmohl(1/a,b/a,q/a,params.fmohlNumPoints);
float fmohly = fmohl(1/b,a/b,q/b,params.fmohlNumPoints);
float alfap0 = ca * fmohlp * pow((sigh/params.dponp),2);
float alfax0 = ca * (fmohlx + fmohlp * pow((xdisp * sigh / rmsx),2));
float alfay0 = ca * fmohly;
return make_float3(alfax0, alfay0, alfap0);
};
struct ibsPiwinskiLattice{
ibsparameters params;
ibsPiwinskiLattice(ibsparameters& params) : params(params) {}
// __host__ __device__
// radiationIntegrals operator()(tfsTableData& tfsAcceleratorElement, radiationIntegralsParameters& radiationIntParameters) const
__host__ __device__
float3 operator()(tfsTableData& tfsrow) const
{
float d;
float atop = pow(params.ParticleRadius,2) * CUDA_C_F * params.numberRealParticles;
float coeff = params.emitx * params.emity * params.sigs * params.dponp;
float abot = 64.0 * pow(CUDA_PI_F,2) * pow(params.betar,3) * pow(params.gamma0,4) * coeff;
// ca is Piwinski's A
float ca = atop/abot;
float rmsx = sqrt(params.emitx * tfsrow.betx);
float rmsy = sqrt(params.emity * tfsrow.bety);
if (rmsx <= rmsy)
d = rmsx;
else
d = rmsy;
float sigh2inv = 1.0 / pow(params.dponp,2) + pow((tfsrow.dx / rmsx),2);
float sigh = 1.0 / sqrt(sigh2inv);
float a = sigh * tfsrow.betx / (params.gamma0 * rmsx);
float b = sigh * tfsrow.bety / (params.gamma0 * rmsy);
float q = sigh * params.betar * sqrt(2 * d / params.ParticleRadius);
float fmohlp = fmohl(a,b,q,params.fmohlNumPoints);
float fmohlx = fmohl(1/a,b/a,q/a,params.fmohlNumPoints);
float fmohly = fmohl(1/b,a/b,q/b,params.fmohlNumPoints);
float alfap0 = ca * fmohlp * pow((sigh/params.dponp),2) * tfsrow.l / params.acceleratorLength;
float alfax0 = ca * (fmohlx + fmohlp * pow((tfsrow.dx * sigh / rmsx),2)) * tfsrow.l / params.acceleratorLength;
float alfay0 = ca * fmohly * tfsrow.l / params.acceleratorLength;
return make_float3(alfax0, alfay0, alfap0);
};
};
struct ibsmodPiwinskiLattice{
ibsparameters params;
ibsmodPiwinskiLattice(ibsparameters& params) : params(params) {}
// __host__ __device__
// radiationIntegrals operator()(tfsTableData& tfsAcceleratorElement, radiationIntegralsParameters& radiationIntParameters) const
__host__ __device__
float3 operator()(tfsTableData& tfsrow) const
{
float d;
float atop = pow(params.ParticleRadius,2) * CUDA_C_F * params.numberRealParticles;
float coeff = params.emitx * params.emity * params.sigs * params.dponp;
float abot = 64.0 * pow(CUDA_PI_F,2) * pow(params.betar,3) * pow(params.gamma0,4) * coeff;
// ca is Piwinski's A
float ca = atop/abot;
float H = (pow(tfsrow.dx,2)+ pow((tfsrow.betx * tfsrow.dpx - 0.5 * tfsrow.dx * (-2 * tfsrow.alfx)),2)) / tfsrow.betx;
float rmsx = sqrt(params.emitx * tfsrow.betx);
float rmsy = sqrt(params.emity * tfsrow.bety);
if (rmsx <= rmsy)
d = rmsx;
else
d = rmsy;
float sigh2inv = 1.0 / pow(params.dponp,2) + (H / params.emitx);
float sigh = 1.0 / sqrt(sigh2inv);
float a = sigh * tfsrow.betx / (params.gamma0 * rmsx);
float b = sigh * tfsrow.bety / (params.gamma0 * rmsy);
float q = sigh * params.betar * sqrt(2 * d / params.ParticleRadius);
float fmohlp = fmohl(a,b,q,params.fmohlNumPoints);
float fmohlx = fmohl(1/a,b/a,q/a,params.fmohlNumPoints);
float fmohly = fmohl(1/b,a/b,q/b,params.fmohlNumPoints);
float alfap0 = ca * fmohlp * pow((sigh/params.dponp),2) * tfsrow.l / params.acceleratorLength;
float alfax0 = ca * (fmohlx + fmohlp * pow((tfsrow.dx * sigh / rmsx),2)) * tfsrow.l / params.acceleratorLength;
float alfay0 = ca * fmohly * tfsrow.l / params.acceleratorLength;
return make_float3(alfax0, alfay0, alfap0);
};
};
struct ibsnagaitsev{
ibsparameters params;
ibsnagaitsev(ibsparameters& params) : params(params) {}
__host__ __device__
float3 operator()(tfsTableData& tfsrow) const
{
float phi = tfsrow.dpx + (tfsrow.alfx * (tfsrow.dx/tfsrow.betx));
float axx = tfsrow.betx / params.emitx;
float ayy = tfsrow.bety / params.emity;
float sigmax = sqrt( pow(tfsrow.dx,2) * pow(params.dponp,2) + params.emitx * tfsrow.betx);
float sigmay = sqrt(params.emity * tfsrow.bety);
float as = axx * (pow(tfsrow.dx,2)/pow(tfsrow.betx,2) + pow(phi,2)) + (1/(pow(params.dponp,2)));
float a1 = 0.5 * (axx + pow(params.gamma0,2) * as);
float a2 = 0.5 * (axx - pow(params.gamma0,2) * as);
float b1 = sqrt(pow(a2,2) + pow(params.gamma0,2) * pow(axx,2) * pow(phi,2));
float lambda1 = ayy;
float lambda2 = a1 + b1;
float lambda3 = a1 - b1;
float R1 = (1/lambda1) * rd_s(make_float3(1./lambda2,1./lambda3,1./lambda1));
float R2 = (1/lambda2) * rd_s(make_float3(1./lambda3,1./lambda1,1./lambda2));
float R3 = 3*sqrt((lambda1*lambda2)/lambda3)-(lambda1/lambda3)*R1-(lambda2/lambda3)*R2;
float sp = (pow(params.gamma0,2)/2.0) * ( 2.0*R1 - R2*( 1.0 - 3.0*a2/b1 ) - R3*( 1.0 + 3.0*a2/b1 ));
float sx = 0.50 * (2.0*R1 - R2*(1.0 + 3.0*a2/b1) -R3*(1.0 - 3.0*a2/b1));
float sxp=(3.0 * pow(params.gamma0,2)* pow(phi,2)*axx)/b1*(R3-R2);
float alfapp = sp/(sigmax*sigmay);
float alfaxx = (tfsrow.betx/(sigmax*sigmay)) * (sx+sxp+sp*(pow(tfsrow.dx,2)/pow(tfsrow.betx,2) + pow(phi,2)));
float alfayy = (tfsrow.bety/(sigmax*sigmay)) * (-2.0*R1+R2+R3);
float alfap0 = alfapp * tfsrow.l / params.acceleratorLength;
float alfax0 = alfaxx * tfsrow.l / params.acceleratorLength;
float alfay0 = alfayy * tfsrow.l / params.acceleratorLength;
return make_float3(alfax0, alfay0, alfap0);
};
};
/* ******************** */
/* class implementation */
/* ******************** */
STE_IBS::STE_IBS( ibsparameters& params, thrust::device_vector<float6> bunch , thrust::device_vector<tfsTableData> tfsdata){
// update input parameters
params.dponp = (CalcRMS( bunch, params.numberMacroParticles )).delta;
// make a histogram for the distribution longitudinally
histogramTime = ParticleTimesToHistogram(bunch,params.nbins,params.tauhat);
// calculate the ibs growth coefficients used in the ibs routine
ibsGrowthRates = CalculateIBSGrowthRates( params , params.methodIBS , params.phiSynchronous , tfsdata );
ibscoeff = CalcIbsCoeff( params , params.methodIBS , params.phiSynchronous, ibsGrowthRates );
// combine the histogram and coefficients into a vector used in the ibs calculations
// sqrt ( longitudinal coeff * cumul histogram ) - quantity representing the particle line density impact on momentum changes
sqrthistogram = HistogramToSQRTofCumul(histogramTime,ibscoeff.w);
}
template <typename Vector1, typename Vector2>
void STE_IBS::dense_histogram( const Vector1& input , Vector2& histogram , int nbins , float tauhat ){
typedef typename Vector1::value_type ValueType; // input value type
typedef typename Vector2::value_type IndexType; // histogram index type
// copy input data (could be skipped if input is allowed to be modified)
thrust::device_vector<ValueType> data(input);
// print the initial data
// print_vector("initial data", data);
// sort data to bring equal elements together
thrust::sort(data.begin(), data.end());
// print the sorted data
// print_vector("sorted data", data);
// number of histogram bins is equal to the maximum value plus one
IndexType num_bins = nbins+1;
// resize histogram storage
histogram.resize(num_bins);
// find the end of each bin of values
thrust::counting_iterator<IndexType> search_begin(0);
// thrust::device_vector<float> gc_d(nbins+1);
// thrust::fill(gc_d.begin(),gc_d.end(), tauhat / nbins);
// thrust::upper_bound(data.begin(), data.end(),
// gc_d.begin(), gc_d.begin() + num_bins,
// histogram.begin());
thrust::upper_bound(data.begin(), data.end(),
search_begin, search_begin + num_bins,
histogram.begin());
// print the cumulative histogram
// print_vector("cumulative histogram", histogram);
// compute the histogram by taking differences of the cumulative histogram
thrust::adjacent_difference(histogram.begin(), histogram.end(),
histogram.begin());
// print the histogram
// print_vector("histogram", histogram);
};
// takes the integers produced by ParticleTimesToInteger and bins them into a histogram
// necessary for collision and intra beam scattering routines
thrust::device_vector<int> STE_IBS::ParticleTimesToHistogram(thrust::device_vector<float6> data, int nbins, float tauhat){
int n = data.size();
thrust::device_vector<int> timecomponent(n);
thrust::device_vector<int> histogram;
// load the integer time components in a device vector
thrust::transform(data.begin(),data.end(),thrust::make_constant_iterator(make_float2(tauhat,nbins)),timecomponent.begin(),ParticleTimesToInteger());
// binning the times
dense_histogram(timecomponent,histogram,nbins,tauhat);
return histogram;
};
float6 STE_IBS::CalcRMS(thrust::device_vector<float6> distribution, int numberMacroParticles){
float6 sum;
sum.x= 0.0;
sum.px=0.0;
sum.y=0.0;
sum.py=0.0;
sum.t=0.0;
sum.delta=0.0;
float6 average = thrust::reduce(distribution.begin(),distribution.end(),sum,addFloat6());
average.x = - average.x / numberMacroParticles;
average.px = - average.px / numberMacroParticles;
average.y = - average.y / numberMacroParticles;
average.py = - average.py / numberMacroParticles;
average.t = - average.t / numberMacroParticles;
average.delta = - average.delta / numberMacroParticles;
// add the negated averages, i.e. subtract the per-coordinate mean (time shift -> t_synchronous)
thrust::transform(distribution.begin(), distribution.end(),thrust::make_constant_iterator(average),distribution.begin(),addFloat6());
// square
thrust::transform(distribution.begin(),distribution.end(),distribution.begin(),squareFunctor<float6> ());
sum.x= 0.0;
sum.px=0.0;
sum.y=0.0;
sum.py=0.0;
sum.t=0.0;
sum.delta=0.0;
// sum squares
float6 MS = thrust::reduce(distribution.begin(),distribution.end(),sum,addFloat6());
MS.x /= numberMacroParticles;
MS.px /= numberMacroParticles;
MS.y /= numberMacroParticles;
MS.py /= numberMacroParticles;
MS.t /= numberMacroParticles;
MS.delta /= numberMacroParticles;
MS.x = sqrt(MS.x);
MS.px = sqrt(MS.px);
MS.y = sqrt(MS.y);
MS.py = sqrt(MS.py);
MS.t = sqrt(MS.t);
MS.delta = sqrt(MS.delta);
return MS;
};
float3 STE_IBS::CalculateIBSGrowthRates(ibsparameters& params, int method, float tsynchro, thrust::device_vector<tfsTableData> tfsdata){
float3 ibsgrowthrates;
int m = tfsdata.size();
thrust::device_vector<float3> ibsgrowthratestfs(m);
// std::cout << "method = " << method << std::endl;
// float rmsx = sqrt(params.emitx * params.betx);
// float rmsy = sqrt(params.emity * params.bety);
// get growth rates according to selected ibs method
switch(method)
{
case 0: {
ibsgrowthrates = ibsPiwinskiSmooth(params);
break;
}
case 1:{
std::cout << "method ok " << std::endl;
thrust::transform(tfsdata.begin(),tfsdata.end(),ibsgrowthratestfs.begin(),ibsPiwinskiLattice(params));
ibsgrowthrates = thrust::reduce(ibsgrowthratestfs.begin(),ibsgrowthratestfs.end(),make_float3(0.0,0.0,0.0),addFloat3());
break;
}
case 2:{
thrust::transform(tfsdata.begin(),tfsdata.end(),ibsgrowthratestfs.begin(),ibsmodPiwinskiLattice(params));
ibsgrowthrates = thrust::reduce(ibsgrowthratestfs.begin(),ibsgrowthratestfs.end(),make_float3(0.0,0.0,0.0),addFloat3());
break;
}
case 3: {
thrust::transform(tfsdata.begin(),tfsdata.end(),ibsgrowthratestfs.begin(),ibsnagaitsev(params));
ibsgrowthrates = thrust::reduce(ibsgrowthratestfs.begin(),ibsgrowthratestfs.end(),make_float3(0.0,0.0,0.0),addFloat3());
float nom = 0.5 * (params.numberRealParticles * pow(params.ParticleRadius,2) * CUDA_C_F * params.coulomblog);
float denom = (12.0 * CUDA_PI_F * pow(params.betar,3) * pow(params.gamma0,5) * params.sigs);
ibsgrowthrates.x = ibsgrowthrates.x / params.emitx * nom / denom;
ibsgrowthrates.y = ibsgrowthrates.y / params.emity * nom / denom;
ibsgrowthrates.z = ibsgrowthrates.z / pow(params.dponp,2) * nom / denom;
break;
}
};
// uncomment for debugging
// std::cout << "fracibstot " << params.fracibstot << std::endl;
// std::cout << "real part " << params.numberRealParticles << std::endl;
// std::cout << "timeRatio " << params.timeratio << std::endl;
// std::cout << "growth rate z " << ibsgrowthrates.z << std::endl;
// std::cout << "numberMacroParticles " << params.numberMacroParticles << std::endl;
float alfap = 2 * params.fracibstot * params.numberRealParticles * params.timeratio * ibsgrowthrates.z / params.numberMacroParticles;
float alfax = 2 * params.fracibstot * params.numberRealParticles * params.timeratio * ibsgrowthrates.x / params.numberMacroParticles;
float alfay = 2 * params.fracibstot * params.numberRealParticles * params.timeratio * ibsgrowthrates.y / params.numberMacroParticles;
return make_float3(alfax,alfay,alfap);
};
float3 STE_IBS::getIBSLifeTimes( float3 ibsGrowthRates )
{
return make_float3( ibsGrowthRates.x / (2 * 1) , ibsGrowthRates.y / (2 * 1) , ibsGrowthRates.z / (2 * 1) );
};
__host__ float4 STE_IBS::CalcIbsCoeff(ibsparameters& params, int method, float tsynchro, float3 ibsGrowthRates )
{
float coeffs,coeffx, coeffy;
float alphaAverage;
float coeffMulT;
float alfax = ibsGrowthRates.x;
float alfay = ibsGrowthRates.y;
float alfap = ibsGrowthRates.z;
float dtsamp2 = 2 * params.tauhat / params.nbins;
float rmsdelta = params.dponp;
float sigs = params.sigs;
float rmsx = sqrt(params.emitx * params.betx);
float rmsy = sqrt(params.emity * params.bety);
// debugging
// cout << "alfap " << alfap << endl << endl;
if (alfap > 0.0f)
coeffs = sqrt(6 * alfap * params.trev) * rmsdelta;
else
coeffs = 0.0f;
// coupling
if (params.ibsCoupling == 0.0)
{
if (alfay > 0.0)
coeffy = sqrt(6 * alfay * params.trev) * rmsy;
else
coeffy = 0.0f;
if (alfax > 0.0)
coeffx = sqrt(6 * alfax * params.trev) * rmsx;
else
coeffx = 0.0f;
}
else
{
// alphaAverage
alphaAverage = 0.5 * (alfax + alfay);
if (alphaAverage > 0.0)
{
coeffx = sqrt(6 * alphaAverage * params.trev) * rmsx;
coeffy = sqrt(6 * alphaAverage * params.trev) * rmsy;
}
else
{
coeffx = 0.0f;
coeffy = 0.0f;
}
// end if alphaAverage
}
// end if ibs coupling
coeffMulT = sigs* 2* sqrt(CUDA_PI_F)/(params.numberRealParticles * dtsamp2 * CUDA_C_F);
return make_float4(coeffx,coeffy,coeffs, coeffMulT);
};
thrust::device_vector<float> STE_IBS::HistogramToSQRTofCumul(thrust::device_vector<int> inputHistogram, float coeff)
{
int n = inputHistogram.size();
thrust::device_vector<float> vcoeff(n);
// thrust::device_vector<float> vcoeff2;
//thrust::device_vector<float> cumul(n);
// thrust::device_vector<int> outputhistogram;
// fill constant vector
thrust::fill(vcoeff.begin(),vcoeff.end(),coeff);
// multiply with constant
thrust::transform(inputHistogram.begin(),inputHistogram.end(),vcoeff.begin(),vcoeff.begin(),thrust::multiplies<float>());
// this was wrong, no cumul taken in original code
// cumulative sum
//thrust::inclusive_scan(vcoeff.begin(),vcoeff.end(),cumul.begin());
// take sqrt
//thrust::for_each(cumul.begin(),cumul.end(),sqrtFloatFunctor());
thrust::for_each(vcoeff.begin(),vcoeff.end(),sqrtFloatFunctor());
return vcoeff;
};
void STE_IBS::update( ibsparameters& params, thrust::device_vector<float6> bunch , thrust::device_vector<tfsTableData> tfsdata)
{
histogramTime = ParticleTimesToHistogram( bunch , params.nbins , params.tauhat );
// std::cout << "before : "<< ibsGrowthRates.x << " " << ibsGrowthRates.y << " " << ibsGrowthRates.z << std::endl;
ibsGrowthRates = CalculateIBSGrowthRates( params , params.methodIBS , params.phiSynchronous , tfsdata );
// std::cout << "after : " << ibsGrowthRates.x << " " << ibsGrowthRates.y << " " << ibsGrowthRates.z << std::endl;
ibscoeff = CalcIbsCoeff( params , params.methodIBS , params.phiSynchronous , ibsGrowthRates );
sqrthistogram = HistogramToSQRTofCumul(histogramTime,ibscoeff.w);
}
float4 STE_IBS::getIBScoeff()
{
return ibscoeff;
}
thrust::device_vector<int> STE_IBS::getTimeHistogram()
{
return histogramTime;
}
thrust::device_vector<float> STE_IBS::getSqrtHistogram()
{
return sqrthistogram;
}
float3 STE_IBS::getIBSGrowthRates(){
return ibsGrowthRates;
} |
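// Illustrative usage sketch (the driver names d_bunch, d_tfs and nTurns below
// are assumptions, not taken from this file):
//   STE_IBS ibs(params, d_bunch, d_tfs);  // growth rates + longitudinal histogram
//   for (int turn = 0; turn < nTurns; ++turn) {
//     ibs.update(params, d_bunch, d_tfs); // refresh rates from the evolving bunch
//     thrust::device_vector<float> line = ibs.getSqrtHistogram(); // feeds the IBS momentum kick
//   }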
c90ef920e55fad94beb754666c85da457eab5963.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
//extern "C" {
#include "im2col.h"
#include "hip/hip_runtime.h"
//}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
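// Note: with the layout written above, data_col can be viewed as a
// (channels*ksize*ksize) x (height_col*width_col) matrix, so the convolution
// itself reduces to a single GEMM of the filter matrix against data_col.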
void im2col_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We launch channels * height_col * width_col threads in total; each thread
// copies one ksize x ksize patch of a single channel into its column of data_col.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK), 0, 0,
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
| c90ef920e55fad94beb754666c85da457eab5963.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
//extern "C" {
#include "im2col.h"
#include "cuda.h"
//}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We launch channels * height_col * width_col threads in total; each thread
// copies one ksize x ksize patch of a single channel into its column of data_col.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK>>>(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
|
0479923b46119f7155491b6769a13bf885487193.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <rocblas.h>
extern "C"
{
#include <stdio.h>
#include <cblas.h>
#include "lib_kernels.h"
#include <math.h>
#include "omp.h"
#define BLOCK_SIZE 8 //only for GPU5
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
double *elements;
} Matrix;
/***********************
CPU LIB
************************/
void
matmult_lib(int m, int n, int k, double *A, double *B, double *C) {
double te, ts;
ts = omp_get_wtime();
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m ,m , n, 1.0, A, k, B, n, 0.0, C, n);
te = omp_get_wtime() - ts;
//printf("Time:%f\n", te);
}
/***********************
NAT CPU
************************/
void
matmult_nat(int m, int n, int k, double *A, double *B, double *C) {
int i, j, l;
for(i = 0; i < m; i++){
for(j = 0; j < n; j++){
C[i * m + j] = 0;
}
}
for(j = 0; j < n; j++){
for(i = 0; i < m; i++){
double tmp = 0.0;
for(l = 0; l < k; l++){
tmp += A[i*m +l] * B[l*k+j];
}
C[i*m +j] = tmp;
}
}
}
/***********************
GPU 1
************************/
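// Note: kernel_gpu1 (declared in lib_kernels.h) is launched below with a single
// 1x1 grid/block, so one GPU thread computes the entire m x n product; it is a
// correctness baseline rather than a performance path.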
void
matmult_gpu1(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMemcpy(d_A, h_A, m * k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k * n * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void **)&d_C, m * n * sizeof(double));
hipLaunchKernelGGL(( kernel_gpu1), dim3(1),dim3(1), 0, 0, d_A, d_B, d_C, m, n, k);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
/***********************
GPU 2
************************/
void
matmult_gpu2(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMemcpy(d_A, h_A, m * k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k * n * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void **)&d_C, m * n * sizeof(double));
int bs = 32;
int dimGridX = (int)ceil(1.0*n/bs);
int dimGridY = (int)ceil(1.0*m/bs);
hipLaunchKernelGGL(( kernel_gpu2), dim3(dim3(dimGridX, dimGridY)),dim3(dim3(bs,bs)), 0, 0, d_A, d_B, d_C, m, n, k);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
/***********************
GPU 3
************************/
void
matmult_gpu3(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMemcpy(d_A, h_A, m * k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k * n * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void **)&d_C, m * n * sizeof(double));
int bs = 32; // thread-block edge length (assumed value)
int dimGridX = (int)ceil(1.0*n/(1*bs));
int dimGridY = (int)ceil(1.0*m/(2*bs));
hipLaunchKernelGGL(( kernel_gpu3), dim3(dim3(dimGridX, dimGridY)),dim3(dim3(bs,bs)), 0, 0, d_A, d_B, d_C, m, n, k);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
/***********************
GPU 4
************************/
void
matmult_gpu4(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMemcpy(d_A, h_A, m * k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k * n * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void **)&d_C, m * n * sizeof(double));
int blockSize = 32;
int elemPerThread = 8;
int dimGridX = (int)ceil(1.0*n/blockSize);
int dimGridY = (int)ceil(1.0*m/(elemPerThread*blockSize));
hipLaunchKernelGGL(( kernel_gpu4), dim3(dim3(dimGridX, dimGridY)),dim3(dim3(blockSize,blockSize)), 0, 0, d_A, d_B, d_C, m, n, k);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
/***********************
GPU 5
************************/
__global__ void kernel_gpu5(const Matrix, const Matrix, Matrix);
void
matmult_gpu5(int m, int n, int k, double *h_A, double *h_B, double *h_C){
Matrix A, B, C;
A.width = k;
A.height = m;
//A.stride = k;
A.elements = h_A;
B.width = n;
B.height = k;
//B.stride = n;
B.elements = h_B;
C.width = n;
C.height = m;
//C.stride = n;
C.elements = h_C;
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(double);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(double);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
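// Note: the integer divisions in dimGrid above assume B.width and A.height are
// multiples of BLOCK_SIZE (8); any remainder rows/columns of C are silently
// skipped, unlike the ceil()-based grids used by matmult_gpu2-gpu4.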
hipLaunchKernelGGL(( kernel_gpu5), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
// Read C from device memory
hipMemcpy(h_C, d_C.elements, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Get a matrix element
__device__ double GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
double value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Shared-memory tiled matrix multiplication kernel: each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C
__global__ void
kernel_gpu5(const Matrix A, const Matrix B, Matrix C){
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
double Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
/***********************
GPU LIB
************************/
void
matmult_gpulib(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMemcpy(d_A, h_A, m * k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k * n * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void **)&d_C, m * n * sizeof(double));
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, d_B, n, d_A, k, beta, d_C, m);
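// Note: hipBLAS assumes column-major storage, so passing d_B as the first
// operand computes C^T = B^T * A^T, which for these row-major buffers yields
// C = A * B; the leading dimensions as written assume the square m == n == k
// case used elsewhere in this file.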
hipMemcpy(h_C, d_C, m * n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
}
| 0479923b46119f7155491b6769a13bf885487193.cu | #include <cublas_v2.h>
extern "C"
{
#include <stdio.h>
#include <cblas.h>
#include "lib_kernels.h"
#include <math.h>
#include "omp.h"
#define BLOCK_SIZE 8 //only for GPU5
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
double *elements;
} Matrix;
/***********************
CPU LIB
************************/
void
matmult_lib(int m, int n, int k, double *A, double *B, double *C) {
double te, ts;
ts = omp_get_wtime();
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m ,m , n, 1.0, A, k, B, n, 0.0, C, n);
te = omp_get_wtime() - ts;
//printf("Time:%f\n", te);
}
/***********************
NAT CPU
************************/
void
matmult_nat(int m, int n, int k, double *A, double *B, double *C) {
int i, j, l;
for(i = 0; i < m; i++){
for(j = 0; j < n; j++){
C[i * m + j] = 0;
}
}
for(j = 0; j < n; j++){
for(i = 0; i < m; i++){
double tmp = 0.0;
for(l = 0; l < k; l++){
tmp += A[i*m +l] * B[l*k+j];
}
C[i*m +j] = tmp;
}
}
}
/***********************
GPU 1
************************/
void
matmult_gpu1(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double));
cudaMalloc((void **)&d_B, k * n * sizeof(double));
cudaMemcpy(d_A, h_A, m * k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_C, m * n * sizeof(double));
kernel_gpu1<<<1,1>>>(d_A, d_B, d_C, m, n, k);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
/***********************
GPU 2
************************/
void
matmult_gpu2(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double));
cudaMalloc((void **)&d_B, k * n * sizeof(double));
cudaMemcpy(d_A, h_A, m * k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_C, m * n * sizeof(double));
int bs = 32;
int dimGridX = (int)ceil(1.0*n/bs);
int dimGridY = (int)ceil(1.0*m/bs);
kernel_gpu2<<<dim3(dimGridX, dimGridY),dim3(bs,bs)>>>(d_A, d_B, d_C, m, n, k);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
/***********************
GPU 3
************************/
void
matmult_gpu3(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double));
cudaMalloc((void **)&d_B, k * n * sizeof(double));
cudaMemcpy(d_A, h_A, m * k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_C, m * n * sizeof(double));
int bs = 32; // thread-block edge length (assumed value)
int dimGridX = (int)ceil(1.0*n/(1*bs));
int dimGridY = (int)ceil(1.0*m/(2*bs));
kernel_gpu3<<<dim3(dimGridX, dimGridY),dim3(bs,bs)>>>(d_A, d_B, d_C, m, n, k);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
/***********************
GPU 4
************************/
void
matmult_gpu4(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double));
cudaMalloc((void **)&d_B, k * n * sizeof(double));
cudaMemcpy(d_A, h_A, m * k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_C, m * n * sizeof(double));
int blockSize = 32;
int elemPerThread = 8;
int dimGridX = (int)ceil(1.0*n/blockSize);
int dimGridY = (int)ceil(1.0*m/(elemPerThread*blockSize));
kernel_gpu4<<<dim3(dimGridX, dimGridY),dim3(blockSize,blockSize)>>>(d_A, d_B, d_C, m, n, k);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, m * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
/***********************
GPU 5
************************/
__global__ void kernel_gpu5(const Matrix, const Matrix, Matrix);
void
matmult_gpu5(int m, int n, int k, double *h_A, double *h_B, double *h_C){
Matrix A, B, C;
A.width = k;
A.height = m;
//A.stride = k;
A.elements = h_A;
B.width = n;
B.height = k;
//B.stride = n;
B.elements = h_B;
C.width = n;
C.height = m;
//C.stride = n;
C.elements = h_C;
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(double);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(double);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
kernel_gpu5<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(h_C, d_C.elements, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Get a matrix element
__device__ double GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
double value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Shared-memory tiled matrix multiplication kernel: each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C
__global__ void
kernel_gpu5(const Matrix A, const Matrix B, Matrix C){
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
double Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
/***********************
GPU LIB
************************/
void
matmult_gpulib(int m, int n, int k, double *h_A, double *h_B, double *h_C){
double *d_A, *d_B, *d_C;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
cudaMalloc((void **)&d_A, m * k * sizeof(double));
cudaMalloc((void **)&d_B, k * n * sizeof(double));
cudaMemcpy(d_A, h_A, m * k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_C, m * n * sizeof(double));
cublasHandle_t handle;
cublasCreate(&handle);
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, d_B, n, d_A, k, beta, d_C, m);
cudaMemcpy(h_C, d_C, m * n * sizeof(double), cudaMemcpyDeviceToHost);
    cublasDestroy(handle);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
}
|
7def7f00b75fd6b1e44b9e1353e4476bf25e5a85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_mparticles.cuh"
#include "cuda_moments.cuh"
#include "bs.hxx"
#include "pushp.hxx"
#include "fields.hxx"
#define THREADS_PER_BLOCK (512)
// FIXME/TODO: we could do this w/o prior reordering, but currently the
// generic moment calculation code first reorders anyway (which it shouldn't)
// ======================================================================
template <typename DIM>
class Deposit
{
public:
using R = float;
template <typename E>
GT_INLINE void operator()(E& flds, int m, int lf[3], R of[3], R val,
dim_yz tag)
{
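    // Bilinear (cloud-in-cell) deposit in the y-z plane: lf[] is the lower cell
    // index and of[] the fractional offset inside it, so val is split between the
    // four surrounding grid points with weights that sum to 1.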
atomicAdd(&flds(m, 0, lf[1], lf[2]), (1.f - of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, 0, lf[1] + 1, lf[2]), (of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, 0, lf[1], lf[2] + 1), (1.f - of[1]) * (of[2]) * val);
atomicAdd(&flds(m, 0, lf[1] + 1, lf[2] + 1), (of[1]) * (of[2]) * val);
}
template <typename E>
GT_INLINE void operator()(E& flds, int m, int lf[3], R of[3], R val,
dim_xyz tag)
{
atomicAdd(&flds(m, lf[0], lf[1], lf[2]),
(1.f - of[0]) * (1.f - of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1], lf[2]),
(of[0]) * (1.f - of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0], lf[1] + 1, lf[2]),
(1.f - of[0]) * (of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1] + 1, lf[2]),
(of[0]) * (of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0], lf[1], lf[2] + 1),
(1.f - of[0]) * (1.f - of[1]) * (of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1], lf[2] + 1),
(of[0]) * (1.f - of[1]) * (of[2]) * val);
atomicAdd(&flds(m, lf[0], lf[1] + 1, lf[2] + 1),
(1.f - of[0]) * (of[1]) * (of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1] + 1, lf[2] + 1),
(of[0]) * (of[1]) * (of[2]) * val);
}
template <typename E>
GT_INLINE void operator()(E& flds, int m, int lf[3], R of[3], R val)
{
(*this)(flds, m, lf, of, val, DIM{});
}
};
// ----------------------------------------------------------------------
// rho_1st_nc_cuda_run
template <typename DMparticles, typename dim, bool REORDER, typename E>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
rho_1st_nc_cuda_run(DMparticles dmprts, E mflds_gt, Int3 ib)
{
BlockSimple<typename DMparticles::BS, dim> current_block;
if (!current_block.init(dmprts)) {
return;
}
auto gt = view_patch(mflds_gt, current_block.p);
auto flds = make_Fields3d<dim>(gt, ib);
Deposit<dim> deposit;
__syncthreads();
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
const auto prt =
REORDER ? dmprts.storage[dmprts.id_[n]] : dmprts.storage[n];
float fnq = dmprts.prt_w(prt) * dmprts.fnqs();
float q = dmprts.prt_q(prt);
int lf[3];
float of[3];
dmprts.find_idx_off_1st(prt.x, lf, of, float(0.));
deposit(flds, 0, lf, of, q * fnq);
}
}
// ----------------------------------------------------------------------
// n_1st_cuda_run
template <typename BS, typename dim, bool REORDER, typename E>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
n_1st_cuda_run(DMparticlesCuda<BS> dmprts, E mflds_gt, Int3 ib)
{
BlockSimple<BS, dim> current_block;
if (!current_block.init(dmprts)) {
return;
}
auto gt = view_patch(mflds_gt, current_block.p);
auto flds = make_Fields3d<dim>(gt, ib);
Deposit<dim> deposit;
__syncthreads();
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
const auto prt =
REORDER ? dmprts.storage[dmprts.id_[n]] : dmprts.storage[n];
int kind = prt.kind;
float fnq = dmprts.prt_w(prt) * dmprts.fnqs();
float q = dmprts.prt_q(prt);
int lf[3];
float of[3];
dmprts.find_idx_off_1st(prt.x, lf, of, float(-.5));
deposit(flds, kind, lf, of, fnq);
}
}
// ----------------------------------------------------------------------
// all_1st_cuda_run
template <typename BS, typename dim, bool REORDER, typename E>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
all_1st_cuda_run(DMparticlesCuda<BS> dmprts, E mflds_gt, Int3 ib)
{
BlockSimple<BS, dim> current_block;
if (!current_block.init(dmprts)) {
return;
}
auto gt = view_patch(mflds_gt, current_block.p);
auto flds = make_Fields3d<dim>(gt, ib);
Deposit<dim> deposit;
__syncthreads();
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
const auto prt =
REORDER ? dmprts.storage[dmprts.id_[n]] : dmprts.storage[n];
float fnq = dmprts.prt_w(prt) * dmprts.fnqs();
float q = dmprts.prt_q(prt);
float m = dmprts.prt_m(prt);
int lf[3];
float of[3];
dmprts.find_idx_off_1st(prt.x, lf, of, float(-.5));
AdvanceParticle<float, dim> advance{dmprts.dt()};
auto v = advance.calc_v(prt.u);
int n_moments = 13;
int mm = prt.kind * n_moments;
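    // One reading of the 13 moments deposited below: charge density (q), charge
    // flux / current (q*v), momentum density (m*u), and the six independent
    // components of the momentum-flux tensor (m*u_i*v_j).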
deposit(flds, mm + 0, lf, of, fnq * q);
deposit(flds, mm + 1, lf, of, fnq * q * v[0]);
deposit(flds, mm + 2, lf, of, fnq * q * v[1]);
deposit(flds, mm + 3, lf, of, fnq * q * v[2]);
deposit(flds, mm + 4, lf, of, fnq * m * prt.u[0]);
deposit(flds, mm + 5, lf, of, fnq * m * prt.u[1]);
deposit(flds, mm + 6, lf, of, fnq * m * prt.u[2]);
deposit(flds, mm + 7, lf, of, fnq * m * prt.u[0] * v[0]);
deposit(flds, mm + 8, lf, of, fnq * m * prt.u[1] * v[1]);
deposit(flds, mm + 9, lf, of, fnq * m * prt.u[2] * v[2]);
deposit(flds, mm + 10, lf, of, fnq * m * prt.u[0] * v[1]);
deposit(flds, mm + 11, lf, of, fnq * m * prt.u[1] * v[2]);
deposit(flds, mm + 12, lf, of, fnq * m * prt.u[2] * v[0]);
}
}
// ----------------------------------------------------------------------
// CudaMoments1stNcRho::operator()
template <typename CudaMparticles, typename dim>
void CudaMoments1stNcRho<CudaMparticles, dim>::operator()(
CudaMparticles& cmprts, MfieldsCuda& mres)
{
if (cmprts.n_prts == 0) {
return;
}
cmprts.reorder(); // FIXME/OPT?
if (!cmprts.need_reorder) {
dim3 dimGrid =
BlockSimple<typename CudaMparticles::BS, dim>::dimGrid(cmprts);
hipLaunchKernelGGL(( rho_1st_nc_cuda_run<typename CudaMparticles::DMparticles, dim, false>)
, dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, cmprts, mres.gt(), -mres.ibn());
cuda_sync_if_enabled();
} else {
assert(0);
}
}
// ----------------------------------------------------------------------
// CudaMoments1stN::operator()
template <typename CudaMparticles, typename dim>
void CudaMoments1stN<CudaMparticles, dim>::operator()(CudaMparticles& cmprts,
MfieldsCuda& mres)
{
static int pr, pr_1;
if (!pr) {
pr = prof_register("cuda_mom_n", 1, 0, 0);
pr_1 = prof_register("cuda_mom_n_reorder", 1, 0, 0);
}
prof_start(pr);
if (cmprts.n_prts == 0) {
return;
}
prof_start(pr_1);
cmprts.reorder(); // FIXME/OPT?
prof_stop(pr_1);
if (!cmprts.need_reorder) {
dim3 dimGrid =
BlockSimple<typename CudaMparticles::BS, dim>::dimGrid(cmprts);
hipLaunchKernelGGL(( n_1st_cuda_run<typename CudaMparticles::BS, dim, false>)
, dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, cmprts, mres.gt(), -mres.ibn());
cuda_sync_if_enabled();
} else {
assert(0);
}
prof_stop(pr);
}
// ----------------------------------------------------------------------
// CudaMoments1stAll::operator()
template <typename CudaMparticles, typename dim>
void CudaMoments1stAll<CudaMparticles, dim>::operator()(CudaMparticles& cmprts,
MfieldsCuda& mres)
{
static int pr, pr_1;
if (!pr) {
pr = prof_register("cuda_mom_all", 1, 0, 0);
pr_1 = prof_register("cuda_mom_all_reorder", 1, 0, 0);
}
// prof_start(pr);
if (cmprts.n_prts == 0) {
return;
}
// prof_start(pr_1);
cmprts.reorder(); // FIXME/OPT?
// prof_stop(pr_1);
if (!cmprts.need_reorder) {
dim3 dimGrid =
BlockSimple<typename CudaMparticles::BS, dim>::dimGrid(cmprts);
hipLaunchKernelGGL(( all_1st_cuda_run<typename CudaMparticles::BS, dim, false>)
, dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, cmprts, mres.gt(), -mres.ibn());
cuda_sync_if_enabled();
} else {
assert(0);
}
// prof_stop(pr);
}
template struct CudaMoments1stNcRho<cuda_mparticles<BS144>, dim_yz>;
template struct CudaMoments1stN<cuda_mparticles<BS144>, dim_yz>;
template struct CudaMoments1stAll<cuda_mparticles<BS144>, dim_yz>;
template struct CudaMoments1stNcRho<cuda_mparticles<BS444>, dim_xyz>;
template struct CudaMoments1stN<cuda_mparticles<BS444>, dim_xyz>;
template struct CudaMoments1stAll<cuda_mparticles<BS444>, dim_xyz>;
| 7def7f00b75fd6b1e44b9e1353e4476bf25e5a85.cu |
#include "cuda_mparticles.cuh"
#include "cuda_moments.cuh"
#include "bs.hxx"
#include "pushp.hxx"
#include "fields.hxx"
#define THREADS_PER_BLOCK (512)
// FIXME/TODO: we could do this w/o prior reordering, but currently the
// generic moment calculation code first reorders anyway (which it shouldn't)
// ======================================================================
template <typename DIM>
class Deposit
{
public:
using R = float;
template <typename E>
GT_INLINE void operator()(E& flds, int m, int lf[3], R of[3], R val,
dim_yz tag)
{
atomicAdd(&flds(m, 0, lf[1], lf[2]), (1.f - of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, 0, lf[1] + 1, lf[2]), (of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, 0, lf[1], lf[2] + 1), (1.f - of[1]) * (of[2]) * val);
atomicAdd(&flds(m, 0, lf[1] + 1, lf[2] + 1), (of[1]) * (of[2]) * val);
}
template <typename E>
GT_INLINE void operator()(E& flds, int m, int lf[3], R of[3], R val,
dim_xyz tag)
{
atomicAdd(&flds(m, lf[0], lf[1], lf[2]),
(1.f - of[0]) * (1.f - of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1], lf[2]),
(of[0]) * (1.f - of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0], lf[1] + 1, lf[2]),
(1.f - of[0]) * (of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1] + 1, lf[2]),
(of[0]) * (of[1]) * (1.f - of[2]) * val);
atomicAdd(&flds(m, lf[0], lf[1], lf[2] + 1),
(1.f - of[0]) * (1.f - of[1]) * (of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1], lf[2] + 1),
(of[0]) * (1.f - of[1]) * (of[2]) * val);
atomicAdd(&flds(m, lf[0], lf[1] + 1, lf[2] + 1),
(1.f - of[0]) * (of[1]) * (of[2]) * val);
atomicAdd(&flds(m, lf[0] + 1, lf[1] + 1, lf[2] + 1),
(of[0]) * (of[1]) * (of[2]) * val);
}
template <typename E>
GT_INLINE void operator()(E& flds, int m, int lf[3], R of[3], R val)
{
(*this)(flds, m, lf, of, val, DIM{});
}
};
// ----------------------------------------------------------------------
// rho_1st_nc_cuda_run
template <typename DMparticles, typename dim, bool REORDER, typename E>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
rho_1st_nc_cuda_run(DMparticles dmprts, E mflds_gt, Int3 ib)
{
BlockSimple<typename DMparticles::BS, dim> current_block;
if (!current_block.init(dmprts)) {
return;
}
auto gt = view_patch(mflds_gt, current_block.p);
auto flds = make_Fields3d<dim>(gt, ib);
Deposit<dim> deposit;
__syncthreads();
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
const auto prt =
REORDER ? dmprts.storage[dmprts.id_[n]] : dmprts.storage[n];
float fnq = dmprts.prt_w(prt) * dmprts.fnqs();
float q = dmprts.prt_q(prt);
int lf[3];
float of[3];
dmprts.find_idx_off_1st(prt.x, lf, of, float(0.));
deposit(flds, 0, lf, of, q * fnq);
}
}
// ----------------------------------------------------------------------
// n_1st_cuda_run
template <typename BS, typename dim, bool REORDER, typename E>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
n_1st_cuda_run(DMparticlesCuda<BS> dmprts, E mflds_gt, Int3 ib)
{
BlockSimple<BS, dim> current_block;
if (!current_block.init(dmprts)) {
return;
}
auto gt = view_patch(mflds_gt, current_block.p);
auto flds = make_Fields3d<dim>(gt, ib);
Deposit<dim> deposit;
__syncthreads();
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
const auto prt =
REORDER ? dmprts.storage[dmprts.id_[n]] : dmprts.storage[n];
int kind = prt.kind;
float fnq = dmprts.prt_w(prt) * dmprts.fnqs();
float q = dmprts.prt_q(prt);
int lf[3];
float of[3];
dmprts.find_idx_off_1st(prt.x, lf, of, float(-.5));
deposit(flds, kind, lf, of, fnq);
}
}
// ----------------------------------------------------------------------
// all_1st_cuda_run
template <typename BS, typename dim, bool REORDER, typename E>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
all_1st_cuda_run(DMparticlesCuda<BS> dmprts, E mflds_gt, Int3 ib)
{
BlockSimple<BS, dim> current_block;
if (!current_block.init(dmprts)) {
return;
}
auto gt = view_patch(mflds_gt, current_block.p);
auto flds = make_Fields3d<dim>(gt, ib);
Deposit<dim> deposit;
__syncthreads();
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
const auto prt =
REORDER ? dmprts.storage[dmprts.id_[n]] : dmprts.storage[n];
float fnq = dmprts.prt_w(prt) * dmprts.fnqs();
float q = dmprts.prt_q(prt);
float m = dmprts.prt_m(prt);
int lf[3];
float of[3];
dmprts.find_idx_off_1st(prt.x, lf, of, float(-.5));
AdvanceParticle<float, dim> advance{dmprts.dt()};
auto v = advance.calc_v(prt.u);
int n_moments = 13;
int mm = prt.kind * n_moments;
deposit(flds, mm + 0, lf, of, fnq * q);
deposit(flds, mm + 1, lf, of, fnq * q * v[0]);
deposit(flds, mm + 2, lf, of, fnq * q * v[1]);
deposit(flds, mm + 3, lf, of, fnq * q * v[2]);
deposit(flds, mm + 4, lf, of, fnq * m * prt.u[0]);
deposit(flds, mm + 5, lf, of, fnq * m * prt.u[1]);
deposit(flds, mm + 6, lf, of, fnq * m * prt.u[2]);
deposit(flds, mm + 7, lf, of, fnq * m * prt.u[0] * v[0]);
deposit(flds, mm + 8, lf, of, fnq * m * prt.u[1] * v[1]);
deposit(flds, mm + 9, lf, of, fnq * m * prt.u[2] * v[2]);
deposit(flds, mm + 10, lf, of, fnq * m * prt.u[0] * v[1]);
deposit(flds, mm + 11, lf, of, fnq * m * prt.u[1] * v[2]);
deposit(flds, mm + 12, lf, of, fnq * m * prt.u[2] * v[0]);
}
}
// ----------------------------------------------------------------------
// CudaMoments1stNcRho::operator()
template <typename CudaMparticles, typename dim>
void CudaMoments1stNcRho<CudaMparticles, dim>::operator()(
CudaMparticles& cmprts, MfieldsCuda& mres)
{
if (cmprts.n_prts == 0) {
return;
}
cmprts.reorder(); // FIXME/OPT?
if (!cmprts.need_reorder) {
dim3 dimGrid =
BlockSimple<typename CudaMparticles::BS, dim>::dimGrid(cmprts);
rho_1st_nc_cuda_run<typename CudaMparticles::DMparticles, dim, false>
<<<dimGrid, THREADS_PER_BLOCK>>>(cmprts, mres.gt(), -mres.ibn());
cuda_sync_if_enabled();
} else {
assert(0);
}
}
// ----------------------------------------------------------------------
// CudaMoments1stN::operator()
template <typename CudaMparticles, typename dim>
void CudaMoments1stN<CudaMparticles, dim>::operator()(CudaMparticles& cmprts,
MfieldsCuda& mres)
{
static int pr, pr_1;
if (!pr) {
pr = prof_register("cuda_mom_n", 1, 0, 0);
pr_1 = prof_register("cuda_mom_n_reorder", 1, 0, 0);
}
prof_start(pr);
if (cmprts.n_prts == 0) {
return;
}
prof_start(pr_1);
cmprts.reorder(); // FIXME/OPT?
prof_stop(pr_1);
if (!cmprts.need_reorder) {
dim3 dimGrid =
BlockSimple<typename CudaMparticles::BS, dim>::dimGrid(cmprts);
n_1st_cuda_run<typename CudaMparticles::BS, dim, false>
<<<dimGrid, THREADS_PER_BLOCK>>>(cmprts, mres.gt(), -mres.ibn());
cuda_sync_if_enabled();
} else {
assert(0);
}
prof_stop(pr);
}
// ----------------------------------------------------------------------
// CudaMoments1stAll::operator()
template <typename CudaMparticles, typename dim>
void CudaMoments1stAll<CudaMparticles, dim>::operator()(CudaMparticles& cmprts,
MfieldsCuda& mres)
{
static int pr, pr_1;
if (!pr) {
pr = prof_register("cuda_mom_all", 1, 0, 0);
pr_1 = prof_register("cuda_mom_all_reorder", 1, 0, 0);
}
// prof_start(pr);
if (cmprts.n_prts == 0) {
return;
}
// prof_start(pr_1);
cmprts.reorder(); // FIXME/OPT?
// prof_stop(pr_1);
if (!cmprts.need_reorder) {
dim3 dimGrid =
BlockSimple<typename CudaMparticles::BS, dim>::dimGrid(cmprts);
all_1st_cuda_run<typename CudaMparticles::BS, dim, false>
<<<dimGrid, THREADS_PER_BLOCK>>>(cmprts, mres.gt(), -mres.ibn());
cuda_sync_if_enabled();
} else {
assert(0);
}
// prof_stop(pr);
}
template struct CudaMoments1stNcRho<cuda_mparticles<BS144>, dim_yz>;
template struct CudaMoments1stN<cuda_mparticles<BS144>, dim_yz>;
template struct CudaMoments1stAll<cuda_mparticles<BS144>, dim_yz>;
template struct CudaMoments1stNcRho<cuda_mparticles<BS444>, dim_xyz>;
template struct CudaMoments1stN<cuda_mparticles<BS444>, dim_xyz>;
template struct CudaMoments1stAll<cuda_mparticles<BS444>, dim_xyz>;
|
d3d78590723b2a19ba20d94eda8442e8485d697b.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <algorithm>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.hip"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
void print_vector(int *vector, int num);
int main(int argc, char **argv)
{
char *tmpchar;
bool directed = 1;
int num_nodes;
int num_edges;
int file_format = 1;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]);
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse the graph and store it into the CSR structure
if (file_format == 1) {
csr = parseMetis_transpose(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
csr = parseCOO_transpose(tmpchar, &num_nodes, &num_edges, directed);
} else {
printf("reserve for future");
exit(1);
}
// Allocate the cost array
int *cost_array = (int *)malloc(num_nodes * sizeof(int));
if (!cost_array) fprintf(stderr, "malloc failed cost_array\n");
// Set the cost array to zero
for (int i = 0; i < num_nodes; i++) {
cost_array[i] = 0;
}
// Create device-side buffers
int *row_d;
int *col_d;
int *data_d;
int *vector_d1;
int *vector_d2;
int *stop_d;
// Create the device-side graph structure
err = hipMalloc(&row_d, (num_nodes + 1) * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&data_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc data_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
return -1;
}
// Termination variable
err = hipMalloc(&stop_d, sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc stop_d (size:%d) => %s\n", 1, hipGetErrorString(err));
return -1;
}
// Create the device-side buffers for sssp
err = hipMalloc(&vector_d1, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vector_d1 (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&vector_d2, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vector_d2 (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
// Copy data to device side buffers
err = hipMemcpy(row_d, csr->row_array, (num_nodes + 1) * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(data_d, csr->data_array, num_edges * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy data_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
double timer3 = gettime();
// Work dimensions
int block_size = 64;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
// Source vertex 0
int sourceVertex = 0;
// Launch the initialization kernel
hipLaunchKernelGGL(( vector_init) , dim3(grid), dim3(threads), 0, 0, vector_d1, vector_d2, sourceVertex, num_nodes);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: vector_init failed (%s)\n", hipGetErrorString(err));
return -1;
}
int stop = 1;
int cnt = 0;
// Main computation loop
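    // Bellman-Ford-style relaxation in the (min,+) semiring: each pass copies the
    // current distance vector, relaxes all edges with the min-plus SpMV kernel,
    // and vector_diff flags whether anything changed so the loop can exit early.
    // Shortest paths settle after at most num_nodes-1 passes.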
for (int i = 1; i < num_nodes; i++) {
// Reset the termination variable
stop = 0;
// Copy the termination variable to the device
err = hipMemcpy(stop_d, &stop, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: write stop_d (%s)\n", hipGetErrorString(err));
return -1;
}
// Launch the assignment kernel
hipLaunchKernelGGL(( vector_assign) , dim3(grid), dim3(threads), 0, 0, vector_d1, vector_d2, num_nodes);
// Launch the min.+ kernel
hipLaunchKernelGGL(( spmv_min_dot_plus_kernel) , dim3(grid), dim3(threads), 0, 0, num_nodes, row_d, col_d,
data_d, vector_d1,
vector_d2);
// Launch the check kernel
hipLaunchKernelGGL(( vector_diff) , dim3(grid), dim3(threads), 0, 0, vector_d1, vector_d2,
stop_d, num_nodes);
// Read the termination variable back
err = hipMemcpy(&stop, stop_d, sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: read stop_d (%s)\n", hipGetErrorString(err));
return -1;
}
// Exit the loop
if (stop == 0) {
break;
}
cnt++;
}
hipDeviceSynchronize();
double timer4 = gettime();
// Read the cost_array back
err = hipMemcpy(cost_array, vector_d1, num_nodes * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: read vector_d1 (%s)\n", hipGetErrorString(err));
return -1;
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double timer2 = gettime();
// Print the timing statistics
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("number iterations = %d\n", cnt);
#if 1
// Print cost_array
print_vector(cost_array, num_nodes);
#endif
// Clean up the host arrays
free(cost_array);
csr->freeArrays();
free(csr);
// Clean up the device-side buffers
hipFree(row_d);
hipFree(col_d);
hipFree(data_d);
hipFree(stop_d);
hipFree(vector_d1);
hipFree(vector_d2);
return 0;
}
void print_vector(int *vector, int num)
{
FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
for (int i = 0; i < num; i++)
fprintf(fp, "%d: %d\n", i + 1, vector[i]);
fclose(fp);
}
| d3d78590723b2a19ba20d94eda8442e8485d697b.cu | /************************************************************************************\
* *
* Copyright © 2014 Advanced Micro Devices, Inc.                                      *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <algorithm>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
void print_vector(int *vector, int num);
int main(int argc, char **argv)
{
char *tmpchar;
bool directed = 1;
int num_nodes;
int num_edges;
int file_format = 1;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]);
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse the graph and store it into the CSR structure
if (file_format == 1) {
csr = parseMetis_transpose(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
csr = parseCOO_transpose(tmpchar, &num_nodes, &num_edges, directed);
} else {
printf("reserve for future");
exit(1);
}
// Allocate the cost array
int *cost_array = (int *)malloc(num_nodes * sizeof(int));
if (!cost_array) fprintf(stderr, "malloc failed cost_array\n");
// Set the cost array to zero
for (int i = 0; i < num_nodes; i++) {
cost_array[i] = 0;
}
// Create device-side buffers
int *row_d;
int *col_d;
int *data_d;
int *vector_d1;
int *vector_d2;
int *stop_d;
// Create the device-side graph structure
err = cudaMalloc(&row_d, (num_nodes + 1) * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&data_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc data_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
return -1;
}
// Termination variable
err = cudaMalloc(&stop_d, sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc stop_d (size:%d) => %s\n", 1, cudaGetErrorString(err));
return -1;
}
// Create the device-side buffers for sssp
err = cudaMalloc(&vector_d1, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc vector_d1 (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&vector_d2, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc vector_d2 (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
// Copy data to device side buffers
err = cudaMemcpy(row_d, csr->row_array, (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(data_d, csr->data_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy data_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
double timer3 = gettime();
// Work dimensions
int block_size = 64;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
// Source vertex 0
int sourceVertex = 0;
// Launch the initialization kernel
vector_init <<<grid, threads>>>(vector_d1, vector_d2, sourceVertex, num_nodes);
cudaThreadSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: vector_init failed (%s)\n", cudaGetErrorString(err));
return -1;
}
int stop = 1;
int cnt = 0;
// Main computation loop
for (int i = 1; i < num_nodes; i++) {
// Reset the termination variable
stop = 0;
// Copy the termination variable to the device
err = cudaMemcpy(stop_d, &stop, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: write stop_d (%s)\n", cudaGetErrorString(err));
return -1;
}
// Launch the assignment kernel
vector_assign <<<grid, threads>>>(vector_d1, vector_d2, num_nodes);
// Launch the min.+ kernel
spmv_min_dot_plus_kernel <<<grid, threads>>>(num_nodes, row_d, col_d,
data_d, vector_d1,
vector_d2);
// Launch the check kernel
vector_diff <<<grid, threads>>>(vector_d1, vector_d2,
stop_d, num_nodes);
// Read the termination variable back
err = cudaMemcpy(&stop, stop_d, sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: read stop_d (%s)\n", cudaGetErrorString(err));
return -1;
}
// Exit the loop
if (stop == 0) {
break;
}
cnt++;
}
cudaThreadSynchronize();
double timer4 = gettime();
// Read the cost_array back
err = cudaMemcpy(cost_array, vector_d1, num_nodes * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: read vector_d1 (%s)\n", cudaGetErrorString(err));
return -1;
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double timer2 = gettime();
// Print the timing statistics
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("number iterations = %d\n", cnt);
#if 1
// Print cost_array
print_vector(cost_array, num_nodes);
#endif
// Clean up the host arrays
free(cost_array);
csr->freeArrays();
free(csr);
// Clean up the device-side buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(data_d);
cudaFree(stop_d);
cudaFree(vector_d1);
cudaFree(vector_d2);
return 0;
}
void print_vector(int *vector, int num)
{
FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
for (int i = 0; i < num; i++)
fprintf(fp, "%d: %d\n", i + 1, vector[i]);
fclose(fp);
}
|
8a37634a207a50ac468abf5676cb91e47ea3bfff.hip | // !!! This is a file automatically generated by hipify!!!
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include<stdio.h>
#include "hip/hip_runtime.h"
#include<string.h>
#include<stdlib.h>
#define BLOCK_SIZE 512
char *inputFile,*outputFile;
void _errorCheck(hipError_t e) {
if(e != hipSuccess) {
printf("Failed to run statement \n");
}
}
// ==================================================================
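// Five summation strategies, from naive to a full tree reduction:
// - total_sequential / total_sequential_coalesced: a single thread per block
//   sums its share (a contiguous chunk vs. a stride of gridDim.x).
// - total_atomic / total_atomic_coalesced: every thread atomicAdds elements
//   into its block's partial sum (per-thread chunks vs. a grid-stride loop).
// - total_partial_reduction: per-thread partial sums in shared memory, combined
//   by thread 0.
// - total_reduction: shared-memory tree reduction loading two elements per
//   thread per pass (the variant actually launched in main below).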
__global__ void total_sequential(float *input, float *output, int len) {
if (threadIdx.x == 0) {
int i;
        float sum = 0;
int block = blockIdx.x;
int elems = (len + gridDim.x - 1) / gridDim.x;
int start = block * elems;
int stop = min(start + elems, len);
// One thread computes sum
for (i = start; i < stop; i++) {
sum += input[i];
}
output[block] = sum;
}
}
__global__ void total_sequential_coalesced(float *input, float *output, int len) {
if (threadIdx.x == 0) {
int i;
        float sum = 0;
int start = blockIdx.x;
int step = gridDim.x;
// One thread computes sum
for (i = start; i < len; i += step) {
sum += input[i];
}
output[start] = sum;
}
}
__global__ void total_atomic(float *input, float *output, int len) {
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int elems = (len + gridDim.x - 1) / gridDim.x;
int start = block * elems;
int stop = min(start + elems, len);
// Thread computes adjacent elements
int elems_per_thread = (elems + BLOCK_SIZE - 1) / BLOCK_SIZE;
start = start + thread * elems_per_thread;
stop = min(stop, start + elems_per_thread);
for (i = start; i < stop; i++) {
// Every thread atomically updates the sum
atomicAdd(output + block, input[i]);
}
}
__global__ void total_atomic_coalesced(float *input, float *output, int len) {
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int start = block * BLOCK_SIZE + thread;
int step = gridDim.x * BLOCK_SIZE;
for (i = start; i < len; i+= step) {
// Every thread atomically updates the sum
atomicAdd(output + block, input[i]);
}
}
__global__ void total_partial_reduction(float *input, float *output, int len) {
__shared__ float shared[BLOCK_SIZE];
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int start = block * BLOCK_SIZE + thread;
int step = gridDim.x * BLOCK_SIZE;
// Each thread computes a sum in shared memory
shared[thread] = 0;
for (i = start; i < len; i += step) {
shared[thread] += input[i];
}
__syncthreads();
// Compute sum for blocks on thread 0
if (thread == 0) {
        float sum = 0;
for (i = 0; i < BLOCK_SIZE; i++) {
sum += shared[i];
}
output[block] = sum;
}
}
__global__ void total_reduction(float *input, float *output, int len) {
__shared__ float shared[BLOCK_SIZE];
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int start = block * 2 * BLOCK_SIZE + thread;
int step = gridDim.x * 2 * BLOCK_SIZE;
// Many elements per thread
shared[thread] = 0;
for (i = start; i < len; i += step) {
        shared[thread] += input[i];
        // Guard the second load: the last chunk may hold fewer than 2*BLOCK_SIZE elements
        if (i + BLOCK_SIZE < len) {
            shared[thread] += input[i + BLOCK_SIZE];
        }
}
__syncthreads();
// Recursively reduce down to 1 element
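    // Each pass halves the number of active threads; keeping the active threads
    // contiguous (0..i-1) minimizes warp divergence and avoids shared-memory
    // bank conflicts.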
for (i = BLOCK_SIZE >> 1; i > 0; i >>= 1) {
if (thread < i) {
shared[thread] += shared[thread + i];
}
__syncthreads();
}
if (thread == 0) {
output[block] = shared[0];
}
}
// ==================================================================
void parseInput(int argc, char **argv) {
if(argc < 2) {
printf("Not enough arguments\n");
printf("Usage: reduction -i inputFile -o outputFile\n");
exit(1);
}
int i=1;
while(i<argc) {
if(!strcmp(argv[i],"-i")) {
++i;
inputFile = argv[i];
} else if(!strcmp(argv[i],"-o")) {
++i;
outputFile = argv[i];
} else {
printf("Wrong input");
exit(1);
}
i++;
}
}
void getSize(int &size, char *file) {
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL) {
perror("Error opening File\n");
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF) {
printf("Error reading file\n");
exit(1);
}
fclose(fp);
}
void readFromFile(int &size,float *v, char *file) {
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL) {
printf("Error opening File %s\n",file);
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF) {
printf("Error reading file\n");
exit(1);
}
int i=0;
float t;
while(i < size) {
if(fscanf(fp,"%f",&t)==EOF) {
printf("Error reading file\n");
exit(1);
}
v[i++]=t;
}
fclose(fp);
}
int main(int argc, char **argv) {
int ii;
float *hostInput; // input list
float *hostOutput; // output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
float *solution;
// Read arguments and input files
parseInput(argc,argv);
// Read input from data
getSize(numInputElements,inputFile);
//numInputElements <<= 10;
hostInput = (float*) calloc(numInputElements, sizeof(float));
//numInputElements >>= 10;
readFromFile(numInputElements,hostInput,inputFile);
//numInputElements <<= 10;
int opsz;
getSize(opsz,outputFile);
solution = (float*) calloc(opsz, sizeof(float));
readFromFile(opsz,solution,outputFile);
// ============== Assumes output element per block ==============
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)calloc(numOutputElements, sizeof(float));
if (numOutputElements > 512) {
numOutputElements = 512;
}
// ==============================================================
// Allocate GPU memory here
hipMalloc((float **) &deviceInput, numInputElements*sizeof(float));
hipMalloc((float **) &deviceOutput, numOutputElements*sizeof(float));
hipMemcpy(deviceInput, hostInput, numInputElements*sizeof(float), hipMemcpyHostToDevice);
// ====================== Initialize timer ======================
hipEvent_t start,stop;
float elapsed_time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// Launch the GPU Kernel here, you may want multiple implementations to compare
hipLaunchKernelGGL(( total_reduction), dim3(numOutputElements), dim3(BLOCK_SIZE), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
// Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements*sizeof(float), hipMemcpyDeviceToHost);
// Reduce any remaining output on host
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time,start, stop);
// ==============================================================
// Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
// Check solution
if(solution[0] == hostOutput[0]) {
printf("The operation was successful \n");
printf("Time: %2.6f \n",elapsed_time);
} else {
printf("The operation failed \n");
}
printf("Expected sum: %0.0f \n", solution[0]);
printf("Computed sum: %0.0f \n", hostOutput[0]);
printf("Number of input elements: %i \n", numInputElements);
printf("Number of output elements: %i \n \n", numOutputElements);
free(hostInput);
free(hostOutput);
return 0;
}
| 8a37634a207a50ac468abf5676cb91e47ea3bfff.cu | // Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include<stdio.h>
#include "cuda.h"
#include<string.h>
#include<stdlib.h>
#define BLOCK_SIZE 512
char *inputFile,*outputFile;
void _errorCheck(cudaError_t e) {
if(e != cudaSuccess) {
printf("Failed to run statement \n");
}
}
// ==================================================================
__global__ void total_sequential(float *input, float *output, int len) {
if (threadIdx.x == 0) {
int i;
        float sum = 0;
int block = blockIdx.x;
int elems = (len + gridDim.x - 1) / gridDim.x;
int start = block * elems;
int stop = min(start + elems, len);
// One thread computes sum
for (i = start; i < stop; i++) {
sum += input[i];
}
output[block] = sum;
}
}
__global__ void total_sequential_coalesced(float *input, float *output, int len) {
if (threadIdx.x == 0) {
int i;
        float sum = 0;
int start = blockIdx.x;
int step = gridDim.x;
// One thread computes sum
for (i = start; i < len; i += step) {
sum += input[i];
}
output[start] = sum;
}
}
__global__ void total_atomic(float *input, float *output, int len) {
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int elems = (len + gridDim.x - 1) / gridDim.x;
int start = block * elems;
int stop = min(start + elems, len);
// Thread computes adjacent elements
int elems_per_thread = (elems + BLOCK_SIZE - 1) / BLOCK_SIZE;
start = start + thread * elems_per_thread;
stop = min(stop, start + elems_per_thread);
for (i = start; i < stop; i++) {
// Every thread atomically updates the sum
atomicAdd(output + block, input[i]);
}
}
__global__ void total_atomic_coalesced(float *input, float *output, int len) {
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int start = block * BLOCK_SIZE + thread;
int step = gridDim.x * BLOCK_SIZE;
for (i = start; i < len; i+= step) {
// Every thread atomically updates the sum
atomicAdd(output + block, input[i]);
}
}
__global__ void total_partial_reduction(float *input, float *output, int len) {
__shared__ float shared[BLOCK_SIZE];
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int start = block * BLOCK_SIZE + thread;
int step = gridDim.x * BLOCK_SIZE;
// Each thread computes a sum in shared memory
shared[thread] = 0;
for (i = start; i < len; i += step) {
shared[thread] += input[i];
}
__syncthreads();
// Compute sum for blocks on thread 0
if (thread == 0) {
        float sum = 0;
for (i = 0; i < BLOCK_SIZE; i++) {
sum += shared[i];
}
output[block] = sum;
}
}
__global__ void total_reduction(float *input, float *output, int len) {
__shared__ float shared[BLOCK_SIZE];
int i;
int block = blockIdx.x;
int thread = threadIdx.x;
int start = block * 2 * BLOCK_SIZE + thread;
int step = gridDim.x * 2 * BLOCK_SIZE;
// Many elements per thread
shared[thread] = 0;
for (i = start; i < len; i += step) {
        shared[thread] += input[i];
        // Guard the second load: the last chunk may hold fewer than 2*BLOCK_SIZE elements
        if (i + BLOCK_SIZE < len) {
            shared[thread] += input[i + BLOCK_SIZE];
        }
}
__syncthreads();
// Recursively reduce down to 1 element
for (i = BLOCK_SIZE >> 1; i > 0; i >>= 1) {
if (thread < i) {
shared[thread] += shared[thread + i];
}
__syncthreads();
}
if (thread == 0) {
output[block] = shared[0];
}
}
// ==================================================================
void parseInput(int argc, char **argv) {
if(argc < 2) {
printf("Not enough arguments\n");
printf("Usage: reduction -i inputFile -o outputFile\n");
exit(1);
}
int i=1;
while(i<argc) {
if(!strcmp(argv[i],"-i")) {
++i;
inputFile = argv[i];
} else if(!strcmp(argv[i],"-o")) {
++i;
outputFile = argv[i];
} else {
printf("Wrong input");
exit(1);
}
i++;
}
}
void getSize(int &size, char *file) {
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL) {
perror("Error opening File\n");
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF) {
printf("Error reading file\n");
exit(1);
}
fclose(fp);
}
void readFromFile(int &size,float *v, char *file) {
FILE *fp;
fp = fopen(file,"r");
if(fp == NULL) {
printf("Error opening File %s\n",file);
exit(1);
}
if(fscanf(fp,"%d",&size)==EOF) {
printf("Error reading file\n");
exit(1);
}
int i=0;
float t;
while(i < size) {
if(fscanf(fp,"%f",&t)==EOF) {
printf("Error reading file\n");
exit(1);
}
v[i++]=t;
}
fclose(fp);
}
int main(int argc, char **argv) {
int ii;
float *hostInput; // input list
float *hostOutput; // output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
float *solution;
// Read arguments and input files
parseInput(argc,argv);
// Read input from data
getSize(numInputElements,inputFile);
//numInputElements <<= 10;
hostInput = (float*) calloc(numInputElements, sizeof(float));
//numInputElements >>= 10;
readFromFile(numInputElements,hostInput,inputFile);
//numInputElements <<= 10;
int opsz;
getSize(opsz,outputFile);
solution = (float*) calloc(opsz, sizeof(float));
readFromFile(opsz,solution,outputFile);
// ============== Assumes output element per block ==============
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)calloc(numOutputElements, sizeof(float));
if (numOutputElements > 512) {
numOutputElements = 512;
}
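    // Each block produces one partial sum; capping the grid at 512 blocks means a
    // block may cover several 2*BLOCK_SIZE chunks via its grid-stride loop, and at
    // most 512 partial results are copied back and finished on the host.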
// ==============================================================
// Allocate GPU memory here
cudaMalloc((float **) &deviceInput, numInputElements*sizeof(float));
cudaMalloc((float **) &deviceOutput, numOutputElements*sizeof(float));
cudaMemcpy(deviceInput, hostInput, numInputElements*sizeof(float), cudaMemcpyHostToDevice);
// ====================== Initialize timer ======================
cudaEvent_t start,stop;
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// Launch the GPU Kernel here, you may want multiple implementations to compare
total_reduction<<<numOutputElements, BLOCK_SIZE>>>(deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
// Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements*sizeof(float), cudaMemcpyDeviceToHost);
// Reduce any remaining output on host
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time,start, stop);
// ==============================================================
// Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
// Check solution
if(solution[0] == hostOutput[0]) {
printf("The operation was successful \n");
printf("Time: %2.6f \n",elapsed_time);
} else {
printf("The operation failed \n");
}
printf("Expected sum: %0.0f \n", solution[0]);
printf("Computed sum: %0.0f \n", hostOutput[0]);
printf("Number of input elements: %i \n", numInputElements);
printf("Number of output elements: %i \n \n", numOutputElements);
free(hostInput);
free(hostOutput);
return 0;
}
|
0c02737173f7b055d4903d3d0c8d14d2503900dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
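// The functors below implement the two operators exposed by the wrappers at the
// end of this file: aggregation E[b][k][d] = sum_i A[b][i][k] * (X[b][i][d] - C[k][d])
// and the scaled-L2 term SL[b][i][k] = S[k] * ||X[b][i] - C[k]||^2, plus their
// partial derivatives. The reduce* helpers (presumably provided by common.h /
// device_tensor.h) sum a functor over the dimension named in their suffix.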
template<typename DType, typename Acctype>
struct AggOp {
__device__ AggOp(DeviceTensor<DType, 3> a,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c) : A(a), X(x), C(c) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d]));
}
DeviceTensor<DType, 3> A;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
};
template<typename DType, typename Acctype>
struct AggBackOp {
__device__ AggBackOp(DeviceTensor<DType, 3> g,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c) : G(g), X(x), C(c) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d]));
}
DeviceTensor<DType, 3> G;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
};
template<typename DType, typename Acctype>
struct SL2Op {
__device__ SL2Op(DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c) : X(x), C(c) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d)
{
DType r = X[b][i][d] - C[k][d];
return ScalarConvert<DType, Acctype>::to(r * r);
}
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
};
template<typename DType, typename Acctype>
struct SL2GradXOp {
__device__ SL2GradXOp(
DeviceTensor<DType, 3> gsl,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 1> s
) : GSL(gsl), X(x), C(c), S(s) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d)
{
return ScalarConvert<DType, Acctype>::to(
2 * S[k] * GSL[b][i][k] * (X[b][i][d]-C[k][d]));
}
DeviceTensor<DType, 3> GSL;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 1> S;
};
template<typename DType, typename Acctype>
__global__ void Aggregate_Forward_kernel (
DeviceTensor<DType, 3> E,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C) {
/* declarations of the variables */
int b, k, d, N;
/* Get the index and channels */
b = blockIdx.z;
d = blockIdx.x;
k = blockIdx.y;
N = X.getSize(1);
/* main operation */
AggOp<DType, Acctype> g(A, X, C);
E[b][k][d] = reduceN<Acctype>(g, b, k, d, N);
}
template<typename DType, typename Acctype>
__global__ void Aggregate_Backward_kernel (
DeviceTensor<DType, 3> GA,
DeviceTensor<DType, 3> GE,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
k = blockIdx.x;
D = GE.getSize(2);
/* main operation */
AggBackOp<DType, Acctype> g(GE, X, C);
GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D);
}
template<typename DType, typename Acctype>
__global__ void ScaledL2_Forward_kernel (
DeviceTensor<DType, 3> SL,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 1> S) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
k = blockIdx.x;
i = blockIdx.y;
D = X.getSize(2);
/* main operation */
SL2Op<DType, Acctype> g(X,C);
    SL[b][i][k] = S[k] * reduceD<Acctype>(g, b, i, k, D);
}
template<typename DType, typename Acctype>
__global__ void ScaledL2_GradX_kernel (
DeviceTensor<DType, 3> GSL,
DeviceTensor<DType, 3> GX,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 1> S) {
/* declarations of the variables */
int b, d, i, K;
/* Get the index and channels */
b = blockIdx.z;
d = blockIdx.x;
i = blockIdx.y;
K = C.getSize(0);
/* main operation */
SL2GradXOp<DType, Acctype> g(GSL,X,C,S);
GX[b][i][d] = reduceK<Acctype>(g,b,i,d,K);
}
template<typename DType, typename Acctype>
__global__ void ScaledL2_GradC_kernel (
DeviceTensor<DType, 3> GSL,
DeviceTensor<DType, 2> GC,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 1> S) {
/* declarations of the variables */
int k, d, B, N;
/* Get the index and channels */
d = blockIdx.x;
k = blockIdx.y;
B = X.getSize(0);
N = X.getSize(1);
/* main operation */
SL2GradXOp<DType, Acctype> g(GSL,X,C,S);
GC[k][d] = - reduceBN<Acctype>(g, k, d, B, N);
}
}// namespace
at::Tensor Aggregate_Forward_CUDA(
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_) {
/* Device tensors */
auto E_ = torch::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// B, K, D
dim3 blocks(C_.size(1), C_.size(0), X_.size(0));
dim3 threads(getNumThreads(X_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] {
DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
/* kernel function */
hipLaunchKernelGGL(( Aggregate_Forward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, E, A, X, C);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return E_;
}
std::vector<at::Tensor> Aggregate_Backward_CUDA(
const at::Tensor GE_,
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_) {
auto gradA_ = at::zeros_like(A_);
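  // Closed-form gradients of E[b][k][d] = sum_i A[b][i][k] * (X[b][i][d] - C[k][d]):
  // d/dX gives gradX = bmm(A, GE) and d/dC gives
  // gradC[k][d] = -sum_b sum_i A[b][i][k] * GE[b][k][d]; only gradA needs a kernel.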
auto gradX_ = at::bmm(A_, GE_);
auto gradC_ = (-GE_ * A_.sum(1).unsqueeze(2)).sum(0);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// B, K, D
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_);
DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
hipLaunchKernelGGL(( Aggregate_Backward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, GA, GE, A, X, C);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradA_, gradX_, gradC_};
}
at::Tensor ScaledL2_Forward_CUDA(
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor S_) {
auto SL_ = torch::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> SL = devicetensor<scalar_t, 3>(SL_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_);
/* kernel function */
hipLaunchKernelGGL(( ScaledL2_Forward_kernel<scalar_t, scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, SL, X, C, S);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return SL_;
}
std::vector<at::Tensor> ScaledL2_Backward_CUDA(
const at::Tensor GSL_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor S_,
const at::Tensor SL_) {
auto GX_ = at::zeros_like(X_);
auto GC_ = at::zeros_like(C_);
/* kernel function */
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks1(X_.size(2), X_.size(1), X_.size(0));
dim3 threads1(getNumThreads(C_.size(0)));
dim3 blocks2(C_.size(1), C_.size(0));
dim3 threads2(getNumThreads(X_.size(1)));
auto GS_ = (GSL_ * (SL_ / S_.view({1, 1, C_.size(0)}))).sum(0).sum(0);
AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GSL = devicetensor<scalar_t, 3>(GSL_);
DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_);
DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_);
hipLaunchKernelGGL(( ScaledL2_GradX_kernel<scalar_t, scalar_t>)
, dim3(blocks1), dim3(threads1), 0, stream, GSL, GX, X, C, S);
AT_ASSERT(hipGetLastError() == hipSuccess);
hipLaunchKernelGGL(( ScaledL2_GradC_kernel<scalar_t, scalar_t>)
, dim3(blocks2), dim3(threads2), 0, stream, GSL, GC, X, C, S);
AT_ASSERT(hipGetLastError() == hipSuccess);
}));
return {GX_, GC_, GS_};
}
| 0c02737173f7b055d4903d3d0c8d14d2503900dd.cu | #include <vector>
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
template<typename DType, typename Acctype>
struct AggOp {
__device__ AggOp(DeviceTensor<DType, 3> a,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c) : A(a), X(x), C(c) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d]));
}
DeviceTensor<DType, 3> A;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
};
template<typename DType, typename Acctype>
struct AggBackOp {
__device__ AggBackOp(DeviceTensor<DType, 3> g,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c) : G(g), X(x), C(c) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) {
return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d]));
}
DeviceTensor<DType, 3> G;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
};
template<typename DType, typename Acctype>
struct SL2Op {
__device__ SL2Op(DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c) : X(x), C(c) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d)
{
DType r = X[b][i][d] - C[k][d];
return ScalarConvert<DType, Acctype>::to(r * r);
}
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
};
template<typename DType, typename Acctype>
struct SL2GradXOp {
__device__ SL2GradXOp(
DeviceTensor<DType, 3> gsl,
DeviceTensor<DType, 3> x,
DeviceTensor<DType, 2> c,
DeviceTensor<DType, 1> s
) : GSL(gsl), X(x), C(c), S(s) {}
__device__ __forceinline__ Acctype operator()(int b, int i, int k, int d)
{
return ScalarConvert<DType, Acctype>::to(
2 * S[k] * GSL[b][i][k] * (X[b][i][d]-C[k][d]));
}
DeviceTensor<DType, 3> GSL;
DeviceTensor<DType, 3> X;
DeviceTensor<DType, 2> C;
DeviceTensor<DType, 1> S;
};
template<typename DType, typename Acctype>
__global__ void Aggregate_Forward_kernel (
DeviceTensor<DType, 3> E,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C) {
/* declarations of the variables */
int b, k, d, N;
/* Get the index and channels */
b = blockIdx.z;
d = blockIdx.x;
k = blockIdx.y;
N = X.getSize(1);
/* main operation */
AggOp<DType, Acctype> g(A, X, C);
E[b][k][d] = reduceN<Acctype>(g, b, k, d, N);
}
template<typename DType, typename Acctype>
__global__ void Aggregate_Backward_kernel (
DeviceTensor<DType, 3> GA,
DeviceTensor<DType, 3> GE,
DeviceTensor<DType, 3> A,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
i = blockIdx.y;
k = blockIdx.x;
D = GE.getSize(2);
/* main operation */
AggBackOp<DType, Acctype> g(GE, X, C);
GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D);
}
template<typename DType, typename Acctype>
__global__ void ScaledL2_Forward_kernel (
DeviceTensor<DType, 3> SL,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 1> S) {
/* declarations of the variables */
int b, k, i, D;
/* Get the index and channels */
b = blockIdx.z;
k = blockIdx.x;
i = blockIdx.y;
D = X.getSize(2);
/* main operation */
SL2Op<DType, Acctype> g(X,C);
SL[b][i][k] = S[k] * reduceD<Acctype>(g,b,i,k,D);
}
template<typename DType, typename Acctype>
__global__ void ScaledL2_GradX_kernel (
DeviceTensor<DType, 3> GSL,
DeviceTensor<DType, 3> GX,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 1> S) {
/* declarations of the variables */
int b, d, i, K;
/* Get the index and channels */
b = blockIdx.z;
d = blockIdx.x;
i = blockIdx.y;
K = C.getSize(0);
/* main operation */
SL2GradXOp<DType, Acctype> g(GSL,X,C,S);
GX[b][i][d] = reduceK<Acctype>(g,b,i,d,K);
}
template<typename DType, typename Acctype>
__global__ void ScaledL2_GradC_kernel (
DeviceTensor<DType, 3> GSL,
DeviceTensor<DType, 2> GC,
DeviceTensor<DType, 3> X,
DeviceTensor<DType, 2> C,
DeviceTensor<DType, 1> S) {
/* declarations of the variables */
int k, d, B, N;
/* Get the index and channels */
d = blockIdx.x;
k = blockIdx.y;
B = X.getSize(0);
N = X.getSize(1);
/* main operation */
SL2GradXOp<DType, Acctype> g(GSL,X,C,S);
GC[k][d] = - reduceBN<Acctype>(g, k, d, B, N);
}
}// namespace
at::Tensor Aggregate_Forward_CUDA(
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_) {
/* Device tensors */
auto E_ = torch::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// B, K, D
dim3 blocks(C_.size(1), C_.size(0), X_.size(0));
dim3 threads(getNumThreads(X_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] {
DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
/* kernel function */
Aggregate_Forward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>>(E, A, X, C);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return E_;
}
std::vector<at::Tensor> Aggregate_Backward_CUDA(
const at::Tensor GE_,
const at::Tensor A_,
const at::Tensor X_,
const at::Tensor C_) {
auto gradA_ = at::zeros_like(A_);
auto gradX_ = at::bmm(A_, GE_);
auto gradC_ = (-GE_ * A_.sum(1).unsqueeze(2)).sum(0);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// B, K, D
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_);
DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_);
DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
Aggregate_Backward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>> (GA, GE, A, X, C);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradA_, gradX_, gradC_};
}
at::Tensor ScaledL2_Forward_CUDA(
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor S_) {
auto SL_ = torch::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(C_.size(0), X_.size(1), X_.size(0));
dim3 threads(getNumThreads(C_.size(1)));
AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> SL = devicetensor<scalar_t, 3>(SL_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_);
/* kernel function */
ScaledL2_Forward_kernel<scalar_t, scalar_t>
<<<blocks, threads, 0, stream>>> (SL, X, C, S);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return SL_;
}
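/* Usage sketch (illustrative only, shapes inferred from the kernel indexing above):
   X_ is (B, N, D), C_ is (K, D) and S_ is (K,); the returned SL_ has shape
   (B, N, K) with SL[b][i][k] = S[k] * ||X[b][i] - C[k]||^2, e.g.
     auto SL = ScaledL2_Forward_CUDA(X, C, S);
*/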
std::vector<at::Tensor> ScaledL2_Backward_CUDA(
const at::Tensor GSL_,
const at::Tensor X_,
const at::Tensor C_,
const at::Tensor S_,
const at::Tensor SL_) {
auto GX_ = at::zeros_like(X_);
auto GC_ = at::zeros_like(C_);
/* kernel function */
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks1(X_.size(2), X_.size(1), X_.size(0));
dim3 threads1(getNumThreads(C_.size(0)));
dim3 blocks2(C_.size(1), C_.size(0));
dim3 threads2(getNumThreads(X_.size(1)));
auto GS_ = (GSL_ * (SL_ / S_.view({1, 1, C_.size(0)}))).sum(0).sum(0);
AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> GSL = devicetensor<scalar_t, 3>(GSL_);
DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_);
DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_);
DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_);
DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_);
DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_);
ScaledL2_GradX_kernel<scalar_t, scalar_t>
<<<blocks1, threads1, 0, stream>>> (GSL, GX, X, C, S);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
ScaledL2_GradC_kernel<scalar_t, scalar_t>
<<<blocks2, threads2, 0, stream>>> (GSL, GC, X, C, S);
AT_ASSERT(cudaGetLastError() == cudaSuccess);
}));
return {GX_, GC_, GS_};
}
|
a3320bc08ef13444f284d7a4e2183c3144bdb2bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
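/* Single-source shortest-path relaxation kernel (SPFA / Bellman-Ford style).
   The graph appears to be stored in CSR form: V holds the row offsets, E the
   adjacency targets and W the edge weights. The threads of one block
   repeatedly relax dist[] with atomicMin, marking touched vertices in
   visit[], until no thread improves a distance (QuickExit stays 0). */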
__global__ void spfaKernelForSSSP(int *V, int *E, int *W, int *n, bool *visit,int *dist){
int old=0, u, v;
__shared__ int QuickExit;
const int threadId = threadIdx.z*(blockDim.x * blockDim.y)+ threadIdx.y* blockDim.x+ threadIdx.x;
const int blockSize =blockDim.x * blockDim.y * blockDim.z;
while(1)/* this while can solve a sssp */
{
u = threadId;
QuickExit = 0;
while(u < (*n))
{
for(int adj = V[u]; adj < V[u+1]; adj++)
{
v = E[adj];
old=atomicMin( &dist[v] , dist[u] + W[adj]);
if(old>dist[v])
{
QuickExit=1;
visit[v]=1;
}
}
u+=blockSize;
}
__syncthreads();
if(QuickExit==0){
break;
}
}
} | a3320bc08ef13444f284d7a4e2183c3144bdb2bb.cu | __global__ void spfaKernelForSSSP(int *V, int *E, int *W, int *n, bool *visit,int *dist){
int old=0, u, v;
__shared__ int QuickExit;
const int threadId = threadIdx.z*(blockDim.x * blockDim.y)+ threadIdx.y* blockDim.x+ threadIdx.x;
const int blockSize =blockDim.x * blockDim.y * blockDim.z;
while(1)/* this while can solve a sssp */
{
u = threadId;
QuickExit = 0;
while(u < (*n))
{
for(int adj = V[u]; adj < V[u+1]; adj++)
{
v = E[adj];
old=atomicMin( &dist[v] , dist[u] + W[adj]);
if(old>dist[v])
{
QuickExit=1;
visit[v]=1;
}
}
u+=blockSize;
}
__syncthreads();
if(QuickExit==0){
break;
}
}
} |
9c23cb6624ffdb2b629815ad421f976ce95d1660.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <string.h>
__global__ void fun(int *z){
register int a[5] = {0};
a[0];
printf("%d\n", a[0]);
}
int main(void)
{
int z;
int *dev_z;
hipMalloc((void**)&dev_z, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_z);
hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_z);
return 0;
}
| 9c23cb6624ffdb2b629815ad421f976ce95d1660.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <string.h>
__global__ void fun(int *z){
register int a[5] = {0};
a[0];
printf("%d\n", a[0]);
}
int main(void)
{
int z;
int *dev_z;
cudaMalloc((void**)&dev_z, sizeof(int));
fun<<<1,1>>>(dev_z);
cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_z);
return 0;
}
|
fe0c1cd01ea2ac2112cdf6ea099bab9d4a484ab0.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 32, 32>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
ThreadBlockShape,
ThreadShape,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
BatchedGemmCoord const& problem_size,
const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a,
const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b,
typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| fe0c1cd01ea2ac2112cdf6ea099bab9d4a484ab0.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_gemv_batched_strided_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl"
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 32, 32>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<
ThreadBlockShape,
ThreadShape,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor,
float, cutlass::layout::RowMajor>;
template void megdnn::cuda::cutlass_wrapper::
cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>(
BatchedGemmCoord const& problem_size,
const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a,
const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b,
typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
c1074abe6955b65fead812580b1c95ad5a784965.hip | // !!! This is a file automatically generated by hipify!!!
// All includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "rocm_smi/rocm_smi.h"
#include "cuda_functions.h"
// includes, project
//#include "magma.h"
#include "cuda_multi_gemm_unif.cu"
//#include "cuda_add_vec.h"
//My includes
#include "debug_fns.h"
// #include "transformations.h"
//switch the comments to toggle debug mode
//#define D
#define D for(;0;)
#if 0
double get_time( void )
{
struct timeval t;
gettimeofday( &t, NULL );
return t.tv_sec + t.tv_usec*1e-6;
}
#endif
__global__ void vecCopy(double *a, double*b, int jobs, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
/*if (id < n){
for(int i = 0; i< jobs; i++)
b[i*n+id] = a[i*n+id];
}*/
if(id < n*jobs)
b[id] = a[id];
}
__global__ void vecMul(double *a, double*b, int jobs, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
/*if (id < n){
for(int i = 0; i< jobs; i++)
b[i*n+id] = b[i*n+id] * a[i*n+id];
}*/
if(id < n*jobs)
b[id] = b[id] * a[id];
}
__global__ void full2face(double *vols, double*faces, int nel, int n, int nxyz, int*iface){
//6 faces, each of size nx * nz => n = nx*nz *6
//vols: e elements each of size nx*ny*nz => nxyz = nx*ny*nz
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*n){//n = nxyz
int e = id/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[id];//[e][j];
faces[id]/*[e][j]*/ = vols[e*nxyz+i-1]/*[e][i]*/;
//faces[id] = 2.55;
}
//if(id==0)
//printf("in kernel*******\n");
}
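/* face2full scatters face data back into the volume array, accumulating the
   face contributions into vols (the inverse of full2face above). Note that
   iface carries 1-based Fortran indices, so the volume index is i-1. */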
__global__ void face2full(double *vols, double*faces, int nel, int n, int nxyz, int*iface){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*n){//n = nxyz
int e = id/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[id];//[e][j]
int volIndex = e*nxyz+i-1; // iface holds 1-based (Fortran) indices, as in full2face/addfull2face
vols[volIndex] = vols[volIndex] + faces[id];
}
}
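/* faceu gathers all toteq conserved variables of u onto the element faces,
   one value per (equation, element, face point), using the same iface
   gather list as full2face. */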
__global__ void faceu(double *u, double*faces, int toteq, int nel, int n, int nxyz, int*iface){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<toteq*nel*n){
int ivar = id/(nel*n);
int e_n = id%(nel*n);
int e = e_n/n;
int i = iface[e_n];
faces[id] = u[e*(toteq*nxyz)+ivar*nxyz+i-1];
}
}
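/* fillq gathers the five fields vtrans, vx, vy, vz and pr onto the element
   faces in a single pass; ivar (derived from the thread id) selects which
   field a given thread copies. */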
__global__ void fillq(double *vtrans, double *vx, double *vy, double *vz, double*pr, double*faces, int nel, int n, int nxyz, int*iface, int size){
//6 faces, each of size nx * nz => n = nx*nz *6
//vols: e elements each of size nx*ny*nz => nxyz = nx*ny*nz
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<size){
int ivar = id/(nel*n);
int e_n = id%(nel*n);
int e = e_n/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[e_n];//[e][j];
if(ivar==0)
faces[id] = vtrans[e*nxyz+i-1];
else if(ivar==1)
faces[id] = vx[e*nxyz+i-1];
else if(ivar==2)
faces[id] = vy[e*nxyz+i-1];
else if(ivar==3)
faces[id] = vz[e*nxyz+i-1];
else if(ivar==4)
faces[id] = pr[e*nxyz+i-1];
//faces[id]/*[e][j]*/ = vols[ivar*(nxyz*nel)+e*nxyz+i-1]/*[e][i]*/;
//faces[id] = 2.55;
}
//if(id==0)
//printf("in kernel*******\n");
}
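/* Host wrapper callable from Fortran (note the trailing underscore) that
   launches faceu. The inCPU flag selects whether the arrays still live on the
   host and must be copied to the device first; it is hard-wired to false
   here, i.e. u, faces and iface are assumed to already be device pointers. */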
extern "C" void faceuwrapper_(int *toteq1, int *n1, int *nxyz1, int*nel1, double *u, double *faces, int *iface){
int toteq = toteq1[0];
int n = n1[0];
int nxyz = nxyz1[0];
int nel = nel1[0];
double *d_u, *d_faces;
int *d_iface;
bool inCPU = false;
if(inCPU){
hipMalloc(&d_u, nxyz*nel*sizeof(double)*toteq);
hipMalloc(&d_iface, n*nel*sizeof(int));
hipMalloc(&d_faces, n*nel*sizeof(double)*toteq);
hipMemcpy( d_u, u, nxyz*nel*sizeof(double)*toteq, hipMemcpyHostToDevice);
hipMemcpy( d_iface, iface, n*nel*sizeof(int), hipMemcpyHostToDevice);
}
else{
//just assign
d_u = u;
d_iface = iface;
d_faces = faces;
}
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*nel*toteq/blockSize);
// Execute the kernel
//printf("block size = %d, grid size = %d\n",blockSize,gridSize);
hipLaunchKernelGGL(( faceu), dim3(gridSize), dim3(blockSize), 0, 0, d_u, d_faces, toteq, nel, n, nxyz, d_iface);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 5: %s\n",hipGetErrorString(code));
}
if(inCPU){
hipMemcpy( faces, d_faces, n*nel*sizeof(double)*toteq, hipMemcpyDeviceToHost );
hipFree(d_u);
hipFree(d_faces);
hipFree(d_iface);
}
}
extern "C" void copyqq_(double*qq, double * faces, int*size){
hipMemcpy( faces, qq, size[0]*sizeof(double)*5, hipMemcpyDeviceToHost );
hipFree(qq);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error in copyqq, str: %s\n",hipGetErrorString(code));
}
}
extern "C" void fillqwrapper_(double *vols_vtrans, double *vols_vx, double *vols_vy, double *vols_vz, double *vols_pr, double*faces, int *nel1, int *n1, int *nxyz1, int*iface, bool device_arr, bool pull_result){
int nel = nel1[0];
int n = n1[0];
int nxyz = nxyz1[0];
//printf("nel = %d, n = %d, nxyz=%d\n",nel,n,nxyz);
/*for(int index = 0; index <4; index++){
printf("vols_t[%d]=%f,vols_x=%f,vols_y=%f,vols_pr=%f\n",index,vols_vtrans[index],vols_vx[index],vols_vy[index],vols_pr[index]);
printf("iface[%d]=%d\n",index,iface[index]);
}*/
//double *d_vols_vtrans, *d_vols_vx, d_vols_vy, d_vols_vz, d_vols_pr;
double *d_vols;
double *d_vtrans,*d_vx,*d_vy,*d_vz,*d_pr;
double *d_faces;
int *d_iface;
// allocate device vectors memory
/*hipMalloc(&d_vols_vtrans, nxyz*nel*sizeof(double));
hipMalloc(&d_vols_vx, nxyz*nel*sizeof(double));
hipMalloc(&d_vols_vy, nxyz*nel*sizeof(double));
hipMalloc(&d_vols_vz, nxyz*nel*sizeof(double));
hipMalloc(&d_vols_pr, nxyz*nel*sizeof(double));*/
bool inCPU = false;
if(inCPU){
hipMalloc(&d_vols, nxyz*nel*sizeof(double)*5);
hipMalloc(&d_faces, n*nel*sizeof(double)*5);
hipMalloc(&d_iface, n*nel*sizeof(int));
hipMemcpy( d_vols, vols_vtrans, nxyz*nel*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_vols+nxyz*nel, vols_vx, nxyz*nel*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_vols+2*nxyz*nel, vols_vy, nxyz*nel*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_vols+3*nxyz*nel, vols_vz, nxyz*nel*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_vols+4*nxyz*nel, vols_pr, nxyz*nel*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_iface, iface, n*nel*sizeof(int), hipMemcpyHostToDevice);
}
else{
//send vols_vtrans = all vols
//just assign
d_vols = vols_vtrans;
d_vtrans = vols_vtrans;
d_vx = vols_vx;
d_vy = vols_vy;
d_vz = vols_vz;
d_pr = vols_pr;
d_faces = faces;
d_iface = iface;
}
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*nel*5/blockSize);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 6 before: %s\n",hipGetErrorString(code));
}
// Execute the kernel
//printf("block size = %d, grid size = %d\n",blockSize,gridSize);
hipLaunchKernelGGL(( fillq), dim3(gridSize), dim3(blockSize), 0, 0, d_vtrans,d_vx,d_vy,d_vz,d_pr, d_faces, nel, n, nxyz, d_iface,5*nel*n);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 6: %s\n",hipGetErrorString(code));
}
if(inCPU){
hipMemcpy( faces, d_faces, n*nel*sizeof(double)*5, hipMemcpyDeviceToHost );
hipFree(d_faces);
hipFree(d_vols);
hipFree(d_iface);
}
}
extern "C" void full2facewrapper_(double *vols, double*faces, int *nel1, int *n1, int *ivar1, int *nxyz1, int*iface, bool device_arr, bool pull_result){
//test printing
int nel = nel1[0];
int n = n1[0];
int nxyz = nxyz1[0];
int ivar = ivar1[0]-1;
//printf("nel = %d, n = %d, nxyz=%d, ivar=%d\n",nel,n,nxyz,ivar);
/*for(int index = 0; index <4; index++){
printf("vols[%d]=%f\n",index,vols[index]);
printf("iface[%d]=%d\n",index,iface[index]);
}*/
// n = nx * nz
// Device input arrays
double *d_vols;
double *d_faces;
int *d_iface;
// allocate device vectors memory
hipMalloc(&d_vols, nxyz*nel*sizeof(double));
hipMalloc(&d_faces, n*nel*sizeof(double));
hipMalloc(&d_iface, n*nel*sizeof(int));
hipMemcpy( d_vols, vols, nxyz*nel*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_iface, iface, n*nel*sizeof(int), hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*nel/blockSize);
// Execute the kernel
//printf("block size = %d, grid size = %d\n",blockSize,gridSize);
hipLaunchKernelGGL(( full2face), dim3(gridSize), dim3(blockSize), 0, 0, d_vols, d_faces, nel, n, nxyz, d_iface);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 7: %s\n",hipGetErrorString(code));
}
hipMemcpy( faces+ivar*n*nel, d_faces, n*nel*sizeof(double), hipMemcpyDeviceToHost );
/*for(int index = 0; index <4; index++){
printf("faces[%d]=%f\n",index,faces[index]);
}*/
// Release device memory
hipFree(d_faces);
hipFree(d_vols);
hipFree(d_iface);
}
__global__ void rzero (double *arr, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
arr[id] = 0.0;
}
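/* surfaceintegral_flux scales every face flux value by the face area and by
   phig (presumably the gas volume fraction) sampled at the corresponding
   volume point; the index[] table built on the host below holds the i/j/k
   ranges that pick out each of the six faces of an nx1 x ny1 x nz1 element. */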
__global__ void surfaceintegral_flux(double * flux, double *area, double *phig, int * index, int toteq, int nelt, int nface, int nxz, int ny){
int size = toteq * nelt * nface;
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<size){
int eq = id/(nelt*nface);
int e_f = id%(nelt*nface);
int e = e_f/(nface);
int f = e_f%nface;
int count = 0;
for(int i = index[f*6+4]; i < index[f*6+5]; i++){
for(int j = index[f*6+2]; j <index[f*6+3]; j++){
for(int k = index[f*6+0]; k < index[f*6+1]; k++){
int l = eq*(nelt*nface*nxz)+e*(nface*nxz)+f*(nxz)+count;
flux[l] = flux[l] * area[e*(nface*nxz)+f*nxz+count++] * phig[e*ny*nxz+i*nxz+j*ny+k];
}
}
}
}
}
__global__ void addfull2face(double *vols, double*faces, int nel, int n, int nxyz, int*iface, int size){
//6 faces, each of size nx * nz => n = nx*nz *6
//vols: e elements each of size nx*ny*nz => nxyz = nx*ny*nz
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<size){
int eq = id/(nel*n);
int e_n = id%(nel*n);
int e = e_n/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[e_n];//[e][j];
int volIndex = eq*(nel*nxyz)+e*nxyz+i-1;
vols[volIndex] = vols[volIndex] + faces[id];
}
}
//extern "C" void surfaceintegralwrapper_(int *toteq1, int *nx1, int*ny1, int*nz1, int *nelt1, int *nface1, double* faces, double *area, double *phig, double *vols, int *iface){
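/* Host wrapper for the surface integral: zeroes the volume residual, scales
   the face fluxes by area and phig (surfaceintegral_flux), then scatter-adds
   the faces back into the residual (addfull2face). The index[] table below
   stores, for each of the six faces, the inclusive i/j/k loop bounds that
   select that face inside an element. */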
extern "C" void surfaceintegralwrapper_(double* faces, double *area, double *phig, double *vols, int *iface, int *toteq1, int *nx1, int*ny1, int*nz1, int *nelt1, int *nface1){
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
int nx = nx1[0];
int ny = ny1[0];
int nz = nz1[0];
int nelt = nelt1[0];
int nface = nface1[0];
int toteq = toteq1[0];
//printf("nface = %d, nx = %d, ny = %d, nz = %d, nelt = %d, toteq=%d, faces[0]=%f\n", nface,nx,ny,nz,nelt,toteq,/*faces[nelt*nface*nx*nz-1]*/vols[nelt*nx*ny*nz]);
int * index = new int[nface*6];
for(int i =0; i<nface; i++){
index[i*6+0] = 0;
index[i*6+1] = nx-1;
index[i*6+2] = 0;
index[i*6+3] = ny-1;
index[i*6+4] = 0;
index[i*6+5] = nz-1;
}
index[0*6+3] = 0;
index[1*6+0] = nx-1;
index[2*6+2] = ny-1;
index[3*6+1] = 0;
index[4*6+5] = 0;
index[5*6+4] = nz-1;
double *d_faces, *d_area, *d_phig, *d_vols;
int *d_index, *d_iface;
bool dataInGPU = true;
hipError_t code ;
if(dataInGPU){
d_faces = faces;
d_area = area;
d_phig = phig;
d_vols = vols;
d_iface = iface;
}
else{
//memory allocation
hipMalloc(&d_faces, toteq*nelt*nx*nz*nface*sizeof(double));
hipMalloc(&d_area, nelt*nface*nx*nz*sizeof(double)); //ask about area
hipMalloc(&d_phig, nelt*nx*ny*nz*sizeof(double));
hipMalloc(&d_vols, toteq*nelt*nx*ny*nz*sizeof(double));
hipMalloc(&d_iface, nelt*nface*nx*nz*sizeof(int));
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error in malloc, str: %s\n",hipGetErrorString(code));
}
//data transfer
hipMemcpy( d_area, area, nelt*nface*nx*nz*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_phig, phig, nelt*nx*ny*nz*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_iface, iface, nelt*nface*nx*nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_faces, faces, toteq*nelt*nx*nz*nface*sizeof(double), hipMemcpyHostToDevice);
}
hipMalloc(&d_index, nface*6*sizeof(int));
hipMemcpy( d_index, index, nface*6*sizeof(int), hipMemcpyHostToDevice);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error in memcpy, str: %s\n",hipGetErrorString(code));
}
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
int ntot = toteq*nelt*nx*ny*nz;
gridSize = (int)ceil((float)ntot/blockSize);
hipLaunchKernelGGL(( rzero), dim3(gridSize), dim3(blockSize), 0, 0, d_vols, ntot);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error in flux, str: %s\n",hipGetErrorString(code));
}
gridSize = (int)ceil((float)toteq*nelt*nface/blockSize);
hipLaunchKernelGGL(( surfaceintegral_flux), dim3(gridSize), dim3(blockSize), 0, 0, d_faces, d_area, d_phig, d_index, toteq, nelt, nface, nx*nz, ny);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error in flux, str: %s\n",hipGetErrorString(code));
}
gridSize = (int)ceil((float)toteq*nelt*nx*nz*nface/blockSize);
hipLaunchKernelGGL(( addfull2face), dim3(gridSize), dim3(blockSize), 0, 0, d_vols, d_faces, nelt, nx*nz*nface, nx*ny*nz, d_iface,toteq*nelt*nx*nz*nface);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error in full2face, str: %s\n",hipGetErrorString(code));
}
if(!dataInGPU){
hipMemcpy( vols, d_vols, toteq*nelt*nx*ny*nz*sizeof(double), hipMemcpyDeviceToHost );
hipMemcpy( faces, d_faces, toteq*nelt*nface*nx*nz*sizeof(double), hipMemcpyDeviceToHost );
// Release device memory
hipFree(d_faces);
hipFree(d_vols);
hipFree(d_iface);
hipFree(d_area);
hipFree(d_phig);
}
hipFree(d_index);
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("surfcae integral time is %f\n",time*1e-03);
}
//mxm multiplication
__global__ void mxm(double *a, int n1, double *b, int n2, double *c, int n3, int nel, int aSize, int bSize, int cSize, int extraEq){
//calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c
//in fortran the original calculation was
// c(n3,n1) = b(n3,n2) X a(n2,n1)
// a,b,cSize are single element size
//extraEq, in case of a matrix has equation as an index
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*n1*n3){
int e = id/(n1*n3);
int rc = id%(n1*n3);
int i = rc/n3;
int j = rc%n3;
int cid = e*cSize + rc;
int aid = e*aSize + extraEq + i*n2;
int bid = e*bSize + j;
c[cid] = 0;
for(int k = 0; k<n2; k++)
c[cid]+=a[aid+k]*b[bid+k*n3];
}
}
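/* Worked example of the transposition convention described above (an
   illustrative sketch only; this helper is not called anywhere). A Fortran
   call mxm(a,n1,b,n2,c,n3), i.e. c(n1,n3) = a(n1,n2)*b(n2,n3) in column-major
   storage, is reproduced by launching the kernel with the operands swapped,
   because column-major (n1,n2) data reads as a row-major (n2,n1) matrix. */
#if 0
void mxm_host_sketch(double *d_a, int n1, double *d_b, int n2, double *d_c,
                     int n3, int nel)
{
    int blockSize = 1024;
    int gridSize = (int)ceil((float)nel*n1*n3/blockSize);
    /* c = a*b (Fortran, column-major) == c^T = b^T * a^T (C, row-major) */
    hipLaunchKernelGGL((mxm), dim3(gridSize), dim3(blockSize), 0, 0,
                       d_b, n3, d_a, n2, d_c, n1, nel,
                       n2*n3, n1*n2, n1*n3, 0);
}
#endif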
// specmpn routine in fortran
void specmpn(double *d_b, int nb, double *d_a, int na, double * d_ba, double* d_ab, bool if3d, double * d_w, int ldw, int nel, int neq, int eq, bool second_eq){
//d_a is array(na,na,na)*nel, d_b(nb,nb,nb)*nel, w(ldw)*nel where ldw = na*na*nb+nb*nb*na
//d_a is array of nel each array(na,na,na)
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
hipStream_t stream;
hipStreamCreate( &stream );
const double alpha = 1;
const double beta = 0;
if(if3d){
int nab = na*nb;
int nbb = nb*nb;
//calc w = ba*a in fortran
//so in c calc wt = at * bat
//call mxm(ba,nb,a,na,w,na*na)
//in fortran calc w(nb,na*na) = ba(nb,na) * a(na,na*na)
//in c w(na*na,nb) = a(na*na,na) * ba(na,nb)
//neq = 1 if array not indexed by eq and eq = 0
int aSize = neq*pow(na,3), bSize = pow(nb,3);
gridSize = (int)ceil((float)na*na*nb*nel/blockSize);
//mxm<<<gridSize, blockSize>>>(d_a,na*na, d_ba, na, d_w, nb, nel, aSize, 0, ldw, eq*pow(na,3));
cuda_multi_gemm_unif(stream, 'N', 'N', nb, na, na*na, &alpha, d_ba, nb, 0, d_a, na, aSize, &beta, d_w, nb, ldw, nel, gridSize);
int k = 0, l = na*na*nb;
for(int iz=0; iz<na;iz++){
//calc in fortran wl(nb*nb) = wk(nb*na) * ab(na*nb)
//in c wl(nb*nb) = ab(nb*na) * wk(na*nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
//mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w+k, na, d_w+l, nb, nel, 0, ldw, ldw, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nb, na, nb, &alpha, d_w+k, nb, ldw, d_ab, na, 0, &beta, d_w+l, nb, ldw, nel, gridSize);
k = k + nab;
l = l + nbb;
}
l = na*na*nb;
//calc in fortran b(nb*nb,nb) = wl(nb*nb,na)* ab(na,nb)
//in C b(nb,nb*nb) = ab(nb,na) * wl(na,nb*nb)
gridSize = (int)ceil((float)nb*nb*nb*nel/blockSize);
//mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w+l, na, d_b, nb*nb, nel, 0, ldw, bSize, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nb*nb, na, nb, &alpha, d_w+l, nb*nb, ldw, d_ab, na, 0, &beta, d_b, nb*nb, bSize, nel, gridSize);
}
else{
//calc w(nb*na) = ba(nb,na) * a(na,na) in fortran,
//in C w(na*nb) = a(na,na) * ba(na,nb)
gridSize = (int)ceil((float)na*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_a,na, d_ba, na, d_w, nb, nel, neq*na*na, 0, ldw, eq*na*na);
//in fortran, b(nb,nb) = w(nb,na)*ab(na,nb)
//in C b(nb,nb) = ab(nb,na) * w(na,nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_ab,nb, d_w, na, d_b, nb, nel, 0, ldw, nb*nb, 0);
}
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 1: %s\n",hipGetErrorString(code));
}
hipStreamDestroy(stream);
}
void specmpn_old(double *d_b, int nb, double *d_a, int na, double * d_ba, double* d_ab, bool if3d, double * d_w, int ldw, int nel, int neq, int eq, bool second_eq){
//d_a is array(na,na,na)*nel, d_b(nb,nb,nb)*nel, w(ldw)*nel where ldw = na*na*nb+nb*nb*na
//d_a is array of nel each array(na,na,na)
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
if(if3d){
int nab = na*nb;
int nbb = nb*nb;
//calc w = ba*a in fortran
//so in c calc wt = at * bat
//call mxm(ba,nb,a,na,w,na*na)
//in fortran calc w(nb,na*na) = ba(nb,na) * a(na,na*na)
//in c w(na*na,nb) = a(na*na,na) * ba(na,nb)
//neq = 1 if array not indexed by eq and eq = 0
int aSize = neq*pow(na,3), bSize = pow(nb,3);
gridSize = (int)ceil((float)na*na*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_a,na*na, d_ba, na, d_w, nb, nel, aSize, 0, ldw, eq*pow(na,3));
int k = 0, l = na*na*nb;
for(int iz=0; iz<na;iz++){
//calc in fortran wl(nb*nb) = wk(nb*na) * ab(na*nb)
//in c wl(nb*nb) = ab(nb*na) * wk(na*nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_ab,nb, d_w+k, na, d_w+l, nb, nel, 0, ldw, ldw, 0);
k = k + nab;
l = l + nbb;
}
l = na*na*nb;
//calc in fortran b(nb*nb,nb) = wl(nb*nb,na)* ab(na,nb)
//in C b(nb,nb*nb) = ab(nb,na) * wl(na,nb*nb)
gridSize = (int)ceil((float)nb*nb*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_ab,nb, d_w+l, na, d_b, nb*nb, nel, 0, ldw, bSize, 0);
}
else{
//calc w(nb*na) = ba(nb,na) * a(na,na) in fortran,
//in C w(na*nb) = a(na,na) * ba(na,nb)
gridSize = (int)ceil((float)na*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_a,na, d_ba, na, d_w, nb, nel, neq*na*na, 0, ldw, eq*na*na);
//in fortran, b(nb,nb) = w(nb,na)*ab(na,nb)
//in C b(nb,nb) = ab(nb,na) * w(na,nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, d_ab,nb, d_w, na, d_b, nb, nel, 0, ldw, nb*nb, 0);
}
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 2: %s\n",hipGetErrorString(code));
}
}
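/* Small element-wise helpers used by evaluate_conv_h below: replicate_3
   duplicates the first n entries of a into the second and third slots,
   nekcol2_conv multiplies the three convh slots by vx, vy and vz
   respectively, nekadd2col2 performs a += b*c, and the merge_* variants fuse
   those steps into a single kernel. */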
__global__ void replicate_3(double *a, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
a[n+id] = a[id];
a[2*n+id] = a[id];
}
}
__global__ void nekcol2_conv(double* convh, double *vx, double *vy, double *vz, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
convh[id] = convh[id] * vx[id];
convh[n+id] = convh[n+id] * vy[id];
convh[2*n+id] = convh[2*n+id] * vz[id];
}
}
__global__ void merge_replicate_conv(double* convh, double *vx, double *vy, double *vz, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
convh[n+id] = convh[id] * vy[id];
convh[2*n+id] = convh[id] * vz[id];
convh[id] = convh[id] * vx[id];
}
}
__global__ void nekadd2col2(double *a, double *b, double *c, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
a[id] = a[id] + b[id] * c[id];
}
}
__global__ void merge_replicate_conv_add2col2(double* convh, double *b, double *c, double *vx, double *vy, double *vz, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
convh[id] = convh[id] + b[id] * c[id];
convh[n+id] = convh[id] * vy[id];
convh[2*n+id] = convh[id] * vz[id];
convh[id] = convh[id] * vx[id];
}
}
void evaluate_conv_h(int nel, int neq, int eq, int ndim, int ldw, double *jgl, double *jgt, double * convh, double *u, int nx1, int nxd, int nd, int n1, double *ju1, double*ju2, double*phig, double*pr, double *vxd, double *vyd, double *vzd, double *w, bool if3d){
//for now totalh = convh so we can pass totalh instead of convh
//nd = nel * nxd * nyd * nzd
//n1 = nel * nx1 * ny1 * nz1
//modify fortran code, convh(nx^3,ndim) -> convh(nx^3,nel,ndim)
//initially for each element, each equation do
//do for equation 1
/*int ldw = 2* pow(2*nxd,ndim);
double *w;
hipMalloc(&w, nel*ldw*sizeof(double));*/
int nx1_3 = pow(nx1,3);
if(eq == 0)
for(int j = 0; j<ndim;j++)
specmpn(convh+j*nd, nxd, u+(j+1)*nx1_3 ,nx1, jgl, jgt, if3d, w, ldw, nel, neq, j+1, true);
else{
specmpn(ju1, nxd, phig, nx1, jgl, jgt, if3d, w, ldw, nel, 1, 0,true);
specmpn(ju2, nxd, pr, nx1, jgl, jgt, if3d, w, ldw, nel, 1, 0,true);
if(eq<4){
specmpn(convh, nxd, u+eq*nx1_3, nx1, jgl, jgt, if3d, w, ldw, nel, neq, eq,true);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
gridSize = (int)ceil((float)nd/blockSize);
//merge_replicate_conv<<<gridSize, blockSize>>>(convh,vxd,vyd,vzd,nd);
hipLaunchKernelGGL(( replicate_3), dim3(gridSize), dim3(blockSize), 0, 0, convh,nd);
hipLaunchKernelGGL(( nekcol2_conv), dim3(gridSize), dim3(blockSize), 0, 0, convh,vxd,vyd,vzd,nd);
hipLaunchKernelGGL(( nekadd2col2), dim3(gridSize), dim3(blockSize), 0, 0, convh+(eq-1)*nd,ju1,ju2,nd);
}
else if(eq==4){
specmpn(convh, nxd, u+eq*nx1_3, nx1, jgl, jgt, if3d, w, ldw, nel,neq,eq,true);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
gridSize = (int)ceil((float)nd/blockSize);
//merge_replicate_conv_add2col2<<<gridSize, blockSize>>>(convh,ju1,ju2,vxd,vyd,vzd,nd);
hipLaunchKernelGGL(( nekadd2col2), dim3(gridSize), dim3(blockSize), 0, 0, convh,ju1,ju2,nd);
hipLaunchKernelGGL(( replicate_3), dim3(gridSize), dim3(blockSize), 0, 0, convh,nd);
hipLaunchKernelGGL(( nekcol2_conv), dim3(gridSize), dim3(blockSize), 0, 0, convh,vxd,vyd,vzd,nd);
}
}
}
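/* nekadd2col2_u contracts the flux vector totalh (ndim components per point)
   with the metric terms stored in rx; offset selects which block of rx is
   used (0, ndim or 2*ndim for the r, s and t directions in the calls below),
   producing one reference-space flux component per dealiased point. */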
__global__ void nekadd2col2_u(double * u, double *totalh, double *rx, int nel, int n, int ndim, int offset){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n*nel){
int e = id/n;
int i = id%n;
u[id] = 0;
for(int j = 0; j<ndim; j++)
u[id] += totalh[j*(nel*n)+id] * rx[e*(3*ndim*n)+(j+offset)*n+i];
}
}
__global__ void neksub2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]-=b[id];
}
__global__ void nekadd2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]+=b[id];
}
__global__ void nekcol2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]*=b[id];
}
__global__ void nekcol2_ud(double *a, double*b, int nel, int nx1_3, int nxd_3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nx1_3){
int e = id/nx1_3;
int i = id%nx1_3;
a[e*nxd_3+i]*=b[id];
}
}
__global__ void nekcopy(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]=b[id];
}
extern "C" void nekcopywrapper_(double *a, double *b, int *n){
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
hipLaunchKernelGGL(( nekcopy), dim3(gridSize), dim3(blockSize), 0, 0, a,b,n[0]);
}
__global__ void neksubcol3_res1(double *a, double *b, double *c, int nel, int nx1_3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nx1_3){
int i = id%nx1_3;
a[id] = a[id] - b[i] * c[id];
}
}
void local_grad3_t(double *u, double *ur, double *us, double *ut, int nxd, double *d, double *dt, double *w, int nel){
int nxd_2 = nxd * nxd;
int nxd_3 = nxd_2 * nxd;
// u(nxd,nxd*nxd) = dt(nxd,nxd) * ur(nxd, nxd*nxd) fortran
// u(nxd*nxd,nxd) = ur(nxd*nxd, nxd) * dt(nxd,nxd) C
int blockSize=1024, gridSize;
hipStream_t stream;
hipStreamCreate( &stream );
const double alpha = 1;
const double beta = 0;
gridSize = (int)ceil((float)nel*nxd_3/blockSize);
//mxm<<<gridSize, blockSize>>>(ur,nxd_2, dt, nxd, u, nxd, nel, nxd_3, 0, nxd_3, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nxd, nxd_2, &alpha, dt, nxd, 0, ur, nxd, nxd_3, &beta, u, nxd, nxd_3, nel, gridSize);
for(int k = 0; k<nxd;k++){
//wk(nxd,nxd) = usk(nxd,nxd)*D(nxd,nxd) fortran
//wk(nxd,nxd) = D(nxd,nxd)*usk(nxd,nxd) C
gridSize = (int)ceil((float)nel*nxd_2/blockSize);
//mxm<<<gridSize, blockSize>>>(d,nxd, us+k*nxd_2, nxd, w+k*nxd_2, nxd, nel, 0, nxd_3, nxd_3, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nxd, nxd, &alpha, us+k*nxd_2, nxd, nxd_3, d, nxd, 0, &beta, w+k*nxd_2, nxd, nxd_3, nel, gridSize);
}
gridSize = (int)ceil((float)nel*nxd_3/blockSize);
hipLaunchKernelGGL(( nekcopy), dim3(gridSize), dim3(blockSize), 0, 0, u,w, nel*nxd_3);
//w(nxd*nxd,nxd) = ut(nxd*nxd,nxd) * D(nxd,nxd) fortran
//w(nxd,nxd*nxd) = D(nxd,nxd) * ut(nxd,nxd*nxd) C
//mxm<<<gridSize, blockSize>>>(d,nxd, ut, nxd, w, nxd_2, nel, 0, nxd_3, nxd_3, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd_2, nxd, nxd, &alpha, ut, nxd, nxd_3, d, nxd, 0, &beta, w, nxd_2, nxd_3, nel, gridSize);
hipLaunchKernelGGL(( nekadd2), dim3(gridSize), dim3(blockSize), 0, 0, u,w, nel*nxd_3);
hipStreamDestroy(stream);
}
void flux_div_integral(double *ur, double *us, double *ut, double *ud, double *tu, double *totalh, double *rx, double *dg, double *dgt, double *jgt, double *jgl, double *res1, double *w, int nel, int eq, int ndim, int nx1, int nxd, int ldw, bool if3d){
//call get_dgl_ptr
int nd = pow(nxd,3);
int nx_3 = pow(nx1,3);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nel*nd/blockSize);
hipLaunchKernelGGL(( nekadd2col2_u), dim3(gridSize), dim3(blockSize), 0, 0, ur, totalh, rx, nel, nd, ndim,0);
hipLaunchKernelGGL(( nekadd2col2_u), dim3(gridSize), dim3(blockSize), 0, 0, us, totalh, rx, nel, nd, ndim,ndim);
if(if3d){
hipLaunchKernelGGL(( nekadd2col2_u), dim3(gridSize), dim3(blockSize), 0, 0, ut, totalh, rx, nel, nd, ndim,ndim+ndim);
local_grad3_t(ud, ur, us, ut, nxd, dg, dgt, w, nel);
}
else{
//call local_grad2
}
specmpn(tu,nx1,ud,nxd,jgt,jgl,if3d,w,ldw,nel,1,0,false);
hipLaunchKernelGGL(( neksub2), dim3(gridSize), dim3(blockSize), 0, 0, res1+eq*(nel*nx_3),tu,nel*nx_3);
}
void neklocal_grad3(double * ur, double *us, double *ut, double *u, int nx, int nxd, double *d, double *dt, int nel){
/*double *d_ub;
hipMalloc(&d_ub, nel*pow(nx,3)*sizeof(double));
hipStream_t stream;
hipStreamCreate(&stream);
const double alpha = 1;
const double beta = 0;*/
//cuda_multi_gemm_unif(stream, 'N', 'N', nx, nx, nx, &alpha, nx, nx*nx, d, u, d_ub, &beta, ur, us, ut, nel*nx, 1024);
//hipDeviceSynchronize();
//if(true) return;
int nx_2 = nx*nx;
int nx_3 = nx_2*nx;
int nxd_3 = pow(nxd,3);
//ur(nx,nx*nx) = D(nx,nx) * u(nx,nx*nx) fortran
//ur(nx*nx,nx) = u(nx*nx,nx) * D(nx,nx) C
int blockSize=1024, gridSize;
gridSize = (int)ceil((float)nel*nx_3/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, u,nx_2, d, nx, ur, nx, nel, nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
for(int k = 0; k<nx; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nel*nx_2/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, dt,nx, u+k*nx_2, nx, us+k*nx_2, nx, nel, 0, nx_3, nxd_3, 0);
}
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nel*nx_3/blockSize);
hipLaunchKernelGGL(( mxm), dim3(gridSize), dim3(blockSize), 0, 0, dt, nx, u, nx, ut, nx_2, nel, 0, nx_3, nxd_3, 0);
}
void nekgradl_rst(double *ur, double *us, double *ut, double *u, double *d, double *dt, int nx, int nxd, int nel, bool if3d){
if(if3d){
neklocal_grad3(ur, us, ut, u, nx, nxd, d, dt, nel);
}
}
__global__ void calc_ud_3(double *ud, double *rx, double *ur, double *us, double *ut, int j, int nel, int nxd_3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nxd_3){
int e = id/nxd_3;
int i = id%nxd_3;
int e_size = e*(9*nxd_3);
ud[id] = rx[e_size+j*nxd_3+i]*ur[id] + rx[e_size+(j+3)*nxd_3+i]*us[id] + rx[e_size+(j+6)*nxd_3+i]*ut[id];
}
}
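/* compute_forcing adds the remaining volumetric terms to the residual: for
   the momentum equations (eq 1..3) it forms the gradient of phig contracted
   with the metrics, multiplies by pressure, Jacobian and mass matrix and
   subtracts the result from res1 (skipped for eq 3 in 2D), then subtracts the
   source term usrf*bm1; the energy equation (eq 4) only receives usrf*bm1. */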
void compute_forcing(double *ud, double *ur, double *us, double *ut, double *phig, double *rx, double *pr, double *convh /*use w*/, double *jacmi, double *bm1, double *res1, double *usrf, double *d, double *dt, int nel, int eq, int nx1, int nxd, int ldim, bool if3d){
int nxd_2 = nxd*nxd;
int nx1_2 = nx1 * nx1;
int nxd_3 = nxd_2*nxd;
int nx1_3 = nx1_2*nx1;
int blockSize=1024, gridSize;
gridSize = (int)ceil((float)nel*nx1_3/blockSize);
hipLaunchKernelGGL(( rzero), dim3(gridSize), dim3(blockSize), 0, 0, ud,nel*nx1_3);
if(eq!=0&&eq!=4){
int j=0;
if(eq==2)
j=1;
else if(eq==3){
j=1;
if(ldim==3)
j=2;
}
nekgradl_rst(ur,us,ut,phig, d, dt,nx1, nxd, nel, if3d);
if(if3d){
gridSize = (int)ceil((float)nel*nxd_3/blockSize);
hipLaunchKernelGGL(( calc_ud_3), dim3(gridSize), dim3(blockSize), 0, 0, ud,rx,ur,us,ut,j,nel,nxd_3);
}
else{
//calc_ud_2
}
if(eq!=3 || ldim!=2){
gridSize = (int)ceil((float)nel*nx1_3/blockSize);
hipLaunchKernelGGL(( nekcol2_ud), dim3(gridSize), dim3(blockSize), 0, 0, ud,pr,nel,nx1_3,nxd_3);
hipLaunchKernelGGL(( nekcopy), dim3(gridSize), dim3(blockSize), 0, 0, convh,ud,nel*nx1_3);
hipLaunchKernelGGL(( nekcol2), dim3(gridSize), dim3(blockSize), 0, 0, convh,jacmi,nel*nx1_3);
hipLaunchKernelGGL(( nekcol2), dim3(gridSize), dim3(blockSize), 0, 0, convh,bm1,nel*nx1_3);
hipLaunchKernelGGL(( neksub2), dim3(gridSize), dim3(blockSize), 0, 0, res1+eq*(nel*nx1_3),convh,nel*nx1_3);
hipLaunchKernelGGL(( neksubcol3_res1), dim3(gridSize), dim3(blockSize), 0, 0, res1+eq*(nel*nx1_3),usrf+eq*nx1_3, bm1,nel,nx1_3);
}
}
else if (eq==4)
hipLaunchKernelGGL(( neksubcol3_res1), dim3(gridSize), dim3(blockSize), 0, 0, res1+eq*(nel*nx1_3),usrf+eq*nx1_3, bm1,nel,nx1_3);
}
// this function is doing assemble_h, flux_div_integral, compute_forcing
extern "C" void computestagewrapper_(double *jgl, double *jgt, double *totalh, double *u, double *ju1, double *ju2, double *phig, double*pr, double *vxd, double *vyd, double *vzd, double *ut, double *ud, double *tu, double *rx, double *dg, double *dgt, double *res1, double *w, double *jacmi, double *bm1, double *usrf, double *d, double *dt, int *nel1, int *neq1, int *ndim1, int *ldw1, int *nx11, int *nxd1/*, bool if3d*/){
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
bool if3d = true;
int nel = nel1[0];
int neq = neq1[0];
int ndim = ndim1[0];
int ldw = ldw1[0];
int nx1 = nx11[0];
int nxd = nxd1[0];
//printf("nel=%d,neq=%d,ndim=%d,nx1=%d,nxd=%d,u[0]=%f\n",nel,neq,ndim,nx1,nxd,u[0]);
//use d_ju1 for d_ur
double *d_jgl, *d_jgt, *d_totalh, *d_u, *d_ju1, *d_ju2, *d_phig, *d_pr, *d_vxd,*d_vyd, *d_vzd, *d_ut, *d_ud, *d_tu, *d_rx, *d_dg, *d_dgt, *d_res1, *d_w, *d_jacmi, *d_bm1, *d_usrf, *d_d, *d_dt ;
bool inCPU = false;
int nxd_3 = pow(nxd,3), nx1_3 = pow(nx1,3);
if(inCPU){
//copy data to gpu
hipMalloc(&d_jgl, nxd_3*sizeof(double));
hipMemcpy(d_jgl, jgl, nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_jgt, nxd_3*sizeof(double));
hipMemcpy(d_jgt, jgt, nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_totalh, ndim*nel*nxd_3*sizeof(double));
hipMalloc(&d_u, nel*neq*nx1_3*sizeof(double));
hipMemcpy(d_u, u, nel*neq*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_ju1, nel*nxd_3*sizeof(double));
hipMalloc(&d_ju2, nel*nxd_3*sizeof(double));
hipMalloc(&d_phig, nel*nx1_3*sizeof(double));
hipMemcpy(d_phig, phig, nel*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_pr, nel*nx1_3*sizeof(double));
hipMemcpy(d_pr, pr, nel*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_vxd, nel*nxd_3*sizeof(double));
hipMemcpy(d_vxd, vxd, nel*nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_vyd, nel*nxd_3*sizeof(double));
hipMemcpy(d_vyd, vyd, nel*nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_vzd, nel*nxd_3*sizeof(double));
hipMemcpy(d_vzd, vzd, nel*nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_ut, nel*nxd_3*sizeof(double));
hipMalloc(&d_ud, nel*nxd_3*sizeof(double));
hipMalloc(&d_tu, nel*nxd_3*sizeof(double));
hipMalloc(&d_rx, nel*9*nxd_3*sizeof(double));
hipMemcpy(d_rx, rx, nel*9*nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_dg, nxd_3*sizeof(double));
hipMemcpy(d_dg, dg, nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_dgt, nxd_3*sizeof(double));
hipMemcpy(d_dgt, dgt, nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_d, nxd_3*sizeof(double));
hipMemcpy(d_d, d, nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_dt, nxd_3*sizeof(double));
hipMemcpy(d_dt, dt, nxd_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_res1, nel*neq*nx1_3*sizeof(double));
hipMemcpy(d_res1, res1, nel*neq*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_w, nel*ldw*sizeof(double));
//hipMemcpy(d_w, w, ldw*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_jacmi, nel*nx1_3*sizeof(double));
hipMemcpy(d_jacmi, jacmi, nel*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_bm1, nel*nx1_3*sizeof(double));
hipMemcpy(d_bm1, bm1, nel*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_usrf, neq*nx1_3*sizeof(double));
hipMemcpy(d_usrf, usrf, neq*nx1_3*sizeof(double), hipMemcpyHostToDevice);
}
else{
//just assign pointers
d_jgl = jgl;
d_jgt = jgt;
d_totalh = totalh;
d_u = u;
d_ju1 = ju1;
d_ju2 = ju2;
d_phig = phig;
d_pr = pr;
d_vxd = vxd;
d_vyd = vyd;
d_vzd = vzd;
d_ut = ut;
d_ud = ud;
d_tu = tu;
d_rx = rx;
d_dg = dg;
d_dgt = dgt;
d_res1 = res1;
d_w = w;
d_jacmi = jacmi;
d_bm1 = bm1;
d_usrf = usrf;
d_d = d;
d_dt = dt;
}
//printf("finished memory allocation in compute\n eq=%d, if3d=%d\n",neq,if3d);
for(int eq = 0; eq<neq; eq++){
//printf("loop # %d\n",eq);
evaluate_conv_h(nel, neq, eq, ndim, ldw, d_jgl, d_jgt, d_totalh /*convh*/, d_u, nx1, nxd, /*nd*/ pow(nxd,3)*nel, /*n1*/ pow(nx1,3)*nel, d_ju1, d_ju2, d_phig, d_pr, d_vxd, d_vyd, d_vzd, d_w, if3d);
flux_div_integral(d_ju1/*d_ur*/, d_ju2/*d_us*/, d_ut, d_ud, d_tu, d_totalh, d_rx, d_dg, d_dgt, d_jgt, d_jgl, d_res1, d_w, nel, eq, ndim, nx1, nxd, ldw, if3d);
compute_forcing(d_ud, d_ju1/*d_ur*/, d_ju2/*d_us*/, d_ut, d_phig, d_rx, d_pr, d_w /*convh*/, d_jacmi, d_bm1, d_res1, d_usrf, d_d, d_dt, nel, eq, nx1, nxd, ndim/*ldim*/, if3d);
}
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 3: %s\n",hipGetErrorString(code));
}
if(inCPU){
hipMemcpy(res1, d_res1, nel*neq*nx1_3*sizeof(double), hipMemcpyDeviceToHost);
//cuda free all d_*
//double *d_jgl, *d_jgt, *d_totalh, *d_u, *d_ju1, *d_ju2, *d_phig, *d_pr, *d_vxd,*d_vyd, *d_vzd, *d_ut, *d_ud, *d_tu, *d_rx, *d_dg, *d_dgt, *d_res1, *d_w, *d_jacmi, *d_bm1, *d_usrf, *d_d, *d_dt ;
hipFree(d_jgl);
hipFree(d_jgt);
hipFree(d_totalh);
hipFree(d_u);
hipFree(d_ju1);
hipFree(d_ju2);
hipFree(d_phig);
hipFree(d_pr);
hipFree(d_vxd);
hipFree(d_vyd);
hipFree(d_vzd);
hipFree(d_ut);
hipFree(d_ud);
hipFree(d_tu);
hipFree(d_rx);
hipFree(d_dg);
hipFree(d_dgt);
hipFree(d_d);
hipFree(d_dt);
hipFree(d_res1);
hipFree(d_w);
hipFree(d_jacmi);
hipFree(d_bm1);
hipFree(d_usrf);
}
else{
}
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("compute stage time is %f\n",time*1e-03);
}
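/* calculate_u applies one Runge-Kutta stage update per conserved variable:
   u <- (bm1*tcoef[0]*res3 + bm1*tcoef[1]*u - tcoef[2]*res1) / bm1, where bm1
   is the diagonal mass matrix, res3 the stage base state and res1 the freshly
   computed residual; the wrapper below offsets tcoef by the current stage. */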
__global__ void calculate_u(double *u, double *bm1, double *tcoef, double *res3, double *res1, int nelt, int nxyz1, int toteq){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt*toteq*nxyz1){
int e = id/(toteq*nxyz1);
int r = id%(toteq*nxyz1);
int eq = r/nxyz1;
int i = r%nxyz1;
u[id] = bm1[e*nxyz1+i]*tcoef[0]*res3[id]+bm1[e*nxyz1+i]*tcoef[1]*u[id]-tcoef[2]*res1[eq*(nelt*nxyz1)+e*nxyz1+i];
u[id] = u[id]/bm1[e*nxyz1+i];
}
}
extern "C" void calculateuwrapper_(double *u, double *bm1, double *tcoef, double *res3, double *res1, int *stage1, int *nelt1, int *nxyz11, int *toteq1){
int stage = stage1[0]-1;
int nelt = nelt1[0];
int nxyz1 = nxyz11[0];
int toteq = toteq1[0];
bool inCPU = false;
if(inCPU){
double *d_u, *d_bm1, *d_tcoef, *d_res3, *d_res1;
hipMalloc(&d_u, nelt*toteq*nxyz1*sizeof(double));
hipMemcpy(d_u, u, nelt*toteq*nxyz1*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_bm1, nelt*nxyz1*sizeof(double));
hipMemcpy(d_bm1, bm1, nelt*nxyz1*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_tcoef, 9*sizeof(double));
hipMemcpy(d_tcoef, tcoef, 9*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_res3, nelt*toteq*nxyz1*sizeof(double));
hipMemcpy(d_res3, res3, nelt*toteq*nxyz1*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_res1, nelt*toteq*nxyz1*sizeof(double));
hipMemcpy(d_res1, res1, nelt*toteq*nxyz1*sizeof(double), hipMemcpyHostToDevice);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nelt*toteq*nxyz1/blockSize);
hipLaunchKernelGGL(( calculate_u), dim3(gridSize), dim3(blockSize), 0, 0, d_u,d_bm1,d_tcoef+stage*3,d_res3,d_res1,nelt,nxyz1,toteq);
hipMemcpy(u, d_u, nelt*toteq*nxyz1*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_bm1);
hipFree(d_tcoef);
hipFree(d_res3);
hipFree(d_res1);
}
else{
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nelt*toteq*nxyz1/blockSize);
hipLaunchKernelGGL(( calculate_u), dim3(gridSize), dim3(blockSize), 0, 0, u,bm1,tcoef+stage*3,res3,res1,nelt,nxyz1,toteq);
}
}
__global__ void nekinvcol3_vu(double *vx, double *vy, double *vz, double *u, int nel, int nxyz1, int neq, int irg, int irpu, int irpv, int irpw){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nxyz1){
int e = id/nxyz1;
int i = id%nxyz1;
int e_offset = neq*nxyz1;
double c = u[e*e_offset+irg*nxyz1+i];
vx[id] = u[e*e_offset+irpu*nxyz1+i]/c;
vy[id] = u[e*e_offset+irpv*nxyz1+i]/c;
vz[id] = u[e*e_offset+irpw*nxyz1+i]/c;
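// NOTE: the three assignments below overwrite the velocities just computed
// with constants (0,1,0); this looks like leftover debug/verification code.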
vx[id] = 0.0;
vy[id] = 1.0;
vz[id] = 0.0;
}
}
extern "C" void computeprimitivevarswrapper_(double *vx, double *vy, double *vz, double *vxd, double *vyd, double *vzd, double *u, double *jgl, double *jgt, double *w, int *nxd1, int *nx11, int *nel1, int *toteq1, int *irpu1, int *irpv1, int *irpw1, int *irg1, int *ldw1, int *p){
//called only once and values used for all equations
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
int nxd = nxd1[0];
int nx1 = nx11[0];
int nel = nel1[0];
int toteq = toteq1[0];
int irpu = irpu1[0]-1;
int irpv = irpv1[0]-1;
int irpw = irpw1[0]-1;
int irg = irg1[0]-1;
int nx1_3 = pow(nx1,3);
int ldw = ldw1[0];
double *d_vx, *d_vy, *d_vz, *d_vxd, *d_vyd, *d_vzd, *d_u, *d_jgl, *d_jgt, *d_w;
bool inCPU = false;
if(p[0]==1)
inCPU = true;
if(inCPU){
//allocate gpu memory and transfer data to GPU
int tot_b = nel * nx1_3 * sizeof(double);
int totd_b = nel * pow(nxd,3) * sizeof(double);
ldw = 2*pow(nxd,3);
int ldw_b = nel * ldw * sizeof(double);
hipMalloc(&d_vx, tot_b);
hipMalloc(&d_vy, tot_b);
hipMalloc(&d_vz, tot_b);
hipMalloc(&d_vxd, totd_b);
hipMalloc(&d_vyd, totd_b);
hipMalloc(&d_vzd, totd_b);
hipMalloc(&d_w, ldw_b);
hipMalloc(&d_u, toteq*tot_b);
int nxd_3_b = pow(nxd,3) * sizeof(double);
hipMalloc(&d_jgl, nxd_3_b);
hipMalloc(&d_jgt, nxd_3_b);
hipMemcpy(d_u, u, toteq*tot_b, hipMemcpyHostToDevice);
hipMemcpy(d_jgl, jgl, nxd_3_b, hipMemcpyHostToDevice);
hipMemcpy(d_jgt, jgt, nxd_3_b, hipMemcpyHostToDevice);
}
else{
//just assign data
d_w = w;
d_vx = vx;
d_vy = vy;
d_vz = vz;
d_vxd = vxd;
d_vyd = vyd;
d_vzd = vzd;
d_u = u;
d_jgl = jgl;
d_jgt = jgt;
}
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nel*nx1_3/blockSize);
hipLaunchKernelGGL(( nekinvcol3_vu), dim3(gridSize), dim3(blockSize), 0, 0, d_vx, d_vy, d_vz, d_u, nel, nx1_3, toteq, irg, irpu, irpv, irpw);
specmpn(d_vxd, nxd, d_vx, nx1, d_jgl, d_jgt, true, d_w, ldw, nel, 1, 0,true);
specmpn(d_vyd, nxd, d_vy, nx1, d_jgl, d_jgt, true, d_w, ldw, nel, 1, 0,true);
specmpn(d_vzd, nxd, d_vz, nx1, d_jgl, d_jgt, true, d_w, ldw, nel, 1, 0,true);
if(inCPU){
int tot_b = nel * nx1_3 * sizeof(double);
int totd_b = nel * pow(nxd,3) * sizeof(double);
hipMemcpy(vx, d_vx, tot_b, hipMemcpyDeviceToHost);
hipMemcpy(vy, d_vy, tot_b, hipMemcpyDeviceToHost);
hipMemcpy(vz, d_vz, tot_b, hipMemcpyDeviceToHost);
hipMemcpy(vxd, d_vxd, totd_b, hipMemcpyDeviceToHost);
hipMemcpy(vyd, d_vyd, totd_b, hipMemcpyDeviceToHost);
hipMemcpy(vzd, d_vzd, totd_b, hipMemcpyDeviceToHost);
hipFree(d_vx);
hipFree(d_vy);
hipFree(d_vz);
hipFree(d_vxd);
hipFree(d_vyd);
hipFree(d_vzd);
hipFree(d_w);
hipFree(d_u);
hipFree(d_jgl);
hipFree(d_jgt);
}
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("compute primitive time is %f\n",time*1e-03);
}
//mxm multiplication, faces
__global__ void mxm_faces(double *a, int n1, double *b, int n2, double *c, int n3, int nel, int nfaces, int aSize, int bSize, int cSize){
//calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c
//in fortran the original calculation was
// c(n3,n1) = b(n3,n2) X a(n2,n1)
// a,b,cSize are single element size
//extraEq, in case of a matrix has equation as an index
int id = blockIdx.x*blockDim.x+threadIdx.x;
int n13 = n1*n3;
if(id<nel*nfaces*n13){
int e = id/(nfaces*n13);
int rc = id%(nfaces*n13);
int f = rc/n13;
int rc2 = rc%n13;
int i = rc2/n3;
int j = rc2%n3;
int cid = e*nfaces*cSize+f*cSize+rc2;
int aid = e*nfaces*aSize+f*aSize + i*n2;
int bid = e*nfaces*bSize+f*bSize + j;
c[cid] = 0;
for(int k = 0; k<n2; k++)
c[cid]+=a[aid+k]*b[bid+k*n3];
}
}
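/* map_faced interpolates face data between the nx1 solution grid and the nxd
   dealiased grid, one 2D tensor-product interpolation per (element, face):
   idir == 0 maps nx1 -> nxd using jgl/jgt, any other value maps nxd -> nx1;
   w is scratch of size nx1*nxd per face. */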
void map_faced(double *jgl, double *jgt, double *ju, double *u, double *w, int nx1, int nxd, int fdim, int nelt, int nfaces, int idir){
hipStream_t stream;
hipStreamCreate( &stream );
const double alpha = 1;
const double beta = 0;
int nx1_2 = pow(nx1,2);
int nxd_2 = pow(nxd,2);
int batchSize = nelt*nfaces;
if(idir==0){
int blockSize = 1024, gridSize;
//calc w(nxd,nx1) = jgl(nxd*nx1) * u(nx1,nx1) in fortran
//calc w(nx1,nxd) = u(nx1,nx1) * jgl(nx1,nxd) in C
gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nx1, &alpha, jgl, nxd, 0, u, nx1, nx1_2, &beta, w, nxd, nx1*nxd, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(u, nx1, jgl, nx1, w, nxd, nelt, nfaces, nx1*nx1, 0, nx1*nxd);
//calc ju(nxd,nxd) = w(nxd,nx1) * jgt(nx1,nxd) in fortran
//calc ju(nxd,nxd) = jgt(nxd,nx1) * w(nx1,nxd)
gridSize = (int)ceil((float)nelt*nfaces*nxd*nxd/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nxd, &alpha, w, nxd, nx1*nxd, jgt, nx1, 0, &beta, ju, nxd, nxd_2, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(jgt, nxd, w, nx1, ju, nxd, nelt, nfaces, 0, nx1*nxd, nxd*nxd);
}
else{
int blockSize = 1024, gridSize;
//calc w(nx1,nxd) = jgt(nx1,nxd) * u(nxd,nxd) in fortran
//calc w(nxd,nx1) = u(nxd,nxd) * jgt(nxd,nx1) in C
gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nxd, &alpha, jgt, nx1, 0, u, nxd, nxd_2, &beta, w, nx1, nx1*nxd, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(u, nxd, jgt, nxd, w, nx1, nelt, nfaces, nxd*nxd, 0, nx1*nxd);
//calc ju(nx1,nx1) = w(nx1,nxd) * jgl(nxd,nx1) in fortran
//calc ju(nx1,nx1) = jgl(nx1,nxd) * w(nxd,nx1) in C
gridSize = (int)ceil((float)nelt*nfaces*nx1*nx1/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nx1, &alpha, w, nx1, nx1*nxd, jgl, nxd, 0, &beta, ju, nx1, nx1_2, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(jgl, nx1, w, nxd, ju, nx1, nelt, nfaces, 0, nx1*nxd, nx1*nx1);
}
hipStreamDestroy(stream);
}
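//dimension flow of map_faced (shapes written in the Fortran convention used in
//the comments above); each stage is one batched GEMM over nelt*nfaces faces:
//  idir==0 (coarse -> fine):  u(nx1,nx1) --jgl--> w(nxd,nx1) --jgt--> ju(nxd,nxd)
//  idir==1 (fine -> coarse):  u(nxd,nxd) --jgt--> w(nx1,nxd) --jgl--> ju(nx1,nx1)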
__global__ void invcol3_flux(double *a, double *b, double *c, int n, int total){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<total){
a[id] = b[id] / c[id%n];
}
}
__global__ void nekcol2_flux(double *a, double*b, int n, int total){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<total)
a[id]*=b[id%n];
}
__global__ void invcol2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]= a[id]/b[id];
}
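//the three helpers above are simple elementwise kernels; e.g. invcol3_flux
//computes a[id] = b[id]/c[id%n], i.e. the length-n array c (one face worth of
//quadrature weights) is tiled across the full totpts-long arrays, which is how
//a single nx1*nx1 weight block is reused for every element and face.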
__global__ void Ausm_flux(int neq, int ntotd, double *nx, double *ny, double *nz, double *nm, double *fs, double *rl, double *ul, double *vl, double *wl, double *pl, double *al, double *tl, double *rr, double *ur, double *vr, double *wr, double *pr, double *ar, double *tr, double *flx, double *cpl, double *cpr){
int i = blockIdx.x*blockDim.x+threadIdx.x;
//ntotd = nel * nfaces * nxzd
if(i<ntotd){
fs[i] = 0;// it is 0 in cmtbone but can be changed
double af,mf,mfa,mfm,mfp,ml,mla,mlp,mr,mra,mrm,pf,ql,qr,wtl,wtr,Hl,Hr;
Hl = cpl[i]*tl[i] + 0.5*(ul[i]*ul[i]+vl[i]*vl[i]+wl[i]*wl[i]);
Hr = cpr[i]*tr[i] + 0.5*(ur[i]*ur[i]+vr[i]*vr[i]+wr[i]*wr[i]);
ql = ul[i]*nx[i] + vl[i]*ny[i] + wl[i]*nz[i] - fs[i];
qr = ur[i]*nx[i] + vr[i]*ny[i] + wr[i]*nz[i] - fs[i];
af = 0.5*(al[i] + ar[i]);
ml = ql/af;
mla = abs(ml);
mr = qr/af;
mra = abs(mr);
if(mla <= 1.0){
mlp = 0.25*pow((ml+1.0),2) + 0.125*pow((ml*ml-1.0),2);
wtl = 0.25*pow(ml+1.0,2)*(2.0-ml) + 0.1875*ml*pow(ml*ml-1.0,2);
}
else{
mlp = 0.5*(ml+mla);
wtl = 0.5*(1.0+ml/mla);
}
if(mra <= 1.0){
mrm = -0.25*pow((mr-1.0),2) - 0.125*pow((mr*mr-1.0),2);
wtr = 0.25*pow(mr-1.0,2)*(2.0+mr) - 0.1875*mr*pow(mr*mr-1.0,2);
}
else{
mrm = 0.5*(mr-mra);
wtr = 0.5*(1.0-mr/mra);
}
mf = mlp + mrm;
mfa = abs(mf);
mfp = 0.5*(mf+mfa);
mfm = 0.5*(mf-mfa);
pf = wtl*pl[i] + wtr*pr[i];
//compute fluxes
flx[i] = (af*(mfp*rl[i] + mfm*rr[i])) * nm[i];
flx[1*ntotd+i] = (af*(mfp*rl[i]*ul[i] + mfm*rr[i]*ur[i])+pf*nx[i]) * nm[i];
flx[2*ntotd+i] = (af*(mfp*rl[i]*vl[i] + mfm*rr[i]*vr[i])+pf*ny[i]) * nm[i];
flx[3*ntotd+i] = (af*(mfp*rl[i]*wl[i] + mfm*rr[i]*wr[i])+pf*nz[i]) * nm[i];
flx[4*ntotd+i] = (af*(mfp*rl[i]*Hl + mfm*rr[i]*Hr)+pf*fs[i]) * nm[i];
}
}
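//split-Mach polynomials evaluated above (AUSM-type flux), written out for
//reference; this is exactly what the |M|<=1 branches compute:
//  M+  =  0.25*(M+1)^2 + 0.125*(M^2-1)^2
//  M-  = -0.25*(M-1)^2 - 0.125*(M^2-1)^2
//  wtl =  0.25*(M+1)^2*(2-M) + 0.1875*M*(M^2-1)^2   (left pressure weight)
//  wtr =  0.25*(M-1)^2*(2+M) - 0.1875*M*(M^2-1)^2   (right pressure weight)
//for |M|>1 the splitting reduces to pure upwinding:
//  M+/- = 0.5*(M +/- |M|),  wt = 0.5*(1 +/- M/|M|)
//the interface flux is then af*(M+ * left state + M- * right state) plus the
//pressure term, scaled by the face Jacobian nm.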
void InviscidFlux(double *qminus, double *qplus, double *flux, double *unx, double *uny, double *unz, double *area, double *wghtc, double *wghtf, double *cbc, double *jgl, double *jgt, double *nx, double *ny, double *nz, double *rl, double *ul, double *vl, double *wl, double *pl, double *tl, double *al, double *cpl, double *rr, double *ur, double *vr, double *wr, double *pr, double *tr, double *ar, double *cpr, double *fs, double *jaco_f, double *flx, double *jaco_c,int neq, int nstate, int nflux, int nxd, int nx1, int nel, int ndim, int irho, int iux, int iuy, int iuz, int ipr, int ithm, int isnd, int icpf, int iph){
//nx extended to be nx(nel,nfaces,#points_in_face)
//irho should be irho1[0]-1, others also
//printf("in invFlux**\n");
int fdim = ndim-1;
int nfaces = 2*ndim;
int nx1_2 = nx1*nx1;
int nxd_2 = nxd*nxd;
double *w;
hipMalloc(&w,nel*nfaces*pow(nxd,2)*sizeof(double));
//add neksub2 which is last step of face_state_commo
int blockSize1 = 1024, gridSize1;
gridSize1 = (int)ceil((float)nstate*nel*nfaces*nx1_2/blockSize1);
hipLaunchKernelGGL(( neksub2), dim3(gridSize1), dim3(blockSize1), 0, 0, qplus,qminus,nstate*nel*nfaces*nx1_2);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-1: %s\n",hipGetErrorString(code));
}
int totpts = nel * nfaces * nx1_2;
map_faced(jgl, jgt, nx, unx, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ny, uny, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, nz, unz, w, nx1, nxd, fdim, nel, nfaces, 0);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-2: %s\n",hipGetErrorString(code));
}
//printf("irho=%d,iux=%d,iuy=%d,iuz=%d,ipr=%d,ithm=%d,isnd=%d,icpf=%d\n",irho,iux,iuy,iuz,ipr,ithm,isnd,icpf);
map_faced(jgl, jgt, rl, qminus+irho*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ul, qminus+iux*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, vl, qminus+iuy*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, wl, qminus+iuz*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, pl, qminus+ipr*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, tl, qminus+ithm*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, al, qminus+isnd*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, cpl, qminus+icpf*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-3: %s\n",hipGetErrorString(code));
}
map_faced(jgl, jgt, rr, qplus+irho*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ur, qplus+iux*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, vr, qplus+iuy*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, wr, qplus+iuz*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, pr, qplus+ipr*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, tr, qplus+ithm*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ar, qplus+isnd*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, cpr, qplus+icpf*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-4: %s\n",hipGetErrorString(code));
}
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)totpts/blockSize);
hipLaunchKernelGGL(( invcol3_flux), dim3(gridSize), dim3(blockSize), 0, 0, jaco_c,area,wghtc,nx1_2,totpts);
map_faced(jgl, jgt, jaco_f, jaco_c, w, nx1, nxd, fdim, nel, nfaces, 0);
int totpts_d = nel * nfaces * nxd_2;
gridSize = (int)ceil((float)totpts_d/blockSize);
hipLaunchKernelGGL(( nekcol2_flux), dim3(gridSize), dim3(blockSize), 0, 0, jaco_f,wghtf,nxd_2,totpts_d);
//Ausm
//gridSize = (int)ceil((float)nel*nfaces*nxd_2/blockSize);
hipLaunchKernelGGL(( invcol2), dim3(gridSize), dim3(blockSize), 0, 0, cpl,rl,totpts_d);
hipLaunchKernelGGL(( invcol2), dim3(gridSize), dim3(blockSize), 0, 0, cpr,rr,totpts_d);
//gridSize = (int)ceil((float)nel*nfaces*nxd_2/blockSize);
hipLaunchKernelGGL(( Ausm_flux), dim3(gridSize), dim3(blockSize), 0, 0, neq, totpts_d, nx, ny, nz, jaco_f, fs, rl, ul, vl, wl, pl, al, tl, rr, ur, vr, wr, pr, ar, tr, flx, cpl, cpr);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-5: %s\n",hipGetErrorString(code));
}
map_faced(jgl, jgt, pl, qminus+iph*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
for(int j=0; j<neq;j++){
hipLaunchKernelGGL(( nekcol2), dim3(gridSize), dim3(blockSize), 0, 0, flx+j*totpts_d,pl,totpts_d);
map_faced(jgl, jgt, flux+j*totpts, flx+j*totpts_d, w, nx1, nxd, fdim, nel, nfaces, 1);
}
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-6: %s\n",hipGetErrorString(code));
}
hipFree(w);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, comp-7: %s\n",hipGetErrorString(code));
}
}
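//call-order summary of InviscidFlux as implemented above (a sketch, not a spec;
//neksub2 and nekcol2 are defined elsewhere in this file):
// 1. neksub2 finishes the face-state exchange (last step of face_state_commo)
// 2. map_faced interpolates the normals and both face states from nx1^2 to nxd^2 points
// 3. the fine-grid face Jacobian is built: jaco_c = area/wghtc, mapped, then scaled by wghtf
// 4. cp is divided by rho on both sides and Ausm_flux evaluates the pointwise flux
// 5. each equation is scaled by the interpolated phi (iph) and mapped back to nx1^2
//all per-point temporaries (rl..cpr, fs, jaco_f, flx) are sized nel*nfaces*nxd^2.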
extern "C" void inviscidfluxwrapper_(double *qminus, double *qplus, double *flux, double *unx, double *uny, double *unz, double *area, double *wghtc, double *wghtf, double *cbc, double *jgl, double *jgt, double *nx, double *ny, double *nz, double *rl, double *ul, double *vl, double *wl, double *pl, double *tl, double *al, double *cpl, double *rr, double *ur, double *vr, double *wr, double *pr, double *tr, double *ar, double *cpr, double *fs, double *jaco_f, double *flx, double *jaco_c, int* neq, int* nstate, int* nflux, int *nxd, int *nx1, int *nel, int *ndim, int *irho, int *iux, int *iuy, int *iuz, int *ipr, int *ithm, int *isnd, int *icpf, int *iph){
bool inCPU = false;
if(inCPU){
//input and output
double *d_qminus, *d_qplus, *d_flux, *d_unx, *d_uny, *d_unz, *d_area, *d_wghtc, *d_wghtf, *d_cbc, *d_jgl, *d_jgt;
//temp arrays
double *d_nx, *d_ny, *d_nz, *d_rl, *d_ul, *d_vl, *d_wl, *d_pl, *d_tl, *d_al, *d_cpl, *d_rr, *d_ur, *d_vr, *d_wr, *d_pr, *d_tr, *d_ar, *d_cpr, *d_fs, *d_jaco_f, *d_flx, *d_jaco_c;
int nfaces=ndim[0]*2;
int ntot = nel[0] * nfaces * pow(nx1[0],2);
int ntotd = nel[0] * nfaces * pow(nxd[0],2);
hipMalloc(&d_qminus, nstate[0]*ntot*sizeof(double));
hipMemcpy(d_qminus, qminus, nstate[0]*ntot*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_qplus, nstate[0]*ntot*sizeof(double));
hipMemcpy(d_qplus, qplus, nstate[0]*ntot*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_flux, neq[0]*ntot*sizeof(double));
hipMalloc(&d_unx, ntot*sizeof(double));
hipMemcpy(d_unx, unx, ntot*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_uny, ntot*sizeof(double));
hipMemcpy(d_uny, uny, ntot*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_unz, ntot*sizeof(double));
hipMemcpy(d_unz, unz, ntot*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_area, ntot*sizeof(double));
hipMemcpy(d_area, area, ntot*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_wghtc, pow(nx1[0],2)*sizeof(double));
hipMemcpy(d_wghtc, wghtc, pow(nx1[0],2)*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_wghtf, pow(nxd[0],2)*sizeof(double));
hipMemcpy(d_wghtf, wghtf, pow(nxd[0],2)*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_cbc, pow(nxd[0],2)*sizeof(double));//correct
//hipMemcpy(d_wghtf, wghtf, pow(nxd[0],2)*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_jgl, pow(nxd[0],3)*sizeof(double));
hipMemcpy(d_jgl, jgl, pow(nxd[0],3)*sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&d_jgt, pow(nxd[0],3)*sizeof(double));
hipMemcpy(d_jgt, jgt, pow(nxd[0],3)*sizeof(double), hipMemcpyHostToDevice);
double*d_all;
hipMalloc(&d_all, 26*ntotd*sizeof(double));
d_nx = d_all;
d_ny = d_nx+ntotd;
d_nz = d_ny+ntotd;
d_rl = d_nz+ntotd;
d_ul = d_rl+ntotd;
d_wl = d_ul+ntotd;
d_vl = d_wl+ntotd;
d_pl = d_vl+ntotd;
d_tl = d_pl+ntotd;
d_al = d_tl+ntotd;
d_cpl = d_al+ntotd;
d_rr = d_cpl+ntotd;
d_ur = d_rr+ntotd;
d_wr = d_ur+ntotd;
d_vr = d_wr+ntotd;
d_pr = d_vr+ntotd;
d_tr = d_pr+ntotd;
d_ar = d_tr+ntotd;
d_cpr = d_ar+ntotd;
d_jaco_f = d_cpr+ntotd;
d_fs = d_jaco_f+ntotd;
d_flx = d_fs+ntotd;
/*hipMalloc(&d_nx, ntotd*sizeof(double));
hipMalloc(&d_ny, ntotd*sizeof(double));
hipMalloc(&d_nz, ntotd*sizeof(double));
hipMalloc(&d_rl, ntotd*sizeof(double));
hipMalloc(&d_ul, ntotd*sizeof(double));
hipMalloc(&d_wl, ntotd*sizeof(double));
hipMalloc(&d_vl, ntotd*sizeof(double));
hipMalloc(&d_pl, ntotd*sizeof(double));
hipMalloc(&d_tl, ntotd*sizeof(double));
hipMalloc(&d_al, ntotd*sizeof(double));
hipMalloc(&d_cpl, ntotd*sizeof(double));
hipMalloc(&d_rr, ntotd*sizeof(double));
hipMalloc(&d_ur, ntotd*sizeof(double));
hipMalloc(&d_vr, ntotd*sizeof(double));
hipMalloc(&d_wr, ntotd*sizeof(double));
hipMalloc(&d_pr, ntotd*sizeof(double));
hipMalloc(&d_tr, ntotd*sizeof(double));
hipMalloc(&d_ar, ntotd*sizeof(double));
hipMalloc(&d_cpr, ntotd*sizeof(double));*/
hipMalloc(&d_jaco_c, ntot*sizeof(double));
/*hipMalloc(&d_jaco_f, ntotd*sizeof(double));
hipMalloc(&d_fs, ntotd*sizeof(double));
hipMalloc(&d_flx, 5*ntotd*sizeof(double));*/
//int* neq, int* nstate, int* nflux, int *nxd, int *nx1, int *nel, int *ndim, int *irho, int *iux, int *iuy, int *iuz, int *ipr, int *ithm, int *isnd, int *icpf, int *iph
//printf("neq = %d, nxd = %d, nx1 = %d, nel = %d, ndim = %d, irho = %d\n",neq[0],nxd[0],nx1[0],nel[0],ndim[0],irho[0]);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, malloc: %s\n",hipGetErrorString(code));
}
InviscidFlux(d_qminus, d_qplus, d_flux, d_unx, d_uny, d_unz, d_area, d_wghtc, d_wghtf, d_cbc, d_jgl, d_jgt, d_nx, d_ny, d_nz, d_rl, d_ul, d_vl, d_wl, d_pl, d_tl, d_al, d_cpl, d_rr, d_ur, d_vr, d_wr, d_pr, d_tr, d_ar, d_cpr, d_fs, d_jaco_f, d_flx, d_jaco_c, neq[0], nstate[0], nflux[0], nxd[0], nx1[0], nel[0], ndim[0], irho[0]-1, iux[0]-1, iuy[0]-1, iuz[0]-1, ipr[0]-1, ithm[0]-1, isnd[0]-1, icpf[0]-1, iph[0]-1);
code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error Inv, compute: %s\n",hipGetErrorString(code));
}
hipMemcpy(flux, d_flux, neq[0]*ntot*sizeof(double), hipMemcpyDeviceToHost);
//free
hipFree(d_qminus);
hipFree(d_qplus);
hipFree(d_flux);
hipFree(d_unx);
hipFree(d_uny);
hipFree(d_unz);
hipFree(d_area);
hipFree(d_wghtc);
hipFree(d_wghtf);
hipFree(d_cbc);//correct
hipFree(d_jgl);
hipFree(d_jgt);
hipFree(d_all);
/*hipFree(d_nx);
hipFree(d_ny);
hipFree(d_nz);
hipFree(d_rl);
hipFree(d_ul);
hipFree(d_wl);
hipFree(d_vl);
hipFree(d_pl);
hipFree(d_tl);
hipFree(d_al);
hipFree(d_cpl);
hipFree(d_rr);
hipFree(d_ur);
hipFree(d_vr);
hipFree(d_wr);
hipFree(d_pr);
hipFree(d_tr);
hipFree(d_ar);
hipFree(d_cpr);*/
hipFree(d_jaco_c);
/*hipFree(d_jaco_f);
hipFree(d_fs);
hipFree(d_flx);*/
}
else{
InviscidFlux(qminus, qplus, flux, unx, uny, unz, area, wghtc, wghtf, cbc, jgl, jgt, nx, ny, nz, rl, ul, vl, wl, pl, tl, al, cpl, rr, ur, vr, wr, pr, tr, ar, cpr, fs, jaco_f, flx, jaco_c, neq[0], nstate[0], nflux[0], nxd[0], nx1[0], nel[0], ndim[0], irho[0]-1, iux[0]-1, iuy[0]-1, iuz[0]-1, ipr[0]-1, ithm[0]-1, isnd[0]-1, icpf[0]-1, iph[0]-1);
}
}
//res1 = vols
void before_fields(){
//nekcopy u into res3 - can be done at next when res3 is needed
//set_dealias_face without zwgl - can be done at next when wghtc, wghtf needed
//compute_primitive_vars
//fillq_gpu
//faceu
}
__global__ void init_stokes(double *rpart, int *ipart, int nr, int ni, int n, int nw, int np, int nid, int jx, int jy, int jz, int jf0, int jar, int jai, double p){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < nw/np){
int pid = id * np + nid + 1;
double dumx = fmod(1.352 * id/7,0.98)+.01;
double dumy = fmod(1.273 * id/8,0.98)+.01;
double dumz = fmod(1.222 * id/9,0.98)+.01;
int off = id*nr;
rpart[off+jx] = -0.9 + dumx * 1.8;
rpart[off+jy] = -0.9 + dumy * 1.8;
rpart[off+jz] = -0.9 + dumz * 1.8;
rpart[off+jf0] = 0.0;
rpart[off+jar] = p;//pow(10,15);
ipart[id*ni+jai] = pid;
}
}
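//seeding sketch: dumx/dumy/dumz are cheap deterministic pseudo-random numbers in
//[0.01,0.99), e.g. id=7 gives dumx = fmod(1.352*7/7,0.98)+0.01 = 0.382, and the
//particle is placed at -0.9 + dum*1.8, i.e. inside the [-0.9,0.9]^3 box; the jar
//slot gets the constant p (1e15 from the wrapper) and jai a unique 1-based id.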
#if 0
__global__ void particles_in_nid(int *fptsmap, double *rfpts, int *ifpts, double *rpart, int *ipart, double *range, int nrf, int nif, int *nfpts, int nr, int ni, int n, int lpart, int nelt, int jx, int jy, int jz,int je0, int jrc, int jpt, int jd, int jr, int nid){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
//double *rpart = rpart1 + id * nr;
//int *ipart = ipart1 + id * ni;
int ie;
double xloc = rpart[id*nr+jx];
double yloc = rpart[id*nr+jy];
double zloc = rpart[id*nr+jz];
for(ie = 0; ie < nelt; ie++){
//double * range = xerange + ie * 6;
if(xloc >= range[ie*6+0] && xloc <= range[ie*6+1] && yloc >=range[ie*6+2] && yloc <= range[ie*6+3] && zloc >= range[ie*6+4] && zloc <= range[ie*6+5]){
ipart[id*ni+je0] = ie;
ipart[id*ni+jrc] = 0;
ipart[id*ni+jpt] = nid;
ipart[id*ni+jd] = 1;
rpart[id*nr+jr] = -1.0 + 2.0*(xloc-range[ie*6+0])/(range[ie*6+1]-range[ie*6+0]);
rpart[id*nr+jr+1] = -1.0 + 2.0*(yloc-range[ie*6+2])/(range[ie*6+3]-range[ie*6+2]);
rpart[id*nr+jr+2] = -1.0 + 2.0*(zloc-range[ie*6+4])/(range[ie*6+5]-range[ie*6+4]); //z reference coordinate; the original jr index overwrote the x slot
break;
}
}
if(ie==nelt){
//point is outside all elements
int old = atomicAdd(nfpts, 1);
if(old==lpart){
printf("error many moving particles\n");
return;
}
fptsmap[old] = id+1;
//double * rfp = rfpts + old * nrf;
//int * ifp = ifpts + old * nif;
for(int i = 0 ; i < nrf; i++)
rfpts[old*nrf+i] = rpart[id*nr+i];
for(int i = 0 ; i < nif; i++)
ifpts[old*nif+i] = ipart[id*ni+i];
}
}
}
extern "C" void particles_in_nid_wrapper_(double *rfpts, int *ifpts, double *rpart, int *ipart, double *xerange, int *fptsmap, int *nrf, int *nif, int *nfpts, int *nr, int *ni, int *n, int *lpart, int *nelt, int *jx, int *jy, int *jz,int *je0, int *jrc, int *jpt, int *jd, int *jr, int *nid){
float time;
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipEventRecord(startEvent, 0);
bool inCPU = false;
double *d_rfpts, *d_rpart, *d_xerange;
int *d_fptsmap, *d_ifpts, *d_ipart, *d_nfpts;
if(inCPU){
hipMalloc(&d_rfpts, lpart[0]*nrf[0]*sizeof(double));
hipMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
hipMalloc(&d_xerange, nelt[0]*6*sizeof(double));
hipMalloc(&d_fptsmap, lpart[0]*sizeof(int));
hipMalloc(&d_ifpts, lpart[0]*nif[0]*sizeof(int));
hipMalloc(&d_ipart, n[0]*ni[0]*sizeof(int));
hipMalloc(&d_nfpts, sizeof(int));
hipMemcpy(d_rpart, rpart, n[0]*nr[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_xerange, xerange, nelt[0]*6*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_ipart, ipart, n[0]*ni[0]*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_nfpts, nfpts, sizeof(int), hipMemcpyHostToDevice);
}
else{
d_rfpts = rfpts;
d_rpart= rpart;
d_xerange = xerange;
d_fptsmap = fptsmap;
d_ifpts = ifpts;
d_ipart = ipart;
hipMalloc(&d_nfpts, sizeof(int));
hipMemcpy(d_nfpts, nfpts, sizeof(int), hipMemcpyHostToDevice);
}
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
// printf ("print var %d %d %d\n", n[0], jx[0], jy[0]);
hipLaunchKernelGGL(( particles_in_nid), dim3(gridSize), dim3(blockSize), 0, 0, d_fptsmap, d_rfpts, d_ifpts, d_rpart, d_ipart, d_xerange, nrf[0], nif[0], d_nfpts, nr[0], ni[0], n[0], lpart[0], nelt[0], jx[0]-1, jy[0]-1, jz[0]-1, je0[0]-1, jrc[0]-1, jpt[0]-1, jd[0]-1, jr[0]-1, nid[0]);
if(inCPU){
hipMemcpy(ipart, d_ipart, n[0]*ni[0]*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(nfpts, d_nfpts, sizeof(int), hipMemcpyDeviceToHost);
if(nfpts[0]>0){
hipMemcpy(fptsmap, d_fptsmap, nfpts[0]*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(rfpts, d_rfpts, nfpts[0]*nrf[0]*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(ifpts, d_ifpts, nfpts[0]*nif[0]*sizeof(int), hipMemcpyDeviceToHost);
}
//free
hipFree(d_rpart);
hipFree(d_ipart);
hipFree(d_xerange);
hipFree(d_fptsmap);
hipFree(d_rfpts);
hipFree(d_ifpts);
}
else{
hipMemcpy(nfpts, d_nfpts, sizeof(int), hipMemcpyDeviceToHost);
// printf ("print var 1st %d\n", nfpts);
}
hipFree(d_nfpts);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&time, startEvent, stopEvent);
// printf ("print var 2nd %d\n", nfpts);
//printf("particles in nid time is %f\n",time*1e-03);
}
#endif
extern "C" void init_stokes_particleswrapper_(double *rpart, int *ipart, int *nr, int *ni, int *n, int *nw, int *np, int *nid, int *jx, int *jy, int *jz, int *jf0, int *jar, int *jai){
bool inCPU = false;
double *d_rpart;
int *d_ipart;
if(inCPU){
hipMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
hipMalloc(&d_ipart, n[0]*ni[0]*sizeof(int));
}
else{
d_rpart = rpart;
d_ipart = ipart;
}
int blockSize = 1024, gridSize;
int proc_work = nw[0]/np[0];
gridSize = (int)ceil((float)proc_work/blockSize);
hipLaunchKernelGGL(( init_stokes), dim3(gridSize), dim3(blockSize), 0, 0, d_rpart, d_ipart, nr[0], ni[0], n[0], nw[0], np[0], nid[0], jx[0]-1, jy[0]-1, jz[0]-1, jf0[0]-1, jar[0]-1, jai[0]-1, pow(10,15)); //launch with the device pointers so the inCPU staging path also works
if(inCPU){
hipMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(ipart, d_ipart, n[0]*ni[0]*sizeof(int), hipMemcpyDeviceToHost);
//free
hipFree(d_rpart);
hipFree(d_ipart);
}
}
__global__ void solve_velocity(double *rpart, int nr, int ni, int n, int j, int jx0, int jx1, int jx2, int jx3, int jv0, int jv1, int jv2, int jv3, int ju0, int ju1, int ju2, int ju3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
int off = id*nr+j;
double * rpart_off = rpart+off;
rpart_off[ju3] = rpart_off[ju2];
rpart_off[ju2] = rpart_off[ju1];
rpart_off[ju1] = rpart_off[ju0];
rpart_off[jv3] = rpart_off[jv2];
rpart_off[jv2] = rpart_off[jv1];
rpart_off[jv1] = rpart_off[jv0];
rpart_off[jx3] = rpart_off[jx2];
rpart_off[jx2] = rpart_off[jx1];
rpart_off[jx1] = rpart_off[jx0];
}
}
__global__ void update_velocity(double *rpart1, double *alpha, double *beta, int ndim, int nr, int ni, int n, int jx0, int jx1, int jx2, int jx3, int jv0, int jv1, int jv2, int jv3, int ju0, int ju1, int ju2, int ju3, int jf0, int jst){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n*ndim){
int j = id%ndim;
int i = id/ndim;
double s = rpart1[i*nr+jst];
int off = i*nr+j;
double * rpart = rpart1+off;
double rhs = s*(alpha[1]*rpart[ju1] + alpha[2]*rpart[ju2] + alpha[3]*rpart[ju3]) + rpart[jf0-j] + beta[1]*rpart[jv1] + beta[2]*rpart[jv2] + beta[3]*rpart[jv3];
rpart[jv0] = rhs/(beta[0]+s);
double rhx = beta[1]*rpart[jx1] + beta[2]*rpart[jx2] + beta[3]*rpart[jx3] + rpart[jv0];
// rpart[jx0] = rhx/beta[0];
}
}
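//update formula implemented above, per particle i and direction j, where
//alpha/beta are 4-entry weight arrays and s = rpart[jst] (here the jar slot set
//at init, presumably a drag/relaxation coefficient):
//  v0 = ( s*(a1*u1 + a2*u2 + a3*u3) + f0 + b1*v1 + b2*v2 + b3*v3 ) / (b0 + s)
//f0 is a single scalar slot per particle, which is why the index jf0-j cancels
//the +j folded into the rpart pointer. The matching position update
//x0 = (b1*x1 + b2*x2 + b3*x3 + v0)/b0 is computed into rhx but intentionally
//left commented out, since the location is advanced in a separate routine.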
//commented by adeesha
/*__global__ void update_particle_location_kernel(double *rpart1, double *xdrange1, int nr, int n, int ndim, int jx0, int jx1, int jx2, int jx3, int jaa, int jab, int jac, int jad, int *flagsend, double dt){
int id = blockIdx.x*blockDim.x+threadIdx.x;
// printf("***update_particle_location %d\n", id);
if(id < n*ndim){
// if(id < n){
// printf("***entered if block\n");
int j = id%ndim;
int i = id/ndim;
int off = i*nr+j;
double * rpart = rpart1+off;
double *xdrange = xdrange1+2*j;
// curtain test case, update x location
int factor=(3-j)/3;
rpart[jx0] = rpart[jx0] + (1.0/3)*rpart[jx0]*dt*factor;
// new curtain test case, update y location
int factor = 0;
if (j == 1){
factor = 1;
}
rpart[jx0] = rpart[jx0] + (1.0/3)*rpart[jx0]*dt*factor;
#if 0
// Do not remove this part of the code
rpart[jx0] = rpart[jx0] + rpart[jaa] * rpart[jad-j];
#endif
if(rpart[jx0]<xdrange[0]){
//exit(0);
flagsend[0] = flagsend[0] + 1;
}
if(rpart[jx0] > xdrange[1]){
//exit(0);
flagsend[0] = flagsend[0] + 1;
}
#if 0
// Do not remove this part of the code
if(rpart[jx0]<xdrange[0]){
rpart[jx0] = xdrange[1] - fabs(xdrange[0] - rpart[jx0]);
rpart[jx1] = xdrange[1] + fabs(xdrange[0] - rpart[jx1]);
rpart[jx2] = xdrange[1] + fabs(xdrange[0] - rpart[jx2]);
rpart[jx3] = xdrange[1] + fabs(xdrange[0] - rpart[jx3]);
}
if(rpart[jx0] > xdrange[1]){
rpart[jx0] = xdrange[0] + fabs(rpart[jx0] - xdrange[1]);
rpart[jx1] = xdrange[0] - fabs(xdrange[2] - rpart[jx1]);
rpart[jx2] = xdrange[0] - fabs(xdrange[2] - rpart[jx2]);
rpart[jx3] = xdrange[0] - fabs(xdrange[2] - rpart[jx3]);
}
#endif
}
} */
//not use this function
__global__ void update_particle_location_keke(double *rpart1, double *xdrange1, int nr, int n, int j, int ndim, int jx0, int jx1, int jx2, int jx3, int jaa, int jab, int jac, int jad){
int id = blockIdx.x*blockDim.x+threadIdx.x;
// printf("***update_particle_location %d\n", id);
if(id < n){
// printf("***entered if block\n");
// int j = id%ndim;
// int i = id/ndim;
int off = id*nr;
double * rpart = rpart1+off;
double *xdrange = xdrange1+2*j;
rpart[jx0+j] = rpart[jx0+j] + rpart[jaa+j] * rpart[jad];
// rpart[jx0+1] = rpart[jx0+1] + rpart[jab] * rpart[jad];
// rpart[jx0+2] = rpart[jx0+2] + rpart[jac] * rpart[jad];
rpart = rpart + j; //avoid the following all +j
if(rpart[jx0]<xdrange[0]){
rpart[jx0] = xdrange[1] - fabs(xdrange[0] - rpart[jx0]);
rpart[jx1] = xdrange[1] + fabs(xdrange[0] - rpart[jx1]);
rpart[jx2] = xdrange[1] + fabs(xdrange[0] - rpart[jx2]);
rpart[jx3] = xdrange[1] + fabs(xdrange[0] - rpart[jx3]);
}
if(rpart[jx0] > xdrange[1]){
rpart[jx0] = xdrange[0] + fabs(rpart[jx0] - xdrange[1]);
rpart[jx1] = xdrange[0] - fabs(xdrange[2] - rpart[jx1]);
rpart[jx2] = xdrange[0] - fabs(xdrange[2] - rpart[jx2]);
rpart[jx3] = xdrange[0] - fabs(xdrange[2] - rpart[jx3]);
}
}
}
void update_stokes_particles(double *rpart, double *alpha, double *beta, double *xdrange, int ndim, int nr, int ni, int n, int jx0, int jx1, int jx2, int jx3, int jv0, int jv1, int jv2, int jv3, int ju0, int ju1, int ju2, int ju3, int jar, int jf0, int jaa, int jab, int jac, int jad, int *flagsend, double dt){
//jx0, ... all should be passed original-1
//alpha,beta[0:3]
//solve velocity
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n/blockSize);
for(int j = 0; j < ndim; j++)
hipLaunchKernelGGL(( solve_velocity), dim3(gridSize), dim3(blockSize), 0, 0, rpart, nr, ni, n, j, jx0, jx1, jx2, jx3, jv0, jv1, jv2, jv3, ju0, ju1, ju2, ju3);
gridSize = (int)ceil((float)n*ndim/blockSize);
hipLaunchKernelGGL(( update_velocity), dim3(gridSize), dim3(blockSize), 0, 0, rpart, alpha, beta, ndim, nr, ni, n, jx0, jx1, jx2, jx3, jv0, jv1, jv2, jv3, ju0, ju1, ju2, ju3, jf0, jar);
// gridSize = (int)ceil((float)n/blockSize);
// for(int j = 0; j < ndim; j++)
// update_particle_location_keke<<<gridSize, blockSize>>>(rpart, xdrange, nr, n, j, ndim, jx0, jx1, jx2, jx3, jaa, jab, jac, jad);
//update_particle_location_kernel<<<gridSize, blockSize>>>(rpart, xdrange, nr, n, ndim, jx0, jx1, jx2, jx3, jaa, jab, jac, jad, flagsend, dt); //previous one with gridSize=..*ndim
}
extern "C" void updatestokeswrapper_(double *rpart, double *alpha, double *beta, double *xdrange, int *ndim, int *nr, int *ni, int *n, int *jx0, int *jx1, int *jx2, int *jx3, int *jv0, int *jv1, int *jv2, int *jv3, int *ju0, int *ju1, int *ju2, int *ju3, int *jar, int *jf0, int *jaa, int *jab, int * jac, int * jad, int *flagsend, double* dt){
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
bool inCPU = false;
double * d_rpart, *d_alpha, *d_beta, *d_xdrange;
int *d_flagsend;
if(inCPU){
hipMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
hipMalloc(&d_alpha, 4*sizeof(double));
hipMalloc(&d_beta, 4*sizeof(double));
hipMalloc(&d_xdrange, 6*sizeof(double));
hipMalloc(&d_flagsend, sizeof(int));
hipMemcpy(d_rpart, rpart, n[0]*nr[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_alpha, alpha, 4*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_beta, beta, 4*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_xdrange, xdrange, 6*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_flagsend, flagsend, sizeof(int), hipMemcpyHostToDevice);
}
else{
d_rpart = rpart;
//d_alpha = alpha;
//d_beta = beta;
d_xdrange = xdrange;
hipMalloc(&d_alpha, 4*sizeof(double));
hipMalloc(&d_beta, 4*sizeof(double));
hipMemcpy(d_alpha, alpha, 4*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_beta, beta, 4*sizeof(double), hipMemcpyHostToDevice);
d_flagsend = flagsend;
}
update_stokes_particles(d_rpart, d_alpha, d_beta, d_xdrange, ndim[0], nr[0], ni[0], n[0], jx0[0]-1, jx1[0]-1, jx2[0]-1, jx3[0]-1, jv0[0]-1, jv1[0]-1, jv2[0]-1, jv3[0]-1, ju0[0]-1, ju1[0]-1, ju2[0]-1, ju3[0]-1, jar[0]-1, jf0[0]-1, jaa[0]-1, jab[0]-1, jac[0]-1, jad[0]-1, d_flagsend, dt[0]);
if(inCPU){
hipMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), hipMemcpyDeviceToHost);
//free
hipFree(d_rpart);
//hipFree(d_alpha);
//hipFree(d_beta);
hipFree(d_xdrange);
hipFree(d_flagsend);
}
hipFree(d_alpha);
hipFree(d_beta);
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("update stokes time is %f\n",time*1e-03);
}
__global__ void baryinterp(double *rpart, int *ipart, double *vx, double *vy, double *vz, int jr, int je0, int ju0, double *rep, double *xgll, double * ygll, double *zgll, double *wxgll, double *wygll, double *wzgll, int nx1, int n, int nr, int ni){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
double x,y,z; //reference-space coordinates; declaring them int truncated the values and broke the interpolation
double bot = 0.0;
x = rpart[id*nr+jr];
y = rpart[id*nr+jr+1];
z = rpart[id*nr+jr+2];
for(int k=0; k<nx1;k++){
for(int j=0; j<nx1; j++){
double repdum = wygll[j]/(y-ygll[j]) * wzgll[k]/(z-zgll[k]) ;
for(int i = 0; i<nx1; i++){
rep[k*nx1*nx1+j*nx1+i] = repdum * wxgll[i]/(x-xgll[i]);
bot = bot + rep[k*nx1*nx1+j*nx1+i];
}
}
}
int ie = ipart[id*ni+je0];
//new (vx(ie),rpart(ju0)
double top1 = 0.0, top2 = 0.0, top3 = 0.0;
int nxyz = nx1*nx1*nx1;
double *fieldx = vx+ie*nxyz;
double *fieldy = vy+ie*nxyz;
double *fieldz = vz+ie*nxyz;
for(int i = 0; i<nxyz; i++){
top1 = top1 + rep[i]*fieldx[i];
top2 = top2 + rep[i]*fieldy[i];
top3 = top3 + rep[i]*fieldz[i];
}
rpart[id*nr+ju0] = top1/bot;
rpart[id*nr+ju0+1] = top2/bot;
rpart[id*nr+ju0+2] = top3/bot;
}
}
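//what baryinterp evaluates (barycentric-style Lagrange interpolation on the GLL
//tensor grid): for a particle at reference coordinates (x,y,z),
//  rep(i,j,k) = wx_i/(x-x_i) * wy_j/(y-y_j) * wz_k/(z-z_k)
//  u(x,y,z)  ~= sum_ijk rep(i,j,k)*u_ijk / sum_ijk rep(i,j,k)
//applied to vx, vy, vz of the owning element ie and stored in ju0..ju0+2.
//Note that rep is a single shared buffer, so concurrent threads (particles)
//race on it; this is only safe with one particle per rep buffer unless rep is
//made per-thread or per-particle.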
extern "C" void baryweights_evalwrapper_(double *rpart, int *ipart, double *vx, double *vy, double *vz, double *rep, double *xgll, double * ygll, double *zgll, double *wxgll, double *wygll, double *wzgll, int* jr, int* je0, int* ju0, int* nx1, int* n, int* nr, int* ni, int *nel){
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
bool inCPU = false;
double *d_rpart, *d_vols, *d_rep, *d_gll;
int *d_ipart;
int nx1_2 = nx1[0]*nx1[0];
int nx1_3 = nx1_2*nx1[0];
if(inCPU){
hipMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
hipMalloc(&d_vols, 3*nel[0]*nx1_3*sizeof(double));
hipMalloc(&d_rep, nx1_3*sizeof(double));
hipMalloc(&d_gll, 6*nx1[0]*sizeof(double));
hipMalloc(&d_ipart, n[0]*ni[0]*sizeof(int));
hipMemcpy(d_rpart, rpart, n[0]*nr[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_vols, vx, nel[0]*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_vols+nel[0]*nx1_3, vy, nel[0]*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_vols+2*nel[0]*nx1_3, vz, nel[0]*nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_rep, rep, nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll, xgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+nx1[0], ygll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+2*nx1[0], zgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+3*nx1[0], wxgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+4*nx1[0], wygll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+5*nx1[0], wzgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_ipart, ipart, n[0]*ni[0]*sizeof(int), hipMemcpyHostToDevice);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
hipLaunchKernelGGL(( baryinterp), dim3(gridSize), dim3(blockSize), 0, 0, d_rpart, d_ipart, d_vols, d_vols+nel[0]*nx1_3, d_vols+2*nel[0]*nx1_3, jr[0]-1, je0[0]-1, ju0[0]-1, d_rep, d_gll, d_gll+nx1[0], d_gll+2*nx1[0], d_gll+3*nx1[0], d_gll+4*nx1[0], d_gll+5*nx1[0], nx1[0], n[0], nr[0], ni[0]);
}
else{
d_rpart = rpart;
d_ipart = ipart;
hipMalloc(&d_rep, nx1_3*sizeof(double));
hipMalloc(&d_gll, 6*nx1[0]*sizeof(double));
hipMemcpy(d_rep, rep, nx1_3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll, xgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+nx1[0], ygll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+2*nx1[0], zgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+3*nx1[0], wxgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+4*nx1[0], wygll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_gll+5*nx1[0], wzgll, nx1[0]*sizeof(double), hipMemcpyHostToDevice);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
hipLaunchKernelGGL(( baryinterp), dim3(gridSize), dim3(blockSize), 0, 0, d_rpart, d_ipart, vx,vy,vz, jr[0]-1, je0[0]-1, ju0[0]-1, d_rep, d_gll, d_gll+nx1[0], d_gll+2*nx1[0], d_gll+3*nx1[0], d_gll+4*nx1[0], d_gll+5*nx1[0], nx1[0], n[0], nr[0], ni[0]);
}
if(inCPU){
hipMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), hipMemcpyDeviceToHost);
//free
hipFree(d_rpart);
hipFree(d_vols);
hipFree(d_rep);
hipFree(d_gll);
hipFree(d_ipart);
}
else{
hipFree(d_rep);
hipFree(d_gll);
}
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("interp time is %f\n",time*1e-03);
}
__global__ void packFaces(double *faces, double *packed, double *sharedIndex, int n, int nelt, int nx1, int iu1, int dir){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
//get element and face numbers
int e, f, nx1_2;
nx1_2 = nx1 * nx1;
e = sharedIndex[id*2]-1;//fmod(sharedIndex[id*2]-1,nelt);
f = sharedIndex[id*2+1]-1;
//printf("e = %d, f = %d\n",e,f);
/*if(e>nelt-1)
printf ("e > nelt, %d\n",e);
if(f>5)
printf("f > nface,%d\n",f);*/
//copy the whole face
int off2 = id * nx1_2;
int f_off2 = e * 6 * nx1_2 + f*nx1_2;
for(int i = 0; i < 5; i++){
int off1 = i * n * nx1_2;
int f_off1 = i * nelt * 6 * nx1_2;
double* packed1 = packed+off1+off2;
double* faces1 = faces+f_off1+f_off2;
for(int j = 0; j < nx1_2; j++){
if(dir == 0)
packed1[j] = faces1[j];
else faces1[j] = packed1[j];
}
}
for(int i = 0; i < 5; i++){
int off1 = (i+5) * n * nx1_2;
int f_off1 = (i+iu1-1) * nelt * 6 * nx1_2;
double* packed1 = packed+off1+off2;
double* faces1 = faces+f_off1+f_off2;
for(int j = 0; j < nx1_2; j++){
if(dir == 0)
packed1[j] = faces1[j];
else faces1[j] = packed1[j];
}
}
}
}
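//packed layout produced above for dir==0, with n shared (element,face) pairs:
//  packed[ivar][sf][p], ivar = 0..9, sf = 0..n-1, p = 0..nx1*nx1-1
//ivar 0..4 are the first five face fields and ivar 5..9 are the five fields
//starting at position iu1 (1-based) in the faces array; sharedIndex stores the
//1-based (element,face) pairs, hence the -1. dir==1 runs the same loops in the
//opposite direction to unpack received data back into faces.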
extern "C" void packfaceswrapper_(double *faces, double *packed, double *sharedIndex, int *maxIndex, int *nelt, int *nx1, int *iu, int *dir){
//all data is in GPU
int blockSize = 1024, gridSize;
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
double *d_shared;
hipMalloc(&d_shared, nelt[0]*12*sizeof(double));
hipMemcpy(d_shared, sharedIndex, nelt[0]*12*sizeof(double), hipMemcpyHostToDevice);
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
// printf("allocate sharedIndex is %f\n",time*1e-03);
gridSize = (int)ceil((float)maxIndex[0]/blockSize);
hipLaunchKernelGGL(( packFaces), dim3(gridSize), dim3(blockSize), 0, 0, faces, packed, d_shared, maxIndex[0], nelt[0], nx1[0], iu[0], dir[0]);
hipFree(d_shared);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error str 4: %s\n",hipGetErrorString(code));
}
}
void init_matrix(double * mat, int size, int begin){
for(int i=0; i<size; i++){
mat[i] = begin+i;
// mat[i] = rand();
}
}
void init_u(double * mat, int n, int k, int jobs){
//for(int i=0; i<n*k*jobs;i++)
//mat[i] = 0.0;
size_t bytes = jobs*n*k*sizeof(double);
double * u_eq1; //jobs(number of elements) * k
hipHostMalloc( (void**) &u_eq1, bytes);
init_matrix(u_eq1, jobs*n*k, 10);
double * vx; //working only on vx direction
hipHostMalloc( (void**) &vx, bytes);
init_matrix(vx, jobs*n*k, 10);
//calc time
// float time;
// hipEvent_t startEvent, stopEvent;
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
// hipEventRecord(startEvent, 0);
// Device input vectors
double *d_mat;
double *d_u_eq1;
double *d_vx;
// allocate device vectors memory
hipMalloc(&d_mat, bytes);
hipMalloc(&d_u_eq1, bytes);
hipMalloc(&d_vx, bytes);
// copy host vectors to device
hipMemcpy( d_u_eq1, u_eq1, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_vx, vx, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*k*jobs/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( vecCopy), dim3(gridSize), dim3(blockSize), 0, 0, d_u_eq1, d_mat, jobs, n*k);
hipLaunchKernelGGL(( vecMul), dim3(gridSize), dim3(blockSize), 0, 0, d_vx, d_mat, jobs, n*k);
// Copy array back to host
hipMemcpy( mat, d_mat, bytes, hipMemcpyDeviceToHost );
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("second element is %f, n %d, k%d, time is %f\n",mat[n*k+1],n,k,time*1e-03);
//do in cpu
// hipEventRecord(startEvent, 0);
for(int i =0; i< n*k*jobs; i++)
mat[i] = u_eq1[i];
for(int i=0; i< n*k*jobs;i++)
mat[i] = mat[i] * vx[i];
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
//printf("cpu time is %f\n",time*1e-03);
/*int nxz= 6, nxyz = 24, nel=3000;
double *vols = new double[nxyz*nel];
double *faces = new double[nxz*nel];
int *iface = new int[nxz*nel];
vols[1*nxyz+12] = 2.3;
iface[1*nxz+2] = 12;
full2faceWrapper_(vols, faces, nel, nxz, nxyz, iface, true, true);
printf("face = %f\n",faces[1*nxz+2]);*/
//free the temporaries used by this test helper (previously leaked)
hipFree(d_mat);
hipFree(d_u_eq1);
hipFree(d_vx);
hipHostFree(u_eq1);
hipHostFree(vx);
}
//program
extern "C" void test_( int* matsize_p, int* gridsize_p, int* jobs_p, double* h_A,
double* h_AA, int* M_p, int* N_p, int* K_p)
{
int matsize = *matsize_p;
int gridsize = *gridsize_p;
int jobs = *jobs_p;
int M = *M_p;
int N = *N_p;
int K = *K_p;
// float time;
// hipEvent_t startEvent, stopEvent;
hipDeviceProp_t prop;
hipGetDeviceProperties (&prop, 0);
hipSetDevice( 0 );
// hipEventCreate(&startEvent);
// hipEventCreate(&stopEvent);
double *h_B, *h_BB, *h_C, *h_D, *h_E;
double *d_A, *d_AA, *d_B, *d_BB;
double *d_C, *d_D, *d_E;
M = matsize;
N = matsize*matsize;
K = matsize;
hipHostMalloc( (void**) &h_B, (K*N)*sizeof(double)*jobs );
hipHostMalloc( (void**) &h_BB, (N*K)*sizeof(double)*jobs );
hipHostMalloc( (void**) &h_C, (K*N)*sizeof(double)*jobs );
hipHostMalloc( (void**) &h_D, (K*N)*sizeof(double)*jobs );
hipHostMalloc( (void**) &h_E, (N*K)*sizeof(double)*jobs );
/* Initialize and copy the matrices */
//init_matrix(h_B, N*K*jobs, 10);
init_u(h_B, N, K, jobs);
// memset(h_C, 0, (K*N)*sizeof(double)*jobs);
// memset(h_D, 0, (K*N)*sizeof(double)*jobs);
// memset(h_E, 0, (K*N)*sizeof(double)*jobs);
hipMalloc( (void**) &d_A, (M*K)*sizeof(double) );
hipMalloc( (void**) &d_AA, (K*M)*sizeof(double) );
hipMalloc( (void**) &d_B, (K*N)*sizeof(double)*jobs );
hipMalloc( (void**) &d_BB, (N*K)*sizeof(double)*jobs );
hipMalloc( (void**) &d_C, (K*N)*sizeof(double)*jobs );
hipMalloc( (void**) &d_D, (K*N)*sizeof(double)*jobs );
hipMalloc( (void**) &d_E, (N*K)*sizeof(double)*jobs );
// hipMemset(d_C, 0, (K*N)*sizeof(double)*jobs);
// hipMemset(d_D, 0, (K*N)*sizeof(double)*jobs);
// hipMemset(d_E, 0, (K*N)*sizeof(double)*jobs);
hipStream_t stream;
hipStreamCreate( &stream );
D printf("Matrix d:\n");
D print(h_A, M, K);
D printf("Matrix db:\n");
D print(h_AA, M, K);
D printf("Matrix u:\n");
D print(h_B, K, N);
D printf("Matrix ub:\n");
D print(h_BB, K, N);
const double alpha = 1;
const double beta = 0;
unsigned int dim = K;
hipblasSetMatrix(M, K, sizeof(double), h_A, K, d_A, K);
hipblasSetMatrix(K, M, sizeof(double), h_AA, K, d_AA, K);
// hipEventRecord(startEvent, 0);
hipblasSetMatrixAsync(M, N*jobs, sizeof(double), h_B, M, d_B, M, stream);
fflush( stdout );
//cuda_multi_gemm_unif(stream, 'N', 'N', dim, dim, dim, &alpha, dim, dim*dim, d_A, d_B, d_BB, &beta, d_C, d_D, d_E, jobs*K, gridsize);
hipDeviceSynchronize();
fflush( stdout );
hipblasGetMatrixAsync(M, N*jobs, sizeof(double), d_C, K, h_C, K, stream);
hipblasGetMatrixAsync(M, N*jobs, sizeof(double), d_D, K, h_D, K, stream);
hipblasGetMatrixAsync(M, N*jobs, sizeof(double), d_E, K, h_E, K, stream);
// hipEventRecord(stopEvent, 0);
// hipEventSynchronize(stopEvent);
// hipEventElapsedTime(&time, startEvent, stopEvent);
// printf("GPU time: %f, throughput: %f\n", time * 1e-03, (jobs*2.0*3*K*// K*K*K)/(1024*1024*1024*time*1e-03));
// printf(" gpu time: %f\n", time * 1e-03);
D printf("Matrix r:\n");
D print((h_C), M, N);
D printf("Matrix s:\n");
D print((h_D), M, N);
D printf("Matrix t:\n");
D print((h_E), M, N);
hipHostFree( h_B );
hipHostFree( h_BB );
hipHostFree( h_C );
hipHostFree( h_D );
hipHostFree( h_E );
//free all device buffers (d_AA, d_BB, d_D and d_E were previously leaked)
hipFree( d_A );
hipFree( d_AA );
hipFree( d_B );
hipFree( d_BB );
hipFree( d_C );
hipFree( d_D );
hipFree( d_E );
fflush( stdout );
hipStreamDestroy(stream);
/**jobs_p = 22;
int **p1 = new int*[2];
for(int i=0; i<2;i++)
p1[i] = new int[3];
int *p2 = (int*)p1;
for(int i=0;i<2;i++)
for(int j=0;j<3;j++){
p1[i][j] = i*2+j;
printf("a[%d][%d]=%d,%d\n",i,j,p1[i][j],p2[i*2+j]);
}*/
return;
}
// c1074abe6955b65fead812580b1c95ad5a784965.cu : original CUDA source corresponding to the hipified code above
//All includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "nvml.h"
#include "cuda_functions.h"
// includes, project
//#include "magma.h"
#include "cuda_multi_gemm_unif.cu"
//#include "cuda_add_vec.h"
//My includes
#include "debug_fns.h"
// #include "transformations.h"
//switch the comments to toggle debug mode
//#define D
#define D for(;0;)
#if 0
double get_time( void )
{
struct timeval t;
gettimeofday( &t, NULL );
return t.tv_sec + t.tv_usec*1e-6;
}
#endif
__global__ void vecCopy(double *a, double*b, int jobs, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
/*if (id < n){
for(int i = 0; i< jobs; i++)
b[i*n+id] = a[i*n+id];
}*/
if(id < n*jobs)
b[id] = a[id];
}
__global__ void vecMul(double *a, double*b, int jobs, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
/*if (id < n){
for(int i = 0; i< jobs; i++)
b[i*n+id] = b[i*n+id] * a[i*n+id];
}*/
if(id < n*jobs)
b[id] = b[id] * a[id];
}
__global__ void full2face(double *vols, double*faces, int nel, int n, int nxyz, int*iface){
//6 faces, each of size nx * nz => n = nx*nz *6
//vols: e elements each of size nx*ny*nz => nxyz = nx*ny*nz
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*n){//n = nxyz
int e = id/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[id];//[e][j];
faces[id]/*[e][j]*/ = vols[e*nxyz+i-1]/*[e][i]*/;
//faces[id] = 2.55;
}
//if(id==0)
//printf("in kernel*******\n");
}
__global__ void face2full(double *vols, double*faces, int nel, int n, int nxyz, int*iface){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*n){//n = nxyz
int e = id/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[id];//[e][j]
vols[e*nxyz+i-1] = vols[e*nxyz+i-1] + faces[id]; //iface holds 1-based Fortran indices, same -1 offset as full2face/addfull2face
}
}
__global__ void faceu(double *u, double*faces, int toteq, int nel, int n, int nxyz, int*iface){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<toteq*nel*n){
int ivar = id/(nel*n);
int e_n = id%(nel*n);
int e = e_n/n;
int i = iface[e_n];
faces[id] = u[e*(toteq*nxyz)+ivar*nxyz+i-1];
}
}
__global__ void fillq(double *vtrans, double *vx, double *vy, double *vz, double*pr, double*faces, int nel, int n, int nxyz, int*iface, int size){
//6 faces, each of size nx * nz => n = nx*nz *6
//vols: e elements each of size nx*ny*nz => nxyz = nx*ny*nz
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<size){
int ivar = id/(nel*n);
int e_n = id%(nel*n);
int e = e_n/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[e_n];//[e][j];
if(ivar==0)
faces[id] = vtrans[e*nxyz+i-1];
else if(ivar==1)
faces[id] = vx[e*nxyz+i-1];
else if(ivar==2)
faces[id] = vy[e*nxyz+i-1];
else if(ivar==3)
faces[id] = vz[e*nxyz+i-1];
else if(ivar==4)
faces[id] = pr[e*nxyz+i-1];
//faces[id]/*[e][j]*/ = vols[ivar*(nxyz*nel)+e*nxyz+i-1]/*[e][i]*/;
//faces[id] = 2.55;
}
//if(id==0)
//printf("in kernel*******\n");
}
extern "C" void faceuwrapper_(int *toteq1, int *n1, int *nxyz1, int*nel1, double *u, double *faces, int *iface){
int toteq = toteq1[0];
int n = n1[0];
int nxyz = nxyz1[0];
int nel = nel1[0];
double *d_u, *d_faces;
int *d_iface;
bool inCPU = false;
if(inCPU){
cudaMalloc(&d_u, nxyz*nel*sizeof(double)*toteq);
cudaMalloc(&d_iface, n*nel*sizeof(int));
cudaMalloc(&d_faces, n*nel*sizeof(double)*toteq);
cudaMemcpy( d_u, u, nxyz*nel*sizeof(double)*toteq, cudaMemcpyHostToDevice);
cudaMemcpy( d_iface, iface, n*nel*sizeof(int), cudaMemcpyHostToDevice);
}
else{
//just assign
d_u = u;
d_iface = iface;
d_faces = faces;
}
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*nel*toteq/blockSize);
// Execute the kernel
//printf("block size = %d, grid size = %d\n",blockSize,gridSize);
faceu<<<gridSize, blockSize>>>(d_u, d_faces, toteq, nel, n, nxyz, d_iface);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 5: %s\n",cudaGetErrorString(code));
}
if(inCPU){
cudaMemcpy( faces, d_faces, n*nel*sizeof(double)*toteq, cudaMemcpyDeviceToHost );
cudaFree(d_u);
cudaFree(d_faces);
cudaFree(d_iface);
}
}
extern "C" void copyqq_(double*qq, double * faces, int*size){
cudaMemcpy( faces, qq, size[0]*sizeof(double)*5, cudaMemcpyDeviceToHost );
cudaFree(qq);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error in copyqq, str: %s\n",cudaGetErrorString(code));
}
}
extern "C" void fillqwrapper_(double *vols_vtrans, double *vols_vx, double *vols_vy, double *vols_vz, double *vols_pr, double*faces, int *nel1, int *n1, int *nxyz1, int*iface, bool device_arr, bool pull_result){
int nel = nel1[0];
int n = n1[0];
int nxyz = nxyz1[0];
//printf("nel = %d, n = %d, nxyz=%d\n",nel,n,nxyz);
/*for(int index = 0; index <4; index++){
printf("vols_t[%d]=%f,vols_x=%f,vols_y=%f,vols_pr=%f\n",index,vols_vtrans[index],vols_vx[index],vols_vy[index],vols_pr[index]);
printf("iface[%d]=%d\n",index,iface[index]);
}*/
//double *d_vols_vtrans, *d_vols_vx, d_vols_vy, d_vols_vz, d_vols_pr;
double *d_vols;
double *d_vtrans,*d_vx,*d_vy,*d_vz,*d_pr;
double *d_faces;
int *d_iface;
// allocate device vectors memory
/*cudaMalloc(&d_vols_vtrans, nxyz*nel*sizeof(double));
cudaMalloc(&d_vols_vx, nxyz*nel*sizeof(double));
cudaMalloc(&d_vols_vy, nxyz*nel*sizeof(double));
cudaMalloc(&d_vols_vz, nxyz*nel*sizeof(double));
cudaMalloc(&d_vols_pr, nxyz*nel*sizeof(double));*/
bool inCPU = false;
if(inCPU){
cudaMalloc(&d_vols, nxyz*nel*sizeof(double)*5);
cudaMalloc(&d_faces, n*nel*sizeof(double)*5);
cudaMalloc(&d_iface, n*nel*sizeof(int));
cudaMemcpy( d_vols, vols_vtrans, nxyz*nel*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_vols+nxyz*nel, vols_vx, nxyz*nel*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_vols+2*nxyz*nel, vols_vy, nxyz*nel*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_vols+3*nxyz*nel, vols_vz, nxyz*nel*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_vols+4*nxyz*nel, vols_pr, nxyz*nel*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_iface, iface, n*nel*sizeof(int), cudaMemcpyHostToDevice);
//point the per-field pointers into the packed d_vols buffer so the kernel
//launch below also works when the data starts on the CPU
d_vtrans = d_vols;
d_vx = d_vols+nxyz*nel;
d_vy = d_vols+2*nxyz*nel;
d_vz = d_vols+3*nxyz*nel;
d_pr = d_vols+4*nxyz*nel;
}
else{
//send vols_vtrans = all vols
//just assign
d_vols = vols_vtrans;
d_vtrans = vols_vtrans;
d_vx = vols_vx;
d_vy = vols_vy;
d_vz = vols_vz;
d_pr = vols_pr;
d_faces = faces;
d_iface = iface;
}
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*nel*5/blockSize);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 6 before: %s\n",cudaGetErrorString(code));
}
// Execute the kernel
//printf("block size = %d, grid size = %d\n",blockSize,gridSize);
fillq<<<gridSize, blockSize>>>(d_vtrans,d_vx,d_vy,d_vz,d_pr, d_faces, nel, n, nxyz, d_iface,5*nel*n);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 6: %s\n",cudaGetErrorString(code));
}
if(inCPU){
cudaMemcpy( faces, d_faces, n*nel*sizeof(double)*5, cudaMemcpyDeviceToHost );
cudaFree(d_faces);
cudaFree(d_vols);
cudaFree(d_iface);
}
}
extern "C" void full2facewrapper_(double *vols, double*faces, int *nel1, int *n1, int *ivar1, int *nxyz1, int*iface, bool device_arr, bool pull_result){
//test printing
int nel = nel1[0];
int n = n1[0];
int nxyz = nxyz1[0];
int ivar = ivar1[0]-1;
//printf("nel = %d, n = %d, nxyz=%d, ivar=%d\n",nel,n,nxyz,ivar);
/*for(int index = 0; index <4; index++){
printf("vols[%d]=%f\n",index,vols[index]);
printf("iface[%d]=%d\n",index,iface[index]);
}*/
// n = nx * nz
// Device input arrays
double *d_vols;
double *d_faces;
int *d_iface;
// allocate device vectors memory
cudaMalloc(&d_vols, nxyz*nel*sizeof(double));
cudaMalloc(&d_faces, n*nel*sizeof(double));
cudaMalloc(&d_iface, n*nel*sizeof(int));
cudaMemcpy( d_vols, vols, nxyz*nel*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_iface, iface, n*nel*sizeof(int), cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*nel/blockSize);
// Execute the kernel
//printf("block size = %d, grid size = %d\n",blockSize,gridSize);
full2face<<<gridSize, blockSize>>>(d_vols, d_faces, nel, n, nxyz, d_iface);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 7: %s\n",cudaGetErrorString(code));
}
cudaMemcpy( faces+ivar*n*nel, d_faces, n*nel*sizeof(double), cudaMemcpyDeviceToHost );
/*for(int index = 0; index <4; index++){
printf("faces[%d]=%f\n",index,faces[index]);
}*/
// Release device memory
cudaFree(d_faces);
cudaFree(d_vols);
cudaFree(d_iface);
}
__global__ void rzero (double *arr, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
arr[id] = 0.0;
}
__global__ void surfaceintegral_flux(double * flux, double *area, double *phig, int * index, int toteq, int nelt, int nface, int nxz, int ny){
int size = toteq * nelt * nface;
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<size){
int eq = id/(nelt*nface);
int e_f = id%(nelt*nface);
int e = e_f/(nface);
int f = e_f%nface;
int count = 0;
//the bounds stored in index[] are inclusive (min==max selects the fixed face
//plane), so the loops must use <=; with < the single-plane dimension never iterates
for(int i = index[f*6+4]; i <= index[f*6+5]; i++){
for(int j = index[f*6+2]; j <= index[f*6+3]; j++){
for(int k = index[f*6+0]; k <= index[f*6+1]; k++){
int l = eq*(nelt*nface*nxz)+e*(nface*nxz)+f*(nxz)+count;
flux[l] = flux[l] * area[e*(nface*nxz)+f*nxz+count++] * phig[e*ny*nxz+i*nxz+j*ny+k];
}
}
}
}
}
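//face-plane bounds convention for the index[] table built in the wrapper below:
//for each face f, index[f*6+0..5] = (kmin,kmax,jmin,jmax,imin,imax) with
//inclusive limits; four of the six entries span the full 0..n-1 range and the
//remaining pair pins the face to a single constant plane (e.g. face 0 has
//jmin=jmax=0 and face 5 has imin=imax=nz-1), so each face visits nx*nz points.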
__global__ void addfull2face(double *vols, double*faces, int nel, int n, int nxyz, int*iface, int size){
//6 faces, each of size nx * nz => n = nx*nz *6
//vols: e elements each of size nx*ny*nz => nxyz = nx*ny*nz
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<size){
int eq = id/(nel*n);
int e_n = id%(nel*n);
int e = e_n/n; //+1 in fortran
//int j = id%n; //+1 in fortran
int i = iface[e_n];//[e][j];
int volIndex = eq*(nel*nxyz)+e*nxyz+i-1;
vols[volIndex] = vols[volIndex] + faces[id];
}
}
//extern "C" void surfaceintegralwrapper_(int *toteq1, int *nx1, int*ny1, int*nz1, int *nelt1, int *nface1, double* faces, double *area, double *phig, double *vols, int *iface){
extern "C" void surfaceintegralwrapper_(double* faces, double *area, double *phig, double *vols, int *iface, int *toteq1, int *nx1, int*ny1, int*nz1, int *nelt1, int *nface1){
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
int nx = nx1[0];
int ny = ny1[0];
int nz = nz1[0];
int nelt = nelt1[0];
int nface = nface1[0];
int toteq = toteq1[0];
//printf("nface = %d, nx = %d, ny = %d, nz = %d, nelt = %d, toteq=%d, faces[0]=%f\n", nface,nx,ny,nz,nelt,toteq,/*faces[nelt*nface*nx*nz-1]*/vols[nelt*nx*ny*nz]);
int * index = new int[nface*6];
for(int i =0; i<nface; i++){
index[i*6+0] = 0;
index[i*6+1] = nx-1;
index[i*6+2] = 0;
index[i*6+3] = ny-1;
index[i*6+4] = 0;
index[i*6+5] = nz-1;
}
index[0*6+3] = 0;
index[1*6+0] = nx-1;
index[2*6+2] = ny-1;
index[3*6+1] = 0;
index[4*6+5] = 0;
index[5*6+4] = nz-1;
double *d_faces, *d_area, *d_phig, *d_vols;
int *d_index, *d_iface;
bool dataInGPU = true;
cudaError_t code ;
if(dataInGPU){
d_faces = faces;
d_area = area;
d_phig = phig;
d_vols = vols;
d_iface = iface;
}
else{
//memory allocation
cudaMalloc(&d_faces, toteq*nelt*nx*nz*nface*sizeof(double));
cudaMalloc(&d_area, nelt*nface*nx*nz*sizeof(double)); //ask about area
cudaMalloc(&d_phig, nelt*nx*ny*nz*sizeof(double));
cudaMalloc(&d_vols, toteq*nelt*nx*ny*nz*sizeof(double));
cudaMalloc(&d_iface, nelt*nface*nx*nz*sizeof(int));
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error in malloc, str: %s\n",cudaGetErrorString(code));
}
//data transfer
cudaMemcpy( d_area, area, nelt*nface*nx*nz*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_phig, phig, nelt*nx*ny*nz*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_iface, iface, nelt*nface*nx*nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_faces, faces, toteq*nelt*nx*nz*nface*sizeof(double), cudaMemcpyHostToDevice);
}
cudaMalloc(&d_index, nface*6*sizeof(int));
cudaMemcpy( d_index, index, nface*6*sizeof(int), cudaMemcpyHostToDevice);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error in memcpy, str: %s\n",cudaGetErrorString(code));
}
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
int ntot = toteq*nelt*nx*ny*nz;
gridSize = (int)ceil((float)ntot/blockSize);
rzero<<<gridSize, blockSize>>>(d_vols, ntot);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error in flux, str: %s\n",cudaGetErrorString(code));
}
gridSize = (int)ceil((float)toteq*nelt*nface/blockSize);
surfaceintegral_flux<<<gridSize, blockSize>>>(d_faces, d_area, d_phig, d_index, toteq, nelt, nface, nx*nz, ny);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error in flux, str: %s\n",cudaGetErrorString(code));
}
gridSize = (int)ceil((float)toteq*nelt*nx*nz*nface/blockSize);
addfull2face<<<gridSize, blockSize>>>(d_vols, d_faces, nelt, nx*nz*nface, nx*ny*nz, d_iface,toteq*nelt*nx*nz*nface);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error in full2face, str: %s\n",cudaGetErrorString(code));
}
if(!dataInGPU){
cudaMemcpy( vols, d_vols, toteq*nelt*nx*ny*nz*sizeof(double), cudaMemcpyDeviceToHost );
cudaMemcpy( faces, d_faces, toteq*nelt*nface*nx*nz*sizeof(double), cudaMemcpyDeviceToHost );
// Release device memory
cudaFree(d_faces);
cudaFree(d_vols);
cudaFree(d_iface);
cudaFree(d_area);
cudaFree(d_phig);
}
cudaFree(d_index);
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("surfcae integral time is %f\n",time*1e-03);
}
//mxm multiplication
__global__ void mxm(double *a, int n1, double *b, int n2, double *c, int n3, int nel, int aSize, int bSize, int cSize, int extraEq){
//calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c
//in fortran the original calculation was
// c(n3,n1) = b(n3,n2) X a(n2,n1)
// a,b,cSize are single element size
//extraEq, in case of a matrix has equation as an index
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*n1*n3){
int e = id/(n1*n3);
int rc = id%(n1*n3);
int i = rc/n3;
int j = rc%n3;
int cid = e*cSize + rc;
int aid = e*aSize + extraEq + i*n2;
int bid = e*bSize + j;
c[cid] = 0;
for(int k = 0; k<n2; k++)
c[cid]+=a[aid+k]*b[bid+k*n3];
}
}
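//note on the layout trick used by mxm/mxm_faces: the buffers come from Fortran
//(column-major); reading them as row-major arrays is an implicit transpose, and
//since (A*B)^T = B^T * A^T the desired Fortran product is obtained by swapping
//the operand roles instead of physically transposing data, which is what the
//"in fortran the original calculation was ..." comments above refer to.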
// specmpn routine in fortran
void specmpn(double *d_b, int nb, double *d_a, int na, double * d_ba, double* d_ab, bool if3d, double * d_w, int ldw, int nel, int neq, int eq, bool second_eq){
//d_a is array(na,na,na)*nel, d_b(nb,nb,nb)*nel, w(ldw)*nel where ldw = na*na*nb+nb*nb*na
//d_a is array of nel each array(na,na,na)
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
cudaStream_t stream;
cudaStreamCreate( &stream );
const double alpha = 1;
const double beta = 0;
if(if3d){
int nab = na*nb;
int nbb = nb*nb;
//calc w = ba*a in fortran
//so in c calc wt = at * bat
//call mxm(ba,nb,a,na,w,na*na)
//in fortran calc w(nb,na*na) = ba(nb,na) * a(na,na*na)
//in c w(na*na,nb) = a(na*na,na) * ba(na,nb)
//neq = 1 if array not indexed by eq and eq = 0
int aSize = neq*pow(na,3), bSize = pow(nb,3);
gridSize = (int)ceil((float)na*na*nb*nel/blockSize);
//mxm<<<gridSize, blockSize>>>(d_a,na*na, d_ba, na, d_w, nb, nel, aSize, 0, ldw, eq*pow(na,3));
cuda_multi_gemm_unif(stream, 'N', 'N', nb, na, na*na, &alpha, d_ba, nb, 0, d_a, na, aSize, &beta, d_w, nb, ldw, nel, gridSize);
int k = 0, l = na*na*nb;
for(int iz=0; iz<na;iz++){
//calc in fortran wl(nb*nb) = wk(nb*na) * ab(na*nb)
//in c wl(nb*nb) = ab(nb*na) * wk(na*nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
//mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w+k, na, d_w+l, nb, nel, 0, ldw, ldw, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nb, na, nb, &alpha, d_w+k, nb, ldw, d_ab, na, 0, &beta, d_w+l, nb, ldw, nel, gridSize);
k = k + nab;
l = l + nbb;
}
l = na*na*nb;
//calc in fortran b(nb*nb,nb) = wl(nb*nb,na)* ab(na,nb)
//in C b(nb,nb*nb) = ab(nb,na) * wl(na,nb*nb)
gridSize = (int)ceil((float)nb*nb*nb*nel/blockSize);
//mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w+l, na, d_b, nb*nb, nel, 0, ldw, bSize, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nb*nb, na, nb, &alpha, d_w+l, nb*nb, ldw, d_ab, na, 0, &beta, d_b, nb*nb, bSize, nel, gridSize);
}
else{
//calc w(nb*na) = ba(nb,na) * a(na,na) in fortran,
//in C w(na*nb) = a(na,na) * ba(na,nb)
gridSize = (int)ceil((float)na*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_a,na, d_ba, na, d_w, nb, nel, neq*na*na, 0, ldw, eq*na*na);
//in fortran, b(nb,nb) = w(nb,na)*ab(na,nb)
//in C b(nb,nb) = ab(nb,na) * w(na,nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w, na, d_b, nb, nel, 0, ldw, nb*nb, 0);
}
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 1: %s\n",cudaGetErrorString(code));
}
cudaStreamDestroy(stream);
}
void specmpn_old(double *d_b, int nb, double *d_a, int na, double * d_ba, double* d_ab, bool if3d, double * d_w, int ldw, int nel, int neq, int eq, bool second_eq){
//d_a holds nel arrays of size (na,na,na), d_b holds nel arrays of size (nb,nb,nb)
//w holds nel work arrays of length ldw, where ldw = na*na*nb + nb*nb*na
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
if(if3d){
int nab = na*nb;
int nbb = nb*nb;
//calc w = ba*a in fortran
//so in c calc wt = at * bat
//call mxm(ba,nb,a,na,w,na*na)
//in fortran calc w(nb,na*na) = ba(nb,na) * a(na,na*na)
//in c w(na*na,nb) = a(na*na,na) * ba(na,nb)
//neq = 1 if array not indexed by eq and eq = 0
int aSize = neq*pow(na,3), bSize = pow(nb,3);
gridSize = (int)ceil((float)na*na*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_a,na*na, d_ba, na, d_w, nb, nel, aSize, 0, ldw, eq*pow(na,3));
int k = 0, l = na*na*nb;
for(int iz=0; iz<na;iz++){
//calc in fortran wl(nb*nb) = wk(nb*na) * ab(na*nb)
//in c wl(nb*nb) = ab(nb*na) * wk(na*nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w+k, na, d_w+l, nb, nel, 0, ldw, ldw, 0);
k = k + nab;
l = l + nbb;
}
l = na*na*nb;
//calc in fortran b(nb*nb,nb) = wl(nb*nb,na)* ab(na,nb)
//in C b(nb,nb*nb) = ab(nb,na) * wl(na,nb*nb)
gridSize = (int)ceil((float)nb*nb*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w+l, na, d_b, nb*nb, nel, 0, ldw, bSize, 0);
}
else{
//calc w(nb*na) = ba(nb,na) * a(na,na) in fortran,
//in C w(na*nb) = a(na,na) * ba(na,nb)
gridSize = (int)ceil((float)na*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_a,na, d_ba, na, d_w, nb, nel, neq*na*na, 0, ldw, eq*na*na);
//in fortran, b(nb,nb) = w(nb,na)*ab(na,nb)
//in C b(nb,nb) = ab(nb,na) * w(na,nb)
gridSize = (int)ceil((float)nb*nb*nel/blockSize);
mxm<<<gridSize, blockSize>>>(d_ab,nb, d_w, na, d_b, nb, nel, 0, ldw, nb*nb, 0);
}
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 2: %s\n",cudaGetErrorString(code));
}
}
__global__ void replicate_3(double *a, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
a[n+id] = a[id];
a[2*n+id] = a[id];
}
}
__global__ void nekcol2_conv(double* convh, double *vx, double *vy, double *vz, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
convh[id] = convh[id] * vx[id];
convh[n+id] = convh[n+id] * vy[id];
convh[2*n+id] = convh[2*n+id] * vz[id];
}
}
__global__ void merge_replicate_conv(double* convh, double *vx, double *vy, double *vz, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
convh[n+id] = convh[id] * vy[id];
convh[2*n+id] = convh[id] * vz[id];
convh[id] = convh[id] * vx[id];
}
}
__global__ void nekadd2col2(double *a, double *b, double *c, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
a[id] = a[id] + b[id] * c[id];
}
}
__global__ void merge_replicate_conv_add2col2(double* convh, double *b, double *c, double *vx, double *vy, double *vz, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n){
convh[id] = convh[id] + b[id] * c[id];
convh[n+id] = convh[id] * vy[id];
convh[2*n+id] = convh[id] * vz[id];
convh[id] = convh[id] * vx[id];
}
}
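//Builds the convective flux convh (stored here in totalh) on the dealiased nxd
//grid for equation eq: eq==0 interpolates the three momentum components of u,
//eq 1..3 and eq==4 interpolate one component, replicate it over the three
//directions, multiply by the dealiased velocities and add a phig*pr term.
//This description is inferred from the kernels above, not from the Fortran source.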
void evaluate_conv_h(int nel, int neq, int eq, int ndim, int ldw, double *jgl, double *jgt, double * convh, double *u, int nx1, int nxd, int nd, int n1, double *ju1, double*ju2, double*phig, double*pr, double *vxd, double *vyd, double *vzd, double *w, bool if3d){
//for now totalh = convh so we can pass totalh instead of convh
//nd = nel * nxd * nyd * nzd
//n1 = nel * nx1 * ny1 * nz1
//the Fortran layout convh(nx^3,ndim) is changed here to convh(nx^3,nel,ndim)
//initially for each element, each equation do
//do for equation 1
/*int ldw = 2* pow(2*nxd,ndim);
double *w;
cudaMalloc(&w, nel*ldw*sizeof(double));*/
int nx1_3 = pow(nx1,3);
if(eq == 0)
for(int j = 0; j<ndim;j++)
specmpn(convh+j*nd, nxd, u+(j+1)*nx1_3 ,nx1, jgl, jgt, if3d, w, ldw, nel, neq, j+1, true);
else{
specmpn(ju1, nxd, phig, nx1, jgl, jgt, if3d, w, ldw, nel, 1, 0,true);
specmpn(ju2, nxd, pr, nx1, jgl, jgt, if3d, w, ldw, nel, 1, 0,true);
if(eq<4){
specmpn(convh, nxd, u+eq*nx1_3, nx1, jgl, jgt, if3d, w, ldw, nel, neq, eq,true);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
gridSize = (int)ceil((float)nd/blockSize);
//merge_replicate_conv<<<gridSize, blockSize>>>(convh,vxd,vyd,vzd,nd);
replicate_3<<<gridSize, blockSize>>>(convh,nd);
nekcol2_conv<<<gridSize, blockSize>>>(convh,vxd,vyd,vzd,nd);
nekadd2col2<<<gridSize, blockSize>>>(convh+(eq-1)*nd,ju1,ju2,nd);
}
else if(eq==4){
specmpn(convh, nxd, u+eq*nx1_3, nx1, jgl, jgt, if3d, w, ldw, nel,neq,eq,true);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
gridSize = (int)ceil((float)nd/blockSize);
//merge_replicate_conv_add2col2<<<gridSize, blockSize>>>(convh,ju1,ju2,vxd,vyd,vzd,nd);
nekadd2col2<<<gridSize, blockSize>>>(convh,ju1,ju2,nd);
replicate_3<<<gridSize, blockSize>>>(convh,nd);
nekcol2_conv<<<gridSize, blockSize>>>(convh,vxd,vyd,vzd,nd);
}
}
}
__global__ void nekadd2col2_u(double * u, double *totalh, double *rx, int nel, int n, int ndim, int offset){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n*nel){
int e = id/n;
int i = id%n;
u[id] = 0;
for(int j = 0; j<ndim; j++)
u[id] += totalh[j*(nel*n)+id] * rx[e*(3*ndim*n)+(j+offset)*n+i];
}
}
__global__ void neksub2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]-=b[id];
}
__global__ void nekadd2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]+=b[id];
}
__global__ void nekcol2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]*=b[id];
}
__global__ void nekcol2_ud(double *a, double*b, int nel, int nx1_3, int nxd_3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nx1_3){
int e = id/nx1_3;
int i = id%nx1_3;
a[e*nxd_3+i]*=b[id];
}
}
__global__ void nekcopy(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]=b[id];
}
extern "C" void nekcopywrapper_(double *a, double *b, int *n){
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
nekcopy<<<gridSize, blockSize>>>(a,b,n[0]);
}
__global__ void neksubcol3_res1(double *a, double *b, double *c, int nel, int nx1_3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nx1_3){
int i = id%nx1_3;
a[id] = a[id] - b[i] * c[id];
}
}
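//Transpose of the local 3-D gradient operator: u accumulates the contributions
//of ur, us and ut applied through the derivative matrices d/dt, one direction
//at a time, with w as scratch (presumably mirroring the Fortran local_grad3_t).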
void local_grad3_t(double *u, double *ur, double *us, double *ut, int nxd, double *d, double *dt, double *w, int nel){
int nxd_2 = nxd * nxd;
int nxd_3 = nxd_2 * nxd;
// u(nxd,nxd*nxd) = dt(nxd,nxd) * ur(nxd, nxd*nxd) fortran
// u(nxd*nxd,nxd) = ur(nxd*nxd, nxd) * dt(nxd,nxd) C
int blockSize=1024, gridSize;
cudaStream_t stream;
cudaStreamCreate( &stream );
const double alpha = 1;
const double beta = 0;
gridSize = (int)ceil((float)nel*nxd_3/blockSize);
//mxm<<<gridSize, blockSize>>>(ur,nxd_2, dt, nxd, u, nxd, nel, nxd_3, 0, nxd_3, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nxd, nxd_2, &alpha, dt, nxd, 0, ur, nxd, nxd_3, &beta, u, nxd, nxd_3, nel, gridSize);
for(int k = 0; k<nxd;k++){
//wk(nxd,nxd) = usk(nxd,nxd)*D(nxd,nxd) fortran
//wk(nxd,nxd) = D(nxd,nxd)*usk(nxd,nxd) C
gridSize = (int)ceil((float)nel*nxd_2/blockSize);
//mxm<<<gridSize, blockSize>>>(d,nxd, us+k*nxd_2, nxd, w+k*nxd_2, nxd, nel, 0, nxd_3, nxd_3, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nxd, nxd, &alpha, us+k*nxd_2, nxd, nxd_3, d, nxd, 0, &beta, w+k*nxd_2, nxd, nxd_3, nel, gridSize);
}
gridSize = (int)ceil((float)nel*nxd_3/blockSize);
nekcopy<<<gridSize, blockSize>>>(u,w, nel*nxd_3);
//w(nxd*nxd,nxd) = ut(nxd*nxd,nxd) * D(nxd,nxd) fortran
//w(nxd,nxd*nxd) = D(nxd,nxd) * ut(nxd,nxd*nxd) C
//mxm<<<gridSize, blockSize>>>(d,nxd, ut, nxd, w, nxd_2, nel, 0, nxd_3, nxd_3, 0);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd_2, nxd, nxd, &alpha, ut, nxd, nxd_3, d, nxd, 0, &beta, w, nxd_2, nxd_3, nel, gridSize);
nekadd2<<<gridSize, blockSize>>>(u,w, nel*nxd_3);
cudaStreamDestroy(stream);
}
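//Flux divergence term: contracts totalh with the metric terms rx to form the
//reference-space fluxes ur/us/ut, applies the transposed gradient
//(local_grad3_t), interpolates the result back to the nx1 grid and subtracts
//it from res1 for equation eq. The 2-D branch (local_grad2) is left unimplemented here.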
void flux_div_integral(double *ur, double *us, double *ut, double *ud, double *tu, double *totalh, double *rx, double *dg, double *dgt, double *jgt, double *jgl, double *res1, double *w, int nel, int eq, int ndim, int nx1, int nxd, int ldw, bool if3d){
//call get_dgl_ptr
int nd = pow(nxd,3);
int nx_3 = pow(nx1,3);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nel*nd/blockSize);
nekadd2col2_u<<<gridSize, blockSize>>>(ur, totalh, rx, nel, nd, ndim,0);
nekadd2col2_u<<<gridSize, blockSize>>>(us, totalh, rx, nel, nd, ndim,ndim);
if(if3d){
nekadd2col2_u<<<gridSize, blockSize>>>(ut, totalh, rx, nel, nd, ndim,ndim+ndim);
local_grad3_t(ud, ur, us, ut, nxd, dg, dgt, w, nel);
}
else{
//call local_grad2
}
specmpn(tu,nx1,ud,nxd,jgt,jgl,if3d,w,ldw,nel,1,0,false);
neksub2<<<gridSize, blockSize>>>(res1+eq*(nel*nx_3),tu,nel*nx_3);
}
void neklocal_grad3(double * ur, double *us, double *ut, double *u, int nx, int nxd, double *d, double *dt, int nel){
/*double *d_ub;
cudaMalloc(&d_ub, nel*pow(nx,3)*sizeof(double));
cudaStream_t stream;
cudaStreamCreate(&stream);
const double alpha = 1;
const double beta = 0;*/
//cuda_multi_gemm_unif(stream, 'N', 'N', nx, nx, nx, &alpha, nx, nx*nx, d, u, d_ub, &beta, ur, us, ut, nel*nx, 1024);
//cudaDeviceSynchronize();
//if(true) return;
int nx_2 = nx*nx;
int nx_3 = nx_2*nx;
int nxd_3 = pow(nxd,3);
//ur(nx,nx*nx) = D(nx,nx) * u(nx,nx*nx) fortran
//ur(nx*nx,nx) = u(nx*nx,nx) * D(nx,nx) C
int blockSize=1024, gridSize;
gridSize = (int)ceil((float)nel*nx_3/blockSize);
mxm<<<gridSize, blockSize>>>(u,nx_2, d, nx, ur, nx, nel, nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
for(int k = 0; k<nx; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nel*nx_2/blockSize);
mxm<<<gridSize, blockSize>>>(dt,nx, u+k*nx_2, nx, us+k*nx_2, nx, nel, 0, nx_3, nxd_3, 0);
}
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nel*nx_3/blockSize);
mxm<<<gridSize, blockSize>>>(dt, nx, u, nx, ut, nx_2, nel, 0, nx_3, nxd_3, 0);
}
void nekgradl_rst(double *ur, double *us, double *ut, double *u, double *d, double *dt, int nx, int nxd, int nel, bool if3d){
if(if3d){
neklocal_grad3(ur, us, ut, u, nx, nxd, d, dt, nel);
}
}
__global__ void calc_ud_3(double *ud, double *rx, double *ur, double *us, double *ut, int j, int nel, int nxd_3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nxd_3){
int e = id/nxd_3;
int i = id%nxd_3;
int e_size = e*(9*nxd_3);
ud[id] = rx[e_size+j*nxd_3+i]*ur[id] + rx[e_size+(j+3)*nxd_3+i]*us[id] + rx[e_size+(j+6)*nxd_3+i]*ut[id];
}
}
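//Forcing terms: for the momentum equations (eq 1..3) this takes the gradient of
//phig, contracts it with rx, multiplies by the pressure and the mass matrix, and
//subtracts it together with the user source usrf from res1; for eq==4 only the
//usrf term is subtracted. Description inferred from the kernels above.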
void compute_forcing(double *ud, double *ur, double *us, double *ut, double *phig, double *rx, double *pr, double *convh /*use w*/, double *jacmi, double *bm1, double *res1, double *usrf, double *d, double *dt, int nel, int eq, int nx1, int nxd, int ldim, bool if3d){
int nxd_2 = nxd*nxd;
int nx1_2 = nx1 * nx1;
int nxd_3 = nxd_2*nxd;
int nx1_3 = nx1_2*nx1;
int blockSize=1024, gridSize;
gridSize = (int)ceil((float)nel*nx1_3/blockSize);
rzero<<<gridSize, blockSize>>>(ud,nel*nx1_3);
if(eq!=0&&eq!=4){
int j=0;
if(eq==2)
j=1;
else if(eq==3){
j=1;
if(ldim==3)
j=2;
}
nekgradl_rst(ur,us,ut,phig, d, dt,nx1, nxd, nel, if3d);
if(if3d){
gridSize = (int)ceil((float)nel*nxd_3/blockSize);
calc_ud_3<<<gridSize, blockSize>>>(ud,rx,ur,us,ut,j,nel,nxd_3);
}
else{
//calc_ud_2
}
if(eq!=3 || ldim!=2){
gridSize = (int)ceil((float)nel*nx1_3/blockSize);
nekcol2_ud<<<gridSize, blockSize>>>(ud,pr,nel,nx1_3,nxd_3);
nekcopy<<<gridSize, blockSize>>>(convh,ud,nel*nx1_3);
nekcol2<<<gridSize, blockSize>>>(convh,jacmi,nel*nx1_3);
nekcol2<<<gridSize, blockSize>>>(convh,bm1,nel*nx1_3);
neksub2<<<gridSize, blockSize>>>(res1+eq*(nel*nx1_3),convh,nel*nx1_3);
neksubcol3_res1<<<gridSize, blockSize>>>(res1+eq*(nel*nx1_3),usrf+eq*nx1_3, bm1,nel,nx1_3);
}
}
else if (eq==4)
neksubcol3_res1<<<gridSize, blockSize>>>(res1+eq*(nel*nx1_3),usrf+eq*nx1_3, bm1,nel,nx1_3);
}
// this function is doing assemble_h, flux_div_integral, compute_forcing
extern "C" void computestagewrapper_(double *jgl, double *jgt, double *totalh, double *u, double *ju1, double *ju2, double *phig, double*pr, double *vxd, double *vyd, double *vzd, double *ut, double *ud, double *tu, double *rx, double *dg, double *dgt, double *res1, double *w, double *jacmi, double *bm1, double *usrf, double *d, double *dt, int *nel1, int *neq1, int *ndim1, int *ldw1, int *nx11, int *nxd1/*, bool if3d*/){
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
bool if3d = true;
int nel = nel1[0];
int neq = neq1[0];
int ndim = ndim1[0];
int ldw = ldw1[0];
int nx1 = nx11[0];
int nxd = nxd1[0];
//printf("nel=%d,neq=%d,ndim=%d,nx1=%d,nxd=%d,u[0]=%f\n",nel,neq,ndim,nx1,nxd,u[0]);
//use d_ju1 for d_ur
double *d_jgl, *d_jgt, *d_totalh, *d_u, *d_ju1, *d_ju2, *d_phig, *d_pr, *d_vxd,*d_vyd, *d_vzd, *d_ut, *d_ud, *d_tu, *d_rx, *d_dg, *d_dgt, *d_res1, *d_w, *d_jacmi, *d_bm1, *d_usrf, *d_d, *d_dt ;
bool inCPU = false;
int nxd_3 = pow(nxd,3), nx1_3 = pow(nx1,3);
if(inCPU){
//copy data to gpu
cudaMalloc(&d_jgl, nxd_3*sizeof(double));
cudaMemcpy(d_jgl, jgl, nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_jgt, nxd_3*sizeof(double));
cudaMemcpy(d_jgt, jgt, nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_totalh, ndim*nel*nxd_3*sizeof(double));
cudaMalloc(&d_u, nel*neq*nx1_3*sizeof(double));
cudaMemcpy(d_u, u, nel*neq*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_ju1, nel*nxd_3*sizeof(double));
cudaMalloc(&d_ju2, nel*nxd_3*sizeof(double));
cudaMalloc(&d_phig, nel*nx1_3*sizeof(double));
cudaMemcpy(d_phig, phig, nel*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_pr, nel*nx1_3*sizeof(double));
cudaMemcpy(d_pr, pr, nel*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_vxd, nel*nxd_3*sizeof(double));
cudaMemcpy(d_vxd, vxd, nel*nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_vyd, nel*nxd_3*sizeof(double));
cudaMemcpy(d_vyd, vyd, nel*nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_vzd, nel*nxd_3*sizeof(double));
cudaMemcpy(d_vzd, vzd, nel*nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_ut, nel*nxd_3*sizeof(double));
cudaMalloc(&d_ud, nel*nxd_3*sizeof(double));
cudaMalloc(&d_tu, nel*nxd_3*sizeof(double));
cudaMalloc(&d_rx, nel*9*nxd_3*sizeof(double));
cudaMemcpy(d_rx, rx, nel*9*nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_dg, nxd_3*sizeof(double));
cudaMemcpy(d_dg, dg, nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_dgt, nxd_3*sizeof(double));
cudaMemcpy(d_dgt, dgt, nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_d, nxd_3*sizeof(double));
cudaMemcpy(d_d, d, nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_dt, nxd_3*sizeof(double));
cudaMemcpy(d_dt, dt, nxd_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_res1, nel*neq*nx1_3*sizeof(double));
cudaMemcpy(d_res1, res1, nel*neq*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_w, nel*ldw*sizeof(double));
//cudaMemcpy(d_w, w, ldw*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_jacmi, nel*nx1_3*sizeof(double));
cudaMemcpy(d_jacmi, jacmi, nel*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_bm1, nel*nx1_3*sizeof(double));
cudaMemcpy(d_bm1, bm1, nel*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_usrf, neq*nx1_3*sizeof(double));
cudaMemcpy(d_usrf, usrf, neq*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
}
else{
//just assign pointers
d_jgl = jgl;
d_jgt = jgt;
d_totalh = totalh;
d_u = u;
d_ju1 = ju1;
d_ju2 = ju2;
d_phig = phig;
d_pr = pr;
d_vxd = vxd;
d_vyd = vyd;
d_vzd = vzd;
d_ut = ut;
d_ud = ud;
d_tu = tu;
d_rx = rx;
d_dg = dg;
d_dgt = dgt;
d_res1 = res1;
d_w = w;
d_jacmi = jacmi;
d_bm1 = bm1;
d_usrf = usrf;
d_d = d;
d_dt = dt;
}
//printf("finished memory allocation in compute\n eq=%d, if3d=%d\n",neq,if3d);
for(int eq = 0; eq<neq; eq++){
//printf("loop # %d\n",eq);
evaluate_conv_h(nel, neq, eq, ndim, ldw, d_jgl, d_jgt, d_totalh /*convh*/, d_u, nx1, nxd, /*nd*/ pow(nxd,3)*nel, /*n1*/ pow(nx1,3)*nel, d_ju1, d_ju2, d_phig, d_pr, d_vxd, d_vyd, d_vzd, d_w, if3d);
flux_div_integral(d_ju1/*d_ur*/, d_ju2/*d_us*/, d_ut, d_ud, d_tu, d_totalh, d_rx, d_dg, d_dgt, d_jgt, d_jgl, d_res1, d_w, nel, eq, ndim, nx1, nxd, ldw, if3d);
compute_forcing(d_ud, d_ju1/*d_ur*/, d_ju2/*d_us*/, d_ut, d_phig, d_rx, d_pr, d_w /*convh*/, d_jacmi, d_bm1, d_res1, d_usrf, d_d, d_dt, nel, eq, nx1, nxd, ndim/*ldim*/, if3d);
}
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 3: %s\n",cudaGetErrorString(code));
}
if(inCPU){
cudaMemcpy(res1, d_res1, nel*neq*nx1_3*sizeof(double), cudaMemcpyDeviceToHost);
//cuda free all d_*
//double *d_jgl, *d_jgt, *d_totalh, *d_u, *d_ju1, *d_ju2, *d_phig, *d_pr, *d_vxd,*d_vyd, *d_vzd, *d_ut, *d_ud, *d_tu, *d_rx, *d_dg, *d_dgt, *d_res1, *d_w, *d_jacmi, *d_bm1, *d_usrf, *d_d, *d_dt ;
cudaFree(d_jgl);
cudaFree(d_jgt);
cudaFree(d_totalh);
cudaFree(d_u);
cudaFree(d_ju1);
cudaFree(d_ju2);
cudaFree(d_phig);
cudaFree(d_pr);
cudaFree(d_vxd);
cudaFree(d_vyd);
cudaFree(d_vzd);
cudaFree(d_ut);
cudaFree(d_ud);
cudaFree(d_tu);
cudaFree(d_rx);
cudaFree(d_dg);
cudaFree(d_dgt);
cudaFree(d_d);
cudaFree(d_dt);
cudaFree(d_res1);
cudaFree(d_w);
cudaFree(d_jacmi);
cudaFree(d_bm1);
cudaFree(d_usrf);
}
else{
}
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("compute stage time is %f\n",time*1e-03);
}
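//RK stage update: per point, u = (tcoef[0]*bm1*res3 + tcoef[1]*bm1*u
//- tcoef[2]*res1) / bm1, with tcoef offset by 3*(stage-1) in the wrapper below.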
__global__ void calculate_u(double *u, double *bm1, double *tcoef, double *res3, double *res1, int nelt, int nxyz1, int toteq){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt*toteq*nxyz1){
int e = id/(toteq*nxyz1);
int r = id%(toteq*nxyz1);
int eq = r/nxyz1;
int i = r%nxyz1;
u[id] = bm1[e*nxyz1+i]*tcoef[0]*res3[id]+bm1[e*nxyz1+i]*tcoef[1]*u[id]-tcoef[2]*res1[eq*(nelt*nxyz1)+e*nxyz1+i];
u[id] = u[id]/bm1[e*nxyz1+i];
}
}
extern "C" void calculateuwrapper_(double *u, double *bm1, double *tcoef, double *res3, double *res1, int *stage1, int *nelt1, int *nxyz11, int *toteq1){
int stage = stage1[0]-1;
int nelt = nelt1[0];
int nxyz1 = nxyz11[0];
int toteq = toteq1[0];
bool inCPU = false;
if(inCPU){
double *d_u, *d_bm1, *d_tcoef, *d_res3, *d_res1;
cudaMalloc(&d_u, nelt*toteq*nxyz1*sizeof(double));
cudaMemcpy(d_u, u, nelt*toteq*nxyz1*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_bm1, nelt*nxyz1*sizeof(double));
cudaMemcpy(d_bm1, bm1, nelt*nxyz1*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_tcoef, 9*sizeof(double));
cudaMemcpy(d_tcoef, tcoef, 9*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_res3, nelt*toteq*nxyz1*sizeof(double));
cudaMemcpy(d_res3, res3, nelt*toteq*nxyz1*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_res1, nelt*toteq*nxyz1*sizeof(double));
cudaMemcpy(d_res1, res1, nelt*toteq*nxyz1*sizeof(double), cudaMemcpyHostToDevice);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nelt*toteq*nxyz1/blockSize);
calculate_u<<<gridSize, blockSize>>>(d_u,d_bm1,d_tcoef+stage*3,d_res3,d_res1,nelt,nxyz1,toteq);
cudaMemcpy(u, d_u, nelt*toteq*nxyz1*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_u);
cudaFree(d_bm1);
cudaFree(d_tcoef);
cudaFree(d_res3);
cudaFree(d_res1);
}
else{
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nelt*toteq*nxyz1/blockSize);
calculate_u<<<gridSize, blockSize>>>(u,bm1,tcoef+stage*3,res3,res1,nelt,nxyz1,toteq);
}
}
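//Recovers the primitive velocities from the conserved variables:
//vx,vy,vz = momentum components divided by the density-like field at index irg.
//The wrapper below then interpolates them to the dealiased grid via specmpn.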
__global__ void nekinvcol3_vu(double *vx, double *vy, double *vz, double *u, int nel, int nxyz1, int neq, int irg, int irpu, int irpv, int irpw){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nel*nxyz1){
int e = id/nxyz1;
int i = id%nxyz1;
int e_offset = neq*nxyz1;
double c = u[e*e_offset+irg*nxyz1+i];
vx[id] = u[e*e_offset+irpu*nxyz1+i]/c;
vy[id] = u[e*e_offset+irpv*nxyz1+i]/c;
vz[id] = u[e*e_offset+irpw*nxyz1+i]/c;
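//NOTE: the three assignments below overwrite the velocities just computed with
//constants; this looks like a debugging override that was left in place.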
vx[id] = 0.0;
vy[id] = 1.0;
vz[id] = 0.0;
}
}
extern "C" void computeprimitivevarswrapper_(double *vx, double *vy, double *vz, double *vxd, double *vyd, double *vzd, double *u, double *jgl, double *jgt, double *w, int *nxd1, int *nx11, int *nel1, int *toteq1, int *irpu1, int *irpv1, int *irpw1, int *irg1, int *ldw1, int *p){
//called only once and values used for all equations
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
int nxd = nxd1[0];
int nx1 = nx11[0];
int nel = nel1[0];
int toteq = toteq1[0];
int irpu = irpu1[0]-1;
int irpv = irpv1[0]-1;
int irpw = irpw1[0]-1;
int irg = irg1[0]-1;
int nx1_3 = pow(nx1,3);
int ldw = ldw1[0];
double *d_vx, *d_vy, *d_vz, *d_vxd, *d_vyd, *d_vzd, *d_u, *d_jgl, *d_jgt, *d_w;
bool inCPU = false;
if(p[0]==1)
inCPU = true;
if(inCPU){
//allocate gpu memory and transfer data to GPU
int tot_b = nel * nx1_3 * sizeof(double);
int totd_b = nel * pow(nxd,3) * sizeof(double);
ldw = 2*pow(nxd,3);
int ldw_b = nel * ldw * sizeof(double);
cudaMalloc(&d_vx, tot_b);
cudaMalloc(&d_vy, tot_b);
cudaMalloc(&d_vz, tot_b);
cudaMalloc(&d_vxd, totd_b);
cudaMalloc(&d_vyd, totd_b);
cudaMalloc(&d_vzd, totd_b);
cudaMalloc(&d_w, ldw_b);
cudaMalloc(&d_u, toteq*tot_b);
int nxd_3_b = pow(nxd,3) * sizeof(double);
cudaMalloc(&d_jgl, nxd_3_b);
cudaMalloc(&d_jgt, nxd_3_b);
cudaMemcpy(d_u, u, toteq*tot_b, cudaMemcpyHostToDevice);
cudaMemcpy(d_jgl, jgl, nxd_3_b, cudaMemcpyHostToDevice);
cudaMemcpy(d_jgt, jgt, nxd_3_b, cudaMemcpyHostToDevice);
}
else{
//just assign data
d_w = w;
d_vx = vx;
d_vy = vy;
d_vz = vz;
d_vxd = vxd;
d_vyd = vyd;
d_vzd = vzd;
d_u = u;
d_jgl = jgl;
d_jgt = jgt;
}
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)nel*nx1_3/blockSize);
nekinvcol3_vu<<<gridSize, blockSize>>>(d_vx, d_vy, d_vz, d_u, nel, nx1_3, toteq, irg, irpu, irpv, irpw);
specmpn(d_vxd, nxd, d_vx, nx1, d_jgl, d_jgt, true, d_w, ldw, nel, 1, 0,true);
specmpn(d_vyd, nxd, d_vy, nx1, d_jgl, d_jgt, true, d_w, ldw, nel, 1, 0,true);
specmpn(d_vzd, nxd, d_vz, nx1, d_jgl, d_jgt, true, d_w, ldw, nel, 1, 0,true);
if(inCPU){
int tot_b = nel * nx1_3 * sizeof(double);
int totd_b = nel * pow(nxd,3) * sizeof(double);
cudaMemcpy(vx, d_vx, tot_b, cudaMemcpyDeviceToHost);
cudaMemcpy(vy, d_vy, tot_b, cudaMemcpyDeviceToHost);
cudaMemcpy(vz, d_vz, tot_b, cudaMemcpyDeviceToHost);
cudaMemcpy(vxd, d_vxd, totd_b, cudaMemcpyDeviceToHost);
cudaMemcpy(vyd, d_vyd, totd_b, cudaMemcpyDeviceToHost);
cudaMemcpy(vzd, d_vzd, totd_b, cudaMemcpyDeviceToHost);
cudaFree(d_vx);
cudaFree(d_vy);
cudaFree(d_vz);
cudaFree(d_vxd);
cudaFree(d_vyd);
cudaFree(d_vzd);
cudaFree(d_w);
cudaFree(d_u);
cudaFree(d_jgl);
cudaFree(d_jgt);
}
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("compute primitive time is %f\n",time*1e-03);
}
//mxm multiplication, faces
__global__ void mxm_faces(double *a, int n1, double *b, int n2, double *c, int n3, int nel, int nfaces, int aSize, int bSize, int cSize){
//calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in C layout, independently for every face
//in Fortran the original calculation was c(n3,n1) = b(n3,n2) X a(n2,n1)
//aSize, bSize, cSize are the per-face sizes of a, b and c
int id = blockIdx.x*blockDim.x+threadIdx.x;
int n13 = n1*n3;
if(id<nel*nfaces*n13){
int e = id/(nfaces*n13);
int rc = id%(nfaces*n13);
int f = rc/n13;
int rc2 = rc%n13;
int i = rc2/n3;
int j = rc2%n3;
int cid = e*nfaces*cSize+f*cSize+rc2;
int aid = e*nfaces*aSize+f*aSize + i*n2;
int bid = e*nfaces*bSize+f*bSize + j;
c[cid] = 0;
for(int k = 0; k<n2; k++)
c[cid]+=a[aid+k]*b[bid+k*n3];
}
}
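//Interpolates face data between the nx1 face grid and the dealiased nxd face
//grid for every face of every element using batched GEMMs: idir==0 maps
//u(nx1,nx1) -> ju(nxd,nxd), any other idir maps u(nxd,nxd) -> ju(nx1,nx1).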
void map_faced(double *jgl, double *jgt, double *ju, double *u, double *w, int nx1, int nxd, int fdim, int nelt, int nfaces, int idir){
cudaStream_t stream;
cudaStreamCreate( &stream );
const double alpha = 1;
const double beta = 0;
int nx1_2 = pow(nx1,2);
int nxd_2 = pow(nxd,2);
int batchSize = nelt*nfaces;
if(idir==0){
int blockSize = 1024, gridSize;
//calc w(nxd,nx1) = jgl(nxd*nx1) * u(nx1,nx1) in fortran
//calc w(nx1,nxd) = u(nx1,nx1) * jgl(nx1,nxd) in C
gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nx1, &alpha, jgl, nxd, 0, u, nx1, nx1_2, &beta, w, nxd, nx1*nxd, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(u, nx1, jgl, nx1, w, nxd, nelt, nfaces, nx1*nx1, 0, nx1*nxd);
//calc ju(nxd,nxd) = w(nxd,nx1) * jgt(nx1,nxd) in fortran
//calc ju(nxd,nxd) = jgt(nxd,nx1) * w(nx1,nxd)
gridSize = (int)ceil((float)nelt*nfaces*nxd*nxd/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nxd, nx1, nxd, &alpha, w, nxd, nx1*nxd, jgt, nx1, 0, &beta, ju, nxd, nxd_2, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(jgt, nxd, w, nx1, ju, nxd, nelt, nfaces, 0, nx1*nxd, nxd*nxd);
}
else{
int blockSize = 1024, gridSize;
//calc w(nx1,nxd) = jgt(nx1,nxd) * u(nxd,nxd) in fortran
//calc w(nxd,nx1) = u(nxd,nxd) * jgt(nxd,nx1) in C
gridSize = (int)ceil((float)nelt*nfaces*nx1*nxd/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nxd, &alpha, jgt, nx1, 0, u, nxd, nxd_2, &beta, w, nx1, nx1*nxd, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(u, nxd, jgt, nxd, w, nx1, nelt, nfaces, nxd*nxd, 0, nx1*nxd);
//calc ju(nx1,nx1) = w(nx1,nxd) * jgl(nxd,nx1) in fortran
//calc ju(nx1,nx1) = jgl(nx1,nxd) * w(nxd,nx1) in C
gridSize = (int)ceil((float)nelt*nfaces*nx1*nx1/blockSize);
cuda_multi_gemm_unif(stream, 'N', 'N', nx1, nxd, nx1, &alpha, w, nx1, nx1*nxd, jgl, nxd, 0, &beta, ju, nx1, nx1_2, batchSize, gridSize);
//mxm_faces<<<gridSize, blockSize>>>(jgl, nx1, w, nxd, ju, nx1, nelt, nfaces, 0, nx1*nxd, nx1*nx1);
}
cudaStreamDestroy(stream);
}
__global__ void invcol3_flux(double *a, double *b, double *c, int n, int total){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<total){
a[id] = b[id] / c[id%n];
}
}
__global__ void nekcol2_flux(double *a, double*b, int n, int total){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<total)
a[id]*=b[id%n];
}
__global__ void invcol2(double *a, double*b, int n){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
a[id]= a[id]/b[id];
}
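//AUSM-type flux splitting at the dealiased face points: builds left/right total
//enthalpies and normal velocities, the split Mach number and pressure weights,
//and assembles the five flux components scaled by the face weight nm.
//fs appears to be a face (grid) speed and is currently set to zero.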
__global__ void Ausm_flux(int neq, int ntotd, double *nx, double *ny, double *nz, double *nm, double *fs, double *rl, double *ul, double *vl, double *wl, double *pl, double *al, double *tl, double *rr, double *ur, double *vr, double *wr, double *pr, double *ar, double *tr, double *flx, double *cpl, double *cpr){
int i = blockIdx.x*blockDim.x+threadIdx.x;
//ntotd = nel * nfaces * nxzd
if(i<ntotd){
fs[i] = 0;// it is 0 in cmtbone but can be changed
double af,mf,mfa,mfm,mfp,ml,mla,mlp,mr,mra,mrm,pf,ql,qr,wtl,wtr,Hl,Hr;
Hl = cpl[i]*tl[i] + 0.5*(ul[i]*ul[i]+vl[i]*vl[i]+wl[i]*wl[i]);
Hr = cpr[i]*tr[i] + 0.5*(ur[i]*ur[i]+vr[i]*vr[i]+wr[i]*wr[i]);
ql = ul[i]*nx[i] + vl[i]*ny[i] + wl[i]*nz[i] - fs[i];
qr = ur[i]*nx[i] + vr[i]*ny[i] + wr[i]*nz[i] - fs[i];
af = 0.5*(al[i] + ar[i]);
ml = ql/af;
mla = abs(ml);
mr = qr/af;
mra = abs(mr);
if(mla <= 1.0){
mlp = 0.25*pow((ml+1.0),2) + 0.125*pow((ml*ml-1.0),2);
wtl = 0.25*pow(ml+1.0,2)*(2.0-ml) + 0.1875*ml*pow(ml*ml-1.0,2);
}
else{
mlp = 0.5*(ml+mla);
wtl = 0.5*(1.0+ml/mla);
}
if(mra <= 1.0){
mrm = -0.25*pow((mr-1.0),2) - 0.125*pow((mr*mr-1.0),2);
wtr = 0.25*pow(mr-1.0,2)*(2.0+mr) - 0.1875*mr*pow(mr*mr-1.0,2);
}
else{
mrm = 0.5*(mr-mra);
wtr = 0.5*(1.0-mr/mra);
}
mf = mlp + mrm;
mfa = abs(mf);
mfp = 0.5*(mf+mfa);
mfm = 0.5*(mf-mfa);
pf = wtl*pl[i] + wtr*pr[i];
//compute fluxes
flx[i] = (af*(mfp*rl[i] + mfm*rr[i])) * nm[i];
flx[1*ntotd+i] = (af*(mfp*rl[i]*ul[i] + mfm*rr[i]*ur[i])+pf*nx[i]) * nm[i];
flx[2*ntotd+i] = (af*(mfp*rl[i]*vl[i] + mfm*rr[i]*vr[i])+pf*ny[i]) * nm[i];
flx[3*ntotd+i] = (af*(mfp*rl[i]*wl[i] + mfm*rr[i]*wr[i])+pf*nz[i]) * nm[i];
flx[4*ntotd+i] = (af*(mfp*rl[i]*Hl + mfm*rr[i]*Hr)+pf*fs[i]) * nm[i];
}
}
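//Inviscid surface flux driver: finishes the face-state exchange
//(qplus -= qminus), maps the left/right states and face normals to the
//dealiased grid with map_faced, builds the face Jacobian from area and the
//quadrature weights, evaluates the AUSM flux, multiplies by the field at index
//iph and maps the five flux components back to the nx1 face grid.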
void InviscidFlux(double *qminus, double *qplus, double *flux, double *unx, double *uny, double *unz, double *area, double *wghtc, double *wghtf, double *cbc, double *jgl, double *jgt, double *nx, double *ny, double *nz, double *rl, double *ul, double *vl, double *wl, double *pl, double *tl, double *al, double *cpl, double *rr, double *ur, double *vr, double *wr, double *pr, double *tr, double *ar, double *cpr, double *fs, double *jaco_f, double *flx, double *jaco_c,int neq, int nstate, int nflux, int nxd, int nx1, int nel, int ndim, int irho, int iux, int iuy, int iuz, int ipr, int ithm, int isnd, int icpf, int iph){
//nx is extended to nx(nel,nfaces,#points_in_face)
//irho and the other state indices are expected to already be 0-based (i.e. irho1[0]-1)
//printf("in invFlux**\n");
int fdim = ndim-1;
int nfaces = 2*ndim;
int nx1_2 = nx1*nx1;
int nxd_2 = nxd*nxd;
double *w;
cudaMalloc(&w,nel*nfaces*pow(nxd,2)*sizeof(double));
//add neksub2 which is last step of face_state_commo
int blockSize1 = 1024, gridSize1;
gridSize1 = (int)ceil((float)nstate*nel*nfaces*nx1_2/blockSize1);
neksub2<<<gridSize1, blockSize1>>>(qplus,qminus,nstate*nel*nfaces*nx1_2);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-1: %s\n",cudaGetErrorString(code));
}
int totpts = nel * nfaces * nx1_2;
map_faced(jgl, jgt, nx, unx, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ny, uny, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, nz, unz, w, nx1, nxd, fdim, nel, nfaces, 0);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-2: %s\n",cudaGetErrorString(code));
}
//printf("irho=%d,iux=%d,iuy=%d,iuz=%d,ipr=%d,ithm=%d,isnd=%d,icpf=%d\n",irho,iux,iuy,iuz,ipr,ithm,isnd,icpf);
map_faced(jgl, jgt, rl, qminus+irho*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ul, qminus+iux*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, vl, qminus+iuy*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, wl, qminus+iuz*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, pl, qminus+ipr*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, tl, qminus+ithm*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, al, qminus+isnd*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, cpl, qminus+icpf*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-3: %s\n",cudaGetErrorString(code));
}
map_faced(jgl, jgt, rr, qplus+irho*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ur, qplus+iux*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, vr, qplus+iuy*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, wr, qplus+iuz*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, pr, qplus+ipr*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, tr, qplus+ithm*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, ar, qplus+isnd*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
map_faced(jgl, jgt, cpr, qplus+icpf*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-4: %s\n",cudaGetErrorString(code));
}
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)totpts/blockSize);
invcol3_flux<<<gridSize, blockSize>>>(jaco_c,area,wghtc,nx1_2,totpts);
map_faced(jgl, jgt, jaco_f, jaco_c, w, nx1, nxd, fdim, nel, nfaces, 0);
int totpts_d = nel * nfaces * nxd_2;
gridSize = (int)ceil((float)totpts_d/blockSize);
nekcol2_flux<<<gridSize, blockSize>>>(jaco_f,wghtf,nxd_2,totpts_d);
//Ausm
//gridSize = (int)ceil((float)nel*nfaces*nxd_2/blockSize);
invcol2<<<gridSize, blockSize>>>(cpl,rl,totpts_d);
invcol2<<<gridSize, blockSize>>>(cpr,rr,totpts_d);
//gridSize = (int)ceil((float)nel*nfaces*nxd_2/blockSize);
Ausm_flux<<<gridSize, blockSize>>>(neq, totpts_d, nx, ny, nz, jaco_f, fs, rl, ul, vl, wl, pl, al, tl, rr, ur, vr, wr, pr, ar, tr, flx, cpl, cpr);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-5: %s\n",cudaGetErrorString(code));
}
map_faced(jgl, jgt, pl, qminus+iph*totpts, w, nx1, nxd, fdim, nel, nfaces, 0);
for(int j=0; j<neq;j++){
nekcol2<<<gridSize, blockSize>>>(flx+j*totpts_d,pl,totpts_d);
map_faced(jgl, jgt, flux+j*totpts, flx+j*totpts_d, w, nx1, nxd, fdim, nel, nfaces, 1);
}
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-6: %s\n",cudaGetErrorString(code));
}
cudaFree(w);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, comp-7: %s\n",cudaGetErrorString(code));
}
}
extern "C" void inviscidfluxwrapper_(double *qminus, double *qplus, double *flux, double *unx, double *uny, double *unz, double *area, double *wghtc, double *wghtf, double *cbc, double *jgl, double *jgt, double *nx, double *ny, double *nz, double *rl, double *ul, double *vl, double *wl, double *pl, double *tl, double *al, double *cpl, double *rr, double *ur, double *vr, double *wr, double *pr, double *tr, double *ar, double *cpr, double *fs, double *jaco_f, double *flx, double *jaco_c, int* neq, int* nstate, int* nflux, int *nxd, int *nx1, int *nel, int *ndim, int *irho, int *iux, int *iuy, int *iuz, int *ipr, int *ithm, int *isnd, int *icpf, int *iph){
bool inCPU = false;
if(inCPU){
//input and output
double *d_qminus, *d_qplus, *d_flux, *d_unx, *d_uny, *d_unz, *d_area, *d_wghtc, *d_wghtf, *d_cbc, *d_jgl, *d_jgt;
//temp arrays
double *d_nx, *d_ny, *d_nz, *d_rl, *d_ul, *d_vl, *d_wl, *d_pl, *d_tl, *d_al, *d_cpl, *d_rr, *d_ur, *d_vr, *d_wr, *d_pr, *d_tr, *d_ar, *d_cpr, *d_fs, *d_jaco_f, *d_flx, *d_jaco_c;
int nfaces=ndim[0]*2;
int ntot = nel[0] * nfaces * pow(nx1[0],2);
int ntotd = nel[0] * nfaces * pow(nxd[0],2);
cudaMalloc(&d_qminus, nstate[0]*ntot*sizeof(double));
cudaMemcpy(d_qminus, qminus, nstate[0]*ntot*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_qplus, nstate[0]*ntot*sizeof(double));
cudaMemcpy(d_qplus, qplus, nstate[0]*ntot*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_flux, neq[0]*ntot*sizeof(double));
cudaMalloc(&d_unx, ntot*sizeof(double));
cudaMemcpy(d_unx, unx, ntot*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_uny, ntot*sizeof(double));
cudaMemcpy(d_uny, uny, ntot*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_unz, ntot*sizeof(double));
cudaMemcpy(d_unz, unz, ntot*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_area, ntot*sizeof(double));
cudaMemcpy(d_area, area, ntot*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_wghtc, pow(nx1[0],2)*sizeof(double));
cudaMemcpy(d_wghtc, wghtc, pow(nx1[0],2)*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_wghtf, pow(nxd[0],2)*sizeof(double));
cudaMemcpy(d_wghtf, wghtf, pow(nxd[0],2)*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_cbc, pow(nxd[0],2)*sizeof(double));//correct
//cudaMemcpy(d_wghtf, wghtf, pow(nxd[0],2)*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_jgl, pow(nxd[0],3)*sizeof(double));
cudaMemcpy(d_jgl, jgl, pow(nxd[0],3)*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&d_jgt, pow(nxd[0],3)*sizeof(double));
cudaMemcpy(d_jgt, jgt, pow(nxd[0],3)*sizeof(double), cudaMemcpyHostToDevice);
double*d_all;
cudaMalloc(&d_all, 26*ntotd*sizeof(double));
d_nx = d_all;
d_ny = d_nx+ntotd;
d_nz = d_ny+ntotd;
d_rl = d_nz+ntotd;
d_ul = d_rl+ntotd;
d_wl = d_ul+ntotd;
d_vl = d_wl+ntotd;
d_pl = d_vl+ntotd;
d_tl = d_pl+ntotd;
d_al = d_tl+ntotd;
d_cpl = d_al+ntotd;
d_rr = d_cpl+ntotd;
d_ur = d_rr+ntotd;
d_wr = d_ur+ntotd;
d_vr = d_wr+ntotd;
d_pr = d_vr+ntotd;
d_tr = d_pr+ntotd;
d_ar = d_tr+ntotd;
d_cpr = d_ar+ntotd;
d_jaco_f = d_cpr+ntotd;
d_fs = d_jaco_f+ntotd;
d_flx = d_fs+ntotd;
/*cudaMalloc(&d_nx, ntotd*sizeof(double));
cudaMalloc(&d_ny, ntotd*sizeof(double));
cudaMalloc(&d_nz, ntotd*sizeof(double));
cudaMalloc(&d_rl, ntotd*sizeof(double));
cudaMalloc(&d_ul, ntotd*sizeof(double));
cudaMalloc(&d_wl, ntotd*sizeof(double));
cudaMalloc(&d_vl, ntotd*sizeof(double));
cudaMalloc(&d_pl, ntotd*sizeof(double));
cudaMalloc(&d_tl, ntotd*sizeof(double));
cudaMalloc(&d_al, ntotd*sizeof(double));
cudaMalloc(&d_cpl, ntotd*sizeof(double));
cudaMalloc(&d_rr, ntotd*sizeof(double));
cudaMalloc(&d_ur, ntotd*sizeof(double));
cudaMalloc(&d_vr, ntotd*sizeof(double));
cudaMalloc(&d_wr, ntotd*sizeof(double));
cudaMalloc(&d_pr, ntotd*sizeof(double));
cudaMalloc(&d_tr, ntotd*sizeof(double));
cudaMalloc(&d_ar, ntotd*sizeof(double));
cudaMalloc(&d_cpr, ntotd*sizeof(double));*/
cudaMalloc(&d_jaco_c, ntot*sizeof(double));
/*cudaMalloc(&d_jaco_f, ntotd*sizeof(double));
cudaMalloc(&d_fs, ntotd*sizeof(double));
cudaMalloc(&d_flx, 5*ntotd*sizeof(double));*/
//int* neq, int* nstate, int* nflux, int *nxd, int *nx1, int *nel, int *ndim, int *irho, int *iux, int *iuy, int *iuz, int *ipr, int *ithm, int *isnd, int *icpf, int *iph
//printf("neq = %d, nxd = %d, nx1 = %d, nel = %d, ndim = %d, irho = %d\n",neq[0],nxd[0],nx1[0],nel[0],ndim[0],irho[0]);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, malloc: %s\n",cudaGetErrorString(code));
}
InviscidFlux(d_qminus, d_qplus, d_flux, d_unx, d_uny, d_unz, d_area, d_wghtc, d_wghtf, d_cbc, d_jgl, d_jgt, d_nx, d_ny, d_nz, d_rl, d_ul, d_vl, d_wl, d_pl, d_tl, d_al, d_cpl, d_rr, d_ur, d_vr, d_wr, d_pr, d_tr, d_ar, d_cpr, d_fs, d_jaco_f, d_flx, d_jaco_c, neq[0], nstate[0], nflux[0], nxd[0], nx1[0], nel[0], ndim[0], irho[0]-1, iux[0]-1, iuy[0]-1, iuz[0]-1, ipr[0]-1, ithm[0]-1, isnd[0]-1, icpf[0]-1, iph[0]-1);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error Inv, compute: %s\n",cudaGetErrorString(code));
}
cudaMemcpy(flux, d_flux, neq[0]*ntot*sizeof(double), cudaMemcpyDeviceToHost);
//free
cudaFree(d_qminus);
cudaFree(d_qplus);
cudaFree(d_flux);
cudaFree(d_unx);
cudaFree(d_uny);
cudaFree(d_unz);
cudaFree(d_area);
cudaFree(d_wghtc);
cudaFree(d_wghtf);
cudaFree(d_cbc);//correct
cudaFree(d_jgl);
cudaFree(d_jgt);
cudaFree(d_all);
/*cudaFree(d_nx);
cudaFree(d_ny);
cudaFree(d_nz);
cudaFree(d_rl);
cudaFree(d_ul);
cudaFree(d_wl);
cudaFree(d_vl);
cudaFree(d_pl);
cudaFree(d_tl);
cudaFree(d_al);
cudaFree(d_cpl);
cudaFree(d_rr);
cudaFree(d_ur);
cudaFree(d_vr);
cudaFree(d_wr);
cudaFree(d_pr);
cudaFree(d_tr);
cudaFree(d_ar);
cudaFree(d_cpr);*/
cudaFree(d_jaco_c);
/*cudaFree(d_jaco_f);
cudaFree(d_fs);
cudaFree(d_flx);*/
}
else{
InviscidFlux(qminus, qplus, flux, unx, uny, unz, area, wghtc, wghtf, cbc, jgl, jgt, nx, ny, nz, rl, ul, vl, wl, pl, tl, al, cpl, rr, ur, vr, wr, pr, tr, ar, cpr, fs, jaco_f, flx, jaco_c, neq[0], nstate[0], nflux[0], nxd[0], nx1[0], nel[0], ndim[0], irho[0]-1, iux[0]-1, iuy[0]-1, iuz[0]-1, ipr[0]-1, ithm[0]-1, isnd[0]-1, icpf[0]-1, iph[0]-1);
}
}
//res1 = vols
void before_fields(){
//nekcopy u into res3 - can be done at next when res3 is needed
//set_dealias_face without zwgl - can be done at next when wghtc, wghtf needed
//compute_primitive_vars
//fillq_gpu
//faceu
}
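//Seeds the Stokes particles owned by this rank: positions are placed
//pseudo-randomly in [-0.9,0.9]^3 via fmod sequences, the force slot jf0 is
//zeroed, jar gets the constant p and jai gets a globally unique particle id.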
__global__ void init_stokes(double *rpart, int *ipart, int nr, int ni, int n, int nw, int np, int nid, int jx, int jy, int jz, int jf0, int jar, int jai, double p){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < nw/np){
int pid = id * np + nid + 1;
double dumx = fmod(1.352 * id/7,0.98)+.01;
double dumy = fmod(1.273 * id/8,0.98)+.01;
double dumz = fmod(1.222 * id/9,0.98)+.01;
int off = id*nr;
rpart[off+jx] = -0.9 + dumx * 1.8;
rpart[off+jy] = -0.9 + dumy * 1.8;
rpart[off+jz] = -0.9 + dumz * 1.8;
rpart[off+jf0] = 0.0;
rpart[off+jar] = p;//pow(10,15);
ipart[id*ni+jai] = pid;
}
}
#if 0
__global__ void particles_in_nid(int *fptsmap, double *rfpts, int *ifpts, double *rpart, int *ipart, double *range, int nrf, int nif, int *nfpts, int nr, int ni, int n, int lpart, int nelt, int jx, int jy, int jz,int je0, int jrc, int jpt, int jd, int jr, int nid){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
//double *rpart = rpart1 + id * nr;
//int *ipart = ipart1 + id * ni;
int ie;
double xloc = rpart[id*nr+jx];
double yloc = rpart[id*nr+jy];
double zloc = rpart[id*nr+jz];
for(ie = 0; ie < nelt; ie++){
//double * range = xerange + ie * 6;
if(xloc >= range[ie*6+0] && xloc <= range[ie*6+1] && yloc >=range[ie*6+2] && yloc <= range[ie*6+3] && zloc >= range[ie*6+4] && zloc <= range[ie*6+5]){
ipart[id*ni+je0] = ie;
ipart[id*ni+jrc] = 0;
ipart[id*ni+jpt] = nid;
ipart[id*ni+jd] = 1;
rpart[id*nr+jr] = -1.0 + 2.0*(xloc-range[ie*6+0])/(range[ie*6+1]-range[ie*6+0]);
rpart[id*nr+jr+1] = -1.0 + 2.0*(yloc-range[ie*6+2])/(range[ie*6+3]-range[ie*6+2]);
rpart[id*nr+jr+2] = -1.0 + 2.0*(zloc-range[ie*6+4])/(range[ie*6+5]-range[ie*6+4]);
break;
}
}
if(ie==nelt){
//point is outside all elements
int old = atomicAdd(nfpts, 1);
if(old==lpart){
printf("error many moving particles\n");
return;
}
fptsmap[old] = id+1;
//double * rfp = rfpts + old * nrf;
//int * ifp = ifpts + old * nif;
for(int i = 0 ; i < nrf; i++)
rfpts[old*nrf+i] = rpart[id*nr+i];
for(int i = 0 ; i < nif; i++)
ifpts[old*nif+i] = ipart[id*ni+i];
}
}
}
extern "C" void particles_in_nid_wrapper_(double *rfpts, int *ifpts, double *rpart, int *ipart, double *xerange, int *fptsmap, int *nrf, int *nif, int *nfpts, int *nr, int *ni, int *n, int *lpart, int *nelt, int *jx, int *jy, int *jz,int *je0, int *jrc, int *jpt, int *jd, int *jr, int *nid){
float time;
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaEventRecord(startEvent, 0);
bool inCPU = false;
double *d_rfpts, *d_rpart, *d_xerange;
int *d_fptsmap, *d_ifpts, *d_ipart, *d_nfpts;
if(inCPU){
cudaMalloc(&d_rfpts, lpart[0]*nrf[0]*sizeof(double));
cudaMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
cudaMalloc(&d_xerange, nelt[0]*6*sizeof(double));
cudaMalloc(&d_fptsmap, lpart[0]*sizeof(int));
cudaMalloc(&d_ifpts, lpart[0]*nif[0]*sizeof(int));
cudaMalloc(&d_ipart, n[0]*ni[0]*sizeof(int));
cudaMalloc(&d_nfpts, sizeof(int));
cudaMemcpy(d_rpart, rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_xerange, xerange, nelt[0]*6*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_ipart, ipart, n[0]*ni[0]*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nfpts, nfpts, sizeof(int), cudaMemcpyHostToDevice);
}
else{
d_rfpts = rfpts;
d_rpart= rpart;
d_xerange = xerange;
d_fptsmap = fptsmap;
d_ifpts = ifpts;
d_ipart = ipart;
cudaMalloc(&d_nfpts, sizeof(int));
cudaMemcpy(d_nfpts, nfpts, sizeof(int), cudaMemcpyHostToDevice);
}
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
// printf ("print var %d %d %d\n", n[0], jx[0], jy[0]);
particles_in_nid<<<gridSize, blockSize>>>(d_fptsmap, d_rfpts, d_ifpts, d_rpart, d_ipart, d_xerange, nrf[0], nif[0], d_nfpts, nr[0], ni[0], n[0], lpart[0], nelt[0], jx[0]-1, jy[0]-1, jz[0]-1, je0[0]-1, jrc[0]-1, jpt[0]-1, jd[0]-1, jr[0]-1, nid[0]);
if(inCPU){
cudaMemcpy(ipart, d_ipart, n[0]*ni[0]*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(nfpts, d_nfpts, sizeof(int), cudaMemcpyDeviceToHost);
if(nfpts[0]>0){
cudaMemcpy(fptsmap, d_fptsmap, nfpts[0]*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(rfpts, d_rfpts, nfpts[0]*nrf[0]*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(ifpts, d_ifpts, nfpts[0]*nif[0]*sizeof(int), cudaMemcpyDeviceToHost);
}
//free
cudaFree(d_rpart);
cudaFree(d_ipart);
cudaFree(d_xerange);
cudaFree(d_fptsmap);
cudaFree(d_rfpts);
cudaFree(d_ifpts);
}
else{
cudaMemcpy(nfpts, d_nfpts, sizeof(int), cudaMemcpyDeviceToHost);
// printf ("print var 1st %d\n", nfpts);
}
cudaFree(d_nfpts);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time, startEvent, stopEvent);
// printf ("print var 2nd %d\n", nfpts);
//printf("particles in nid time is %f\n",time*1e-03);
}
#endif
extern "C" void init_stokes_particleswrapper_(double *rpart, int *ipart, int *nr, int *ni, int *n, int *nw, int *np, int *nid, int *jx, int *jy, int *jz, int *jf0, int *jar, int *jai){
bool inCPU = false;
double *d_rpart;
int *d_ipart;
if(inCPU){
cudaMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
cudaMalloc(&d_ipart, n[0]*ni[0]*sizeof(int));
}
else{
d_rpart = rpart;
d_ipart = ipart;
}
int blockSize = 1024, gridSize;
int proc_work = nw[0]/np[0];
gridSize = (int)ceil((float)proc_work/blockSize);
init_stokes<<<gridSize, blockSize>>>(d_rpart, d_ipart, nr[0], ni[0], n[0], nw[0], np[0], nid[0], jx[0]-1, jy[0]-1, jz[0]-1, jf0[0]-1, jar[0]-1, jai[0]-1, pow(10,15));
if(inCPU){
cudaMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(ipart, d_ipart, n[0]*ni[0]*sizeof(int), cudaMemcpyDeviceToHost);
//free
cudaFree(d_rpart);
cudaFree(d_ipart);
}
}
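//Time-history shift for one coordinate direction j: the stored positions,
//velocities and fluid velocities are rotated (level 0 -> 1 -> 2 -> 3) before
//the new values are computed in update_velocity, which solves the implicit
//Stokes-drag update v0 = (s*(a1*u1+a2*u2+a3*u3) + f + b1*v1+b2*v2+b3*v3)/(b0+s)
//per particle and direction (the position update there is commented out).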
__global__ void solve_velocity(double *rpart, int nr, int ni, int n, int j, int jx0, int jx1, int jx2, int jx3, int jv0, int jv1, int jv2, int jv3, int ju0, int ju1, int ju2, int ju3){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
int off = id*nr+j;
double * rpart_off = rpart+off;
rpart_off[ju3] = rpart_off[ju2];
rpart_off[ju2] = rpart_off[ju1];
rpart_off[ju1] = rpart_off[ju0];
rpart_off[jv3] = rpart_off[jv2];
rpart_off[jv2] = rpart_off[jv1];
rpart_off[jv1] = rpart_off[jv0];
rpart_off[jx3] = rpart_off[jx2];
rpart_off[jx2] = rpart_off[jx1];
rpart_off[jx1] = rpart_off[jx0];
}
}
__global__ void update_velocity(double *rpart1, double *alpha, double *beta, int ndim, int nr, int ni, int n, int jx0, int jx1, int jx2, int jx3, int jv0, int jv1, int jv2, int jv3, int ju0, int ju1, int ju2, int ju3, int jf0, int jst){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n*ndim){
int j = id%ndim;
int i = id/ndim;
double s = rpart1[i*nr+jst];
int off = i*nr+j;
double * rpart = rpart1+off;
double rhs = s*(alpha[1]*rpart[ju1] + alpha[2]*rpart[ju2] + alpha[3]*rpart[ju3]) + rpart[jf0-j] + beta[1]*rpart[jv1] + beta[2]*rpart[jv2] + beta[3]*rpart[jv3];
rpart[jv0] = rhs/(beta[0]+s);
double rhx = beta[1]*rpart[jx1] + beta[2]*rpart[jx2] + beta[3]*rpart[jx3] + rpart[jv0];
// rpart[jx0] = rhx/beta[0];
}
}
//commented by adeesha
/*__global__ void update_particle_location_kernel(double *rpart1, double *xdrange1, int nr, int n, int ndim, int jx0, int jx1, int jx2, int jx3, int jaa, int jab, int jac, int jad, int *flagsend, double dt){
int id = blockIdx.x*blockDim.x+threadIdx.x;
// printf("***update_particle_location %d\n", id);
if(id < n*ndim){
// if(id < n){
// printf("***entered if block\n");
int j = id%ndim;
int i = id/ndim;
int off = i*nr+j;
double * rpart = rpart1+off;
double *xdrange = xdrange1+2*j;
// curtain test case, update x location
int factor=(3-j)/3;
rpart[jx0] = rpart[jx0] + (1.0/3)*rpart[jx0]*dt*factor;
// new curtain test case, update y location
int factor = 0;
if (j == 1){
factor = 1;
}
rpart[jx0] = rpart[jx0] + (1.0/3)*rpart[jx0]*dt*factor;
#if 0
// Do not remove this part of the code
rpart[jx0] = rpart[jx0] + rpart[jaa] * rpart[jad-j];
#endif
if(rpart[jx0]<xdrange[0]){
//exit(0);
flagsend[0] = flagsend[0] + 1;
}
if(rpart[jx0] > xdrange[1]){
//exit(0);
flagsend[0] = flagsend[0] + 1;
}
#if 0
// Do not remove this part of the code
if(rpart[jx0]<xdrange[0]){
rpart[jx0] = xdrange[1] - fabs(xdrange[0] - rpart[jx0]);
rpart[jx1] = xdrange[1] + fabs(xdrange[0] - rpart[jx1]);
rpart[jx2] = xdrange[1] + fabs(xdrange[0] - rpart[jx2]);
rpart[jx3] = xdrange[1] + fabs(xdrange[0] - rpart[jx3]);
}
if(rpart[jx0] > xdrange[1]){
rpart[jx0] = xdrange[0] + fabs(rpart[jx0] - xdrange[1]);
rpart[jx1] = xdrange[0] - fabs(xdrange[2] - rpart[jx1]);
rpart[jx2] = xdrange[0] - fabs(xdrange[2] - rpart[jx2]);
rpart[jx3] = xdrange[0] - fabs(xdrange[2] - rpart[jx3]);
}
#endif
}
} */
//currently unused; kept for reference
__global__ void update_particle_location_keke(double *rpart1, double *xdrange1, int nr, int n, int j, int ndim, int jx0, int jx1, int jx2, int jx3, int jaa, int jab, int jac, int jad){
int id = blockIdx.x*blockDim.x+threadIdx.x;
// printf("***update_particle_location %d\n", id);
if(id < n){
// printf("***entered if block\n");
// int j = id%ndim;
// int i = id/ndim;
int off = id*nr;
double * rpart = rpart1+off;
double *xdrange = xdrange1+2*j;
rpart[jx0+j] = rpart[jx0+j] + rpart[jaa+j] * rpart[jad];
// rpart[jx0+1] = rpart[jx0+1] + rpart[jab] * rpart[jad];
// rpart[jx0+2] = rpart[jx0+2] + rpart[jac] * rpart[jad];
rpart = rpart + j; //avoid the following all +j
if(rpart[jx0]<xdrange[0]){
rpart[jx0] = xdrange[1] - fabs(xdrange[0] - rpart[jx0]);
rpart[jx1] = xdrange[1] + fabs(xdrange[0] - rpart[jx1]);
rpart[jx2] = xdrange[1] + fabs(xdrange[0] - rpart[jx2]);
rpart[jx3] = xdrange[1] + fabs(xdrange[0] - rpart[jx3]);
}
if(rpart[jx0] > xdrange[1]){
rpart[jx0] = xdrange[0] + fabs(rpart[jx0] - xdrange[1]);
rpart[jx1] = xdrange[0] - fabs(xdrange[2] - rpart[jx1]);
rpart[jx2] = xdrange[0] - fabs(xdrange[2] - rpart[jx2]);
rpart[jx3] = xdrange[0] - fabs(xdrange[2] - rpart[jx3]);
}
}
}
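//Driver for the Stokes particle update: rotates the time histories once per
//direction (solve_velocity), then computes the new particle velocities
//(update_velocity). The particle location update kernels are currently
//commented out above and below.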
void update_stokes_particles(double *rpart, double *alpha, double *beta, double *xdrange, int ndim, int nr, int ni, int n, int jx0, int jx1, int jx2, int jx3, int jv0, int jv1, int jv2, int jv3, int ju0, int ju1, int ju2, int ju3, int jar, int jf0, int jaa, int jab, int jac, int jad, int *flagsend, double dt){
//jx0 and the other indices must already be 0-based (Fortran value minus 1)
//alpha and beta are arrays of length 4 (indices 0..3)
//solve velocity
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n/blockSize);
for(int j = 0; j < ndim; j++)
solve_velocity<<<gridSize, blockSize>>>(rpart, nr, ni, n, j, jx0, jx1, jx2, jx3, jv0, jv1, jv2, jv3, ju0, ju1, ju2, ju3);
gridSize = (int)ceil((float)n*ndim/blockSize);
update_velocity<<<gridSize, blockSize>>>(rpart, alpha, beta, ndim, nr, ni, n, jx0, jx1, jx2, jx3, jv0, jv1, jv2, jv3, ju0, ju1, ju2, ju3, jf0, jar);
// gridSize = (int)ceil((float)n/blockSize);
// for(int j = 0; j < ndim; j++)
// update_particle_location_keke<<<gridSize, blockSize>>>(rpart, xdrange, nr, n, j, ndim, jx0, jx1, jx2, jx3, jaa, jab, jac, jad);
//update_particle_location_kernel<<<gridSize, blockSize>>>(rpart, xdrange, nr, n, ndim, jx0, jx1, jx2, jx3, jaa, jab, jac, jad, flagsend, dt); //previous one with gridSize=..*ndim
}
extern "C" void updatestokeswrapper_(double *rpart, double *alpha, double *beta, double *xdrange, int *ndim, int *nr, int *ni, int *n, int *jx0, int *jx1, int *jx2, int *jx3, int *jv0, int *jv1, int *jv2, int *jv3, int *ju0, int *ju1, int *ju2, int *ju3, int *jar, int *jf0, int *jaa, int *jab, int * jac, int * jad, int *flagsend, double* dt){
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
bool inCPU = false;
double * d_rpart, *d_alpha, *d_beta, *d_xdrange;
int *d_flagsend;
if(inCPU){
cudaMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
cudaMalloc(&d_alpha, 4*sizeof(double));
cudaMalloc(&d_beta, 4*sizeof(double));
cudaMalloc(&d_xdrange, 6*sizeof(double));
cudaMalloc(&d_flagsend, sizeof(int));
cudaMemcpy(d_rpart, rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_alpha, alpha, 4*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_beta, beta, 4*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_xdrange, xdrange, 6*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_flagsend, flagsend, sizeof(int), cudaMemcpyHostToDevice);
}
else{
d_rpart = rpart;
//d_alpha = alpha;
//d_beta = beta;
d_xdrange = xdrange;
cudaMalloc(&d_alpha, 4*sizeof(double));
cudaMalloc(&d_beta, 4*sizeof(double));
cudaMemcpy(d_alpha, alpha, 4*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_beta, beta, 4*sizeof(double), cudaMemcpyHostToDevice);
d_flagsend = flagsend;
}
update_stokes_particles(d_rpart, d_alpha, d_beta, d_xdrange, ndim[0], nr[0], ni[0], n[0], jx0[0]-1, jx1[0]-1, jx2[0]-1, jx3[0]-1, jv0[0]-1, jv1[0]-1, jv2[0]-1, jv3[0]-1, ju0[0]-1, ju1[0]-1, ju2[0]-1, ju3[0]-1, jar[0]-1, jf0[0]-1, jaa[0]-1, jab[0]-1, jac[0]-1, jad[0]-1, d_flagsend, dt[0]);
if(inCPU){
cudaMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyDeviceToHost);
//free
cudaFree(d_rpart);
//cudaFree(d_alpha);
//cudaFree(d_beta);
cudaFree(d_xdrange);
cudaFree(d_flagsend);
}
cudaFree(d_alpha);
cudaFree(d_beta);
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("update stokes time is %f\n",time*1e-03);
}
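//Barycentric Lagrange interpolation of the velocity field at each particle's
//reference coordinates (stored at jr..jr+2) using the GLL nodes and weights.
//Note: every thread writes into the same rep scratch array of size nx1^3,
//which looks like a data race when more than one particle is processed.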
__global__ void baryinterp(double *rpart, int *ipart, double *vx, double *vy, double *vz, int jr, int je0, int ju0, double *rep, double *xgll, double * ygll, double *zgll, double *wxgll, double *wygll, double *wzgll, int nx1, int n, int nr, int ni){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
double x,y,z;
double bot = 0.0;
x = rpart[id*nr+jr];
y = rpart[id*nr+jr+1];
z = rpart[id*nr+jr+2];
for(int k=0; k<nx1;k++){
for(int j=0; j<nx1; j++){
double repdum = wygll[j]/(y-ygll[j]) * wzgll[k]/(z-zgll[k]) ;
for(int i = 0; i<nx1; i++){
rep[k*nx1*nx1+j*nx1+i] = repdum * wxgll[i]/(x-xgll[i]);
bot = bot + rep[k*nx1*nx1+j*nx1+i];
}
}
}
int ie = ipart[id*ni+je0];
//new (vx(ie),rpart(ju0)
double top1 = 0.0, top2 = 0.0, top3 = 0.0;
int nxyz = nx1*nx1*nx1;
double *fieldx = vx+ie*nxyz;
double *fieldy = vy+ie*nxyz;
double *fieldz = vz+ie*nxyz;
for(int i = 0; i<nxyz; i++){
top1 = top1 + rep[i]*fieldx[i];
top2 = top2 + rep[i]*fieldy[i];
top3 = top3 + rep[i]*fieldz[i];
}
rpart[id*nr+ju0] = top1/bot;
rpart[id*nr+ju0+1] = top2/bot;
rpart[id*nr+ju0+2] = top3/bot;
}
}
extern "C" void baryweights_evalwrapper_(double *rpart, int *ipart, double *vx, double *vy, double *vz, double *rep, double *xgll, double * ygll, double *zgll, double *wxgll, double *wygll, double *wzgll, int* jr, int* je0, int* ju0, int* nx1, int* n, int* nr, int* ni, int *nel){
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
bool inCPU = false;
double *d_rpart, *d_vols, *d_rep, *d_gll;
int *d_ipart;
int nx1_2 = nx1[0]*nx1[0];
int nx1_3 = nx1_2*nx1[0];
if(inCPU){
cudaMalloc(&d_rpart, n[0]*nr[0]*sizeof(double));
cudaMalloc(&d_vols, 3*nel[0]*nx1_3*sizeof(double));
cudaMalloc(&d_rep, nx1_3*sizeof(double));
cudaMalloc(&d_gll, 6*nx1[0]*sizeof(double));
cudaMalloc(&d_ipart, n[0]*ni[0]*sizeof(int));
cudaMemcpy(d_rpart, rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_vols, vx, nel[0]*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_vols+nel[0]*nx1_3, vy, nel[0]*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_vols+2*nel[0]*nx1_3, vz, nel[0]*nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_rep, rep, nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll, xgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+nx1[0], ygll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+2*nx1[0], zgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+3*nx1[0], wxgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+4*nx1[0], wygll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+5*nx1[0], wzgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_ipart, ipart, n[0]*ni[0]*sizeof(int), cudaMemcpyHostToDevice);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
baryinterp<<<gridSize, blockSize>>>(d_rpart, d_ipart, d_vols, d_vols+nel[0]*nx1_3, d_vols+2*nel[0]*nx1_3, jr[0]-1, je0[0]-1, ju0[0]-1, d_rep, d_gll, d_gll+nx1[0], d_gll+2*nx1[0], d_gll+3*nx1[0], d_gll+4*nx1[0], d_gll+5*nx1[0], nx1[0], n[0], nr[0], ni[0]);
}
else{
d_rpart = rpart;
d_ipart = ipart;
cudaMalloc(&d_rep, nx1_3*sizeof(double));
cudaMalloc(&d_gll, 6*nx1[0]*sizeof(double));
cudaMemcpy(d_rep, rep, nx1_3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll, xgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+nx1[0], ygll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+2*nx1[0], zgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+3*nx1[0], wxgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+4*nx1[0], wygll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_gll+5*nx1[0], wzgll, nx1[0]*sizeof(double), cudaMemcpyHostToDevice);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]/blockSize);
baryinterp<<<gridSize, blockSize>>>(d_rpart, d_ipart, vx,vy,vz, jr[0]-1, je0[0]-1, ju0[0]-1, d_rep, d_gll, d_gll+nx1[0], d_gll+2*nx1[0], d_gll+3*nx1[0], d_gll+4*nx1[0], d_gll+5*nx1[0], nx1[0], n[0], nr[0], ni[0]);
}
if(inCPU){
cudaMemcpy(rpart, d_rpart, n[0]*nr[0]*sizeof(double), cudaMemcpyDeviceToHost);
//free
cudaFree(d_rpart);
cudaFree(d_vols);
cudaFree(d_rep);
cudaFree(d_gll);
cudaFree(d_ipart);
}
else{
cudaFree(d_rep);
cudaFree(d_gll);
}
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("interp time is %f\n",time*1e-03);
}
__global__ void packFaces(double *faces, double *packed, double *sharedIndex, int n, int nelt, int nx1, int iu1, int dir){
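  // dir == 0: gather face data from the per-element face array into the contiguous packed buffer;
  // any other value scatters the packed buffer back into the face array.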
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < n){
//get element and face numbers
int e, f, nx1_2;
nx1_2 = nx1 * nx1;
e = sharedIndex[id*2]-1;//fmod(sharedIndex[id*2]-1,nelt);
f = sharedIndex[id*2+1]-1;
//printf("e = %d, f = %d\n",e,f);
/*if(e>nelt-1)
printf ("e > nelt, %d\n",e);
if(f>5)
printf("f > nface,%d\n",f);*/
//copy the whole face
int off2 = id * nx1_2;
int f_off2 = e * 6 * nx1_2 + f*nx1_2;
for(int i = 0; i < 5; i++){
int off1 = i * n * nx1_2;
int f_off1 = i * nelt * 6 * nx1_2;
double* packed1 = packed+off1+off2;
double* faces1 = faces+f_off1+f_off2;
for(int j = 0; j < nx1_2; j++){
if(dir == 0)
packed1[j] = faces1[j];
else faces1[j] = packed1[j];
}
}
for(int i = 0; i < 5; i++){
int off1 = (i+5) * n * nx1_2;
int f_off1 = (i+iu1-1) * nelt * 6 * nx1_2;
double* packed1 = packed+off1+off2;
double* faces1 = faces+f_off1+f_off2;
for(int j = 0; j < nx1_2; j++){
if(dir == 0)
packed1[j] = faces1[j];
else faces1[j] = packed1[j];
}
}
}
}
extern "C" void packfaceswrapper_(double *faces, double *packed, double *sharedIndex, int *maxIndex, int *nelt, int *nx1, int *iu, int *dir){
//all data is in GPU
int blockSize = 1024, gridSize;
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
double *d_shared;
cudaMalloc(&d_shared, nelt[0]*12*sizeof(double));
cudaMemcpy(d_shared, sharedIndex, nelt[0]*12*sizeof(double), cudaMemcpyHostToDevice);
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
// printf("allocate sharedIndex is %f\n",time*1e-03);
gridSize = (int)ceil((float)maxIndex[0]/blockSize);
packFaces<<<gridSize, blockSize>>>(faces, packed, d_shared, maxIndex[0], nelt[0], nx1[0], iu[0], dir[0]);
cudaFree(d_shared);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error str 4: %s\n",cudaGetErrorString(code));
}
}
void init_matrix(double * mat, int size, int begin){
for(int i=0; i<size; i++){
mat[i] = begin+i;
// mat[i] = rand();
}
}
void init_u(double * mat, int n, int k, int jobs){
//for(int i=0; i<n*k*jobs;i++)
//mat[i] = 0.0;
size_t bytes = jobs*n*k*sizeof(double);
double * u_eq1; //jobs(number of elements) * k
cudaMallocHost( (void**) &u_eq1, bytes);
init_matrix(u_eq1, jobs*n*k, 10);
double * vx; //working only on vx direction
cudaMallocHost( (void**) &vx, bytes);
init_matrix(vx, jobs*n*k, 10);
//calc time
// float time;
// cudaEvent_t startEvent, stopEvent;
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
// Device input vectors
double *d_mat;
double *d_u_eq1;
double *d_vx;
// allocate device vectors memory
cudaMalloc(&d_mat, bytes);
cudaMalloc(&d_u_eq1, bytes);
cudaMalloc(&d_vx, bytes);
// copy host vectors to device
cudaMemcpy( d_u_eq1, u_eq1, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_vx, vx, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
//gridSize = (int)ceil((float)n*k/blockSize);
gridSize = (int)ceil((float)n*k*jobs/blockSize);
// Execute the kernel
vecCopy<<<gridSize, blockSize>>>(d_u_eq1, d_mat, jobs, n*k);
vecMul<<<gridSize, blockSize>>>(d_vx, d_mat, jobs, n*k);
// Copy array back to host
cudaMemcpy( mat, d_mat, bytes, cudaMemcpyDeviceToHost );
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("second element is %f, n %d, k%d, time is %f\n",mat[n*k+1],n,k,time*1e-03);
//do in cpu
// cudaEventRecord(startEvent, 0);
for(int i =0; i< n*k*jobs; i++)
mat[i] = u_eq1[i];
for(int i=0; i< n*k*jobs;i++)
mat[i] = mat[i] * vx[i];
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
//printf("cpu time is %f\n",time*1e-03);
/*int nxz= 6, nxyz = 24, nel=3000;
double *vols = new double[nxyz*nel];
double *faces = new double[nxz*nel];
int *iface = new int[nxz*nel];
vols[1*nxyz+12] = 2.3;
iface[1*nxz+2] = 12;
full2faceWrapper_(vols, faces, nel, nxz, nxyz, iface, true, true);
printf("face = %f\n",faces[1*nxz+2]);*/
}
//program
extern "C" void test_( int* matsize_p, int* gridsize_p, int* jobs_p, double* h_A,
double* h_AA, int* M_p, int* N_p, int* K_p)
{
int matsize = *matsize_p;
int gridsize = *gridsize_p;
int jobs = *jobs_p;
int M = *M_p;
int N = *N_p;
int K = *K_p;
// float time;
// cudaEvent_t startEvent, stopEvent;
cudaDeviceProp prop;
cudaGetDeviceProperties (&prop, 0);
cudaSetDevice( 0 );
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
double *h_B, *h_BB, *h_C, *h_D, *h_E;
double *d_A, *d_AA, *d_B, *d_BB;
double *d_C, *d_D, *d_E;
M = matsize;
N = matsize*matsize;
K = matsize;
cudaMallocHost( (void**) &h_B, (K*N)*sizeof(double)*jobs );
cudaMallocHost( (void**) &h_BB, (N*K)*sizeof(double)*jobs );
cudaMallocHost( (void**) &h_C, (K*N)*sizeof(double)*jobs );
cudaMallocHost( (void**) &h_D, (K*N)*sizeof(double)*jobs );
cudaMallocHost( (void**) &h_E, (N*K)*sizeof(double)*jobs );
/* Initialize and copy the matrices */
//init_matrix(h_B, N*K*jobs, 10);
init_u(h_B, N, K, jobs);
// memset(h_C, 0, (K*N)*sizeof(double)*jobs);
// memset(h_D, 0, (K*N)*sizeof(double)*jobs);
// memset(h_E, 0, (K*N)*sizeof(double)*jobs);
cudaMalloc( (void**) &d_A, (M*K)*sizeof(double) );
cudaMalloc( (void**) &d_AA, (K*M)*sizeof(double) );
cudaMalloc( (void**) &d_B, (K*N)*sizeof(double)*jobs );
cudaMalloc( (void**) &d_BB, (N*K)*sizeof(double)*jobs );
cudaMalloc( (void**) &d_C, (K*N)*sizeof(double)*jobs );
cudaMalloc( (void**) &d_D, (K*N)*sizeof(double)*jobs );
cudaMalloc( (void**) &d_E, (N*K)*sizeof(double)*jobs );
// cudaMemset(d_C, 0, (K*N)*sizeof(double)*jobs);
// cudaMemset(d_D, 0, (K*N)*sizeof(double)*jobs);
// cudaMemset(d_E, 0, (K*N)*sizeof(double)*jobs);
cudaStream_t stream;
cudaStreamCreate( &stream );
D printf("Matrix d:\n");
D print(h_A, M, K);
D printf("Matrix db:\n");
D print(h_AA, M, K);
D printf("Matrix u:\n");
D print(h_B, K, N);
D printf("Matrix ub:\n");
D print(h_BB, K, N);
const double alpha = 1;
const double beta = 0;
unsigned int dim = K;
cublasSetMatrix(M, K, sizeof(double), h_A, K, d_A, K);
cublasSetMatrix(K, M, sizeof(double), h_AA, K, d_AA, K);
// cudaEventRecord(startEvent, 0);
cublasSetMatrixAsync(M, N*jobs, sizeof(double), h_B, M, d_B, M, stream);
fflush( stdout );
//cuda_multi_gemm_unif(stream, 'N', 'N', dim, dim, dim, &alpha, dim, dim*dim, d_A, d_B, d_BB, &beta, d_C, d_D, d_E, jobs*K, gridsize);
cudaDeviceSynchronize();
fflush( stdout );
cublasGetMatrixAsync(M, N*jobs, sizeof(double), d_C, K, h_C, K, stream);
cublasGetMatrixAsync(M, N*jobs, sizeof(double), d_D, K, h_D, K, stream);
cublasGetMatrixAsync(M, N*jobs, sizeof(double), d_E, K, h_E, K, stream);
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
// cudaEventElapsedTime(&time, startEvent, stopEvent);
// printf("GPU time: %f, throughput: %f\n", time * 1e-03, (jobs*2.0*3*K*// K*K*K)/(1024*1024*1024*time*1e-03));
// printf(" gpu time: %f\n", time * 1e-03);
D printf("Matrix r:\n");
D print((h_C), M, N);
D printf("Matrix s:\n");
D print((h_D), M, N);
D printf("Matrix t:\n");
D print((h_E), M, N);
cudaFreeHost( h_B );
cudaFreeHost( h_BB );
cudaFreeHost( h_C );
cudaFreeHost( h_D );
cudaFreeHost( h_E );
cudaFree( d_A );
cudaFree( d_B );
cudaFree( d_C );
fflush( stdout );
cudaStreamDestroy(stream);
/**jobs_p = 22;
int **p1 = new int*[2];
for(int i=0; i<2;i++)
p1[i] = new int[3];
int *p2 = (int*)p1;
for(int i=0;i<2;i++)
for(int j=0;j<3;j++){
p1[i][j] = i*2+j;
printf("a[%d][%d]=%d,%d\n",i,j,p1[i][j],p2[i*2+j]);
}*/
return;
}
|
320610a719480b30adc71ced03cde7718a33c7d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "common/book.h"
#include "time.h"
#include "stdlib.h"
#define N 10
__global__ void add(int *a, int *b, int *c) {
int tid = blockIdx.x; // thread id
    // do this check just in case, by mistake, the number of blocks is
    // greater than N
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int random(int min, int max) {
return min + rand() % (max - min + 1);
}
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate vectors on device
HANDLE_ERROR(hipMalloc((void**) &dev_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**) &dev_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**) &dev_c, N * sizeof(int)));
// fill host vectors randomly so that later we copy them to device
srand(time(0)); // use current time as seed
for (int i = 0; i < N; ++i) {
a[i] = random(1, 10);
b[i] = random(1, 10);
}
// copy host vectors to device
HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));
// spawn N blocks each having 1 thread (each block will run in parallel)
// number of threads = N blocks * 1 thread/block = N threads
hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
// copy result back to c
HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < N; ++i) {
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 320610a719480b30adc71ced03cde7718a33c7d3.cu | #include "stdio.h"
#include "common/book.h"
#include "time.h"
#include "stdlib.h"
#define N 10
__global__ void add(int *a, int *b, int *c) {
int tid = blockIdx.x; // thread id
    // do this check just in case, by mistake, the number of blocks is
    // greater than N
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int random(int min, int max) {
return min + rand() % (max - min + 1);
}
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate vectors on device
HANDLE_ERROR(cudaMalloc((void**) &dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**) &dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**) &dev_c, N * sizeof(int)));
// fill host vectors randomly so that later we copy them to device
srand(time(0)); // use current time as seed
for (int i = 0; i < N; ++i) {
a[i] = random(1, 10);
b[i] = random(1, 10);
}
// copy host vectors to device
HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
// spawn N blocks each having 1 thread (each block will run in parallel)
// number of threads = N blocks * 1 thread/block = N threads
add<<<N, 1>>>(dev_a, dev_b, dev_c);
// copy result back to c
HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; ++i) {
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
650cddf5422ce9fe51e2d4c24ea11d0b6fd4d729.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <cmath>
#include <string>
using namespace std;
//#define THREADS_PER_BLOCK 32
void fillMatrix(int* a, int n)
{
int i;
for (i = 0; i < n*n; ++i)
a[i] = 10;//rand()%5;
}
__global__
void matrixAdition(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<(n*n))
c[ij] = a[ij] + b[ij];
}
__global__
void matrixAditionRow(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
// if(blockDim.x != 0)
 //printf("%d output\n", ij);
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij*n+i] = a[ij*n+i] + b[ij*n+i];
}
}
__global__
void matrixAditionCol(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij+n*i] = a[ij+n*i] + b[ij+n*i];
}
}
void printMatrix(string s, int *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
for(int j=0;j<tam;j++)
{
cout<<a[i*tam+j]<<" ";
}
cout<<endl;
}
}
int main(int argc, char *argv[])
{
srand (time(NULL));
int N= strtol(argv[1], NULL, 10);
//cout<<N<<endl; return 1;
int THREADS_PER_BLOCK =1024;
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; //device copies of a,b,c
//int size = N*N*sizeof(int);
//allocateMemory(a,b,c,d_a,d_b,d_c,N);
int size=N*N*sizeof(int);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
a = (int *)malloc(size);
fillMatrix(a, N);
b = (int *)malloc(size);
fillMatrix(b, N);
c = (int *)malloc(size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
int blocks= (N*N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
int blocks2= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
 cout<<"blocks : \n"<<blocks2<<"\n threads: \n "<<THREADS_PER_BLOCK<<endl;
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start,0);
//matrixAdition<<<blocks,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
hipLaunchKernelGGL(( matrixAditionRow), dim3(blocks2),dim3(THREADS_PER_BLOCK), 0, 0, d_c, d_a, d_b,N);
//matrixAditionCol<<<blocks2,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
//printMatrix("Printing Matrix A \n",a,N);
//printMatrix("Printing Matrix B \n",b,N);
//printMatrix("Printing Matrix C \n",c,N);
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| 650cddf5422ce9fe51e2d4c24ea11d0b6fd4d729.cu | #include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <cmath>
#include <string>
using namespace std;
//#define THREADS_PER_BLOCK 32
void fillMatrix(int* a, int n)
{
int i;
for (i = 0; i < n*n; ++i)
a[i] = 10;//rand()%5;
}
__global__
void matrixAdition(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<(n*n))
c[ij] = a[ij] + b[ij];
}
__global__
void matrixAditionRow(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
// if(blockDim.x != 0)
 //printf("%d output\n", ij);
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij*n+i] = a[ij*n+i] + b[ij*n+i];
}
}
__global__
void matrixAditionCol(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij+n*i] = a[ij+n*i] + b[ij+n*i];
}
}
void printMatrix(string s, int *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
for(int j=0;j<tam;j++)
{
cout<<a[i*tam+j]<<" ";
}
cout<<endl;
}
}
int main(int argc, char *argv[])
{
srand (time(NULL));
int N= strtol(argv[1], NULL, 10);
//cout<<N<<endl; return 1;
int THREADS_PER_BLOCK =1024;
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; //device copies of a,b,c
//int size = N*N*sizeof(int);
//allocateMemory(a,b,c,d_a,d_b,d_c,N);
int size=N*N*sizeof(int);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
a = (int *)malloc(size);
fillMatrix(a, N);
b = (int *)malloc(size);
fillMatrix(b, N);
c = (int *)malloc(size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
int blocks= (N*N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
int blocks2= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
 cout<<"blocks : \n"<<blocks2<<"\n threads: \n "<<THREADS_PER_BLOCK<<endl;
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start,0);
//matrixAdition<<<blocks,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
matrixAditionRow<<<blocks2,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
//matrixAditionCol<<<blocks2,THREADS_PER_BLOCK>>>( d_c, d_a, d_b,N);
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
//printMatrix("Printing Matrix A \n",a,N);
//printMatrix("Printing Matrix B \n",b,N);
//printMatrix("Printing Matrix C \n",c,N);
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
40bd35d51c67902f859a01b029fb1012d69ad702.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Template for Programming Assignment 3
// Use "module load cuda" to enable compilation with the Nvidia C compiler nvcc
// Use "nvcc -O3" to compile code; this can be done even on an OSC login node (does not have a GPU)
// To execute compiled code, you must either use a batch submission to run on a node with GPU
// or obtain an interactive GPU-node by using: qsub -I -l walltime=0:59:00 -l nodes=1:gpus=1 -A PAS1488
#include <unistd.h>
#include <stdio.h>
#include <sys/time.h>
#define threshold 1e-8
#define n (1024)
// Change n to 1024 for final testing;
// #define n (256)
// n is set to 256 since execution time of single thread template version is excessive
#define SIZE (32)
void init(void);
void ref(void);
void compare(int N, double *wref, double *w);
__global__ void test_kernel(int N, double *A, double *B, double *C);
double rtclock(void);
double a[n][n],b[n][n],c[n][n],cref[n][n];
int main(){
double clkbegin, clkend, t;
double *Ad,*Bd,*Cd;
int size;
printf("Matrix Size = %d\n",n);
init();
clkbegin = rtclock();
ref();
clkend = rtclock();
t = clkend-clkbegin;
printf("Seq: Approx GFLOPS: %.1f ; Time = %.3f sec; cref[n/2][n/2-1] = %f; \n",
2.0*n*n*n/t/1e9,t,cref[n/2][n/2-1]);
size = sizeof(double)*n*n;
hipMalloc((void **) &Ad,size);
hipMalloc((void **) &Bd,size);
hipMalloc((void **) &Cd,size);
hipMemcpy(Ad,a,size,hipMemcpyHostToDevice);
hipMemcpy(Bd,b,size,hipMemcpyHostToDevice);
dim3 threads(SIZE, SIZE);
dim3 grid(n / threads.x, n / threads.y);
clkbegin = rtclock();
hipLaunchKernelGGL(( test_kernel), dim3(grid), dim3(threads), 0, 0, n,Ad,Bd,Cd);
if (hipDeviceSynchronize() != hipSuccess)
printf ("Error return for test_kernel: Was execution done on a node with a GPU?\n");
else
{
clkend = rtclock();
t = clkend-clkbegin;
hipMemcpy(c,Cd,size,hipMemcpyDeviceToHost);
hipFree(Ad); hipFree(Bd); hipFree(Cd);
printf("GPU: Approx GFLOPS: %.1f ; Time = %.3f sec; c[n/2][n/2-1] = %f; \n",
2.0*n*n*n/t/1e9,t,c[n/2][n/2-1]);
printf("Correctness Check for GPU solution:\n");
compare(n, (double *) c,(double *) cref);
}
}
__global__ void test_kernel(int N, double *A, double *B, double *C)
{
int i,j,k;
// Template version uses only one thread, which does all the work
// This must be changed (and the launch parameters) to exploit GPU parallelism
// You can make any changes; only requirement is that correctness test passes
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ double Bb[SIZE * SIZE], Ab[SIZE * SIZE];
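  // Tile over the k-dimension: each SIZE x SIZE block stages one tile of A and one tile of B
  // in shared memory per ks iteration, then every thread accumulates SIZE partial products
  // before the next tile is loaded.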
double sum = 0;
for(int ks = 0; ks < N; ks += SIZE){
Bb[ty * SIZE + tx] = B[(ks + tx) * N + j];
Ab[ty * SIZE + tx] = A[(ks + ty) * N + i];
__syncthreads();
for(k = 0; k < SIZE; k++){
sum += Ab[k * SIZE + tx] * Bb[ty * SIZE + k];
}
__syncthreads();
}
C[j * N + i] = sum;
}
void ref(void)
{
int i,j,k;
for (j=0;j<n;j++)
for (k=0;k<n;k++)
for (i=0;i<n;i++)
cref[j][i] += a[k][i]*b[k][j];
}
void init(void)
{
int i,j;
for(i=0;i<n;i++)
for(j=0;j<n;j++)
{ c[i][j] = 0.0;
cref[i][j] = 0.0;
a[i][j] = drand48();
b[i][j] = drand48();
}
}
void compare(int N, double *wref, double *w)
{
double maxdiff,this_diff;
int numdiffs;
int i,j;
numdiffs = 0;
maxdiff = 0;
for (i=0;i<N;i++)
for (j=0;j<N;j++)
{
this_diff = wref[i*N+j]-w[i*N+j];
if (this_diff < 0) this_diff = -1.0*this_diff;
if (this_diff>threshold)
{ numdiffs++;
if (this_diff > maxdiff) maxdiff=this_diff;
}
}
if (numdiffs > 0)
printf("%d Diffs found over threshold %f; Max Diff = %f\n",
numdiffs,threshold,maxdiff);
else
printf("No differences found between reference and test versions\n");
}
double rtclock(void)
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
| 40bd35d51c67902f859a01b029fb1012d69ad702.cu | // Template for Programming Assignment 3
// Use "module load cuda" to enable compilation with the Nvidia C compiler nvcc
// Use "nvcc -O3" to compile code; this can be done even on an OSC login node (does not have a GPU)
// To execute compiled code, you must either use a batch submission to run on a node with GPU
// or obtain an interactive GPU-node by using: qsub -I -l walltime=0:59:00 -l nodes=1:gpus=1 -A PAS1488
#include <unistd.h>
#include <stdio.h>
#include <sys/time.h>
#define threshold 1e-8
#define n (1024)
// Change n to 1024 for final testing;
// #define n (256)
// n is set to 256 since execution time of single thread template version is excessive
#define SIZE (32)
void init(void);
void ref(void);
void compare(int N, double *wref, double *w);
__global__ void test_kernel(int N, double *A, double *B, double *C);
double rtclock(void);
double a[n][n],b[n][n],c[n][n],cref[n][n];
int main(){
double clkbegin, clkend, t;
double *Ad,*Bd,*Cd;
int size;
printf("Matrix Size = %d\n",n);
init();
clkbegin = rtclock();
ref();
clkend = rtclock();
t = clkend-clkbegin;
printf("Seq: Approx GFLOPS: %.1f ; Time = %.3f sec; cref[n/2][n/2-1] = %f; \n",
2.0*n*n*n/t/1e9,t,cref[n/2][n/2-1]);
size = sizeof(double)*n*n;
cudaMalloc((void **) &Ad,size);
cudaMalloc((void **) &Bd,size);
cudaMalloc((void **) &Cd,size);
cudaMemcpy(Ad,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(Bd,b,size,cudaMemcpyHostToDevice);
dim3 threads(SIZE, SIZE);
dim3 grid(n / threads.x, n / threads.y);
clkbegin = rtclock();
test_kernel<<<grid, threads>>>(n,Ad,Bd,Cd);
if (cudaDeviceSynchronize() != cudaSuccess)
printf ("Error return for test_kernel: Was execution done on a node with a GPU?\n");
else
{
clkend = rtclock();
t = clkend-clkbegin;
cudaMemcpy(c,Cd,size,cudaMemcpyDeviceToHost);
cudaFree(Ad); cudaFree(Bd); cudaFree(Cd);
printf("GPU: Approx GFLOPS: %.1f ; Time = %.3f sec; c[n/2][n/2-1] = %f; \n",
2.0*n*n*n/t/1e9,t,c[n/2][n/2-1]);
printf("Correctness Check for GPU solution:\n");
compare(n, (double *) c,(double *) cref);
}
}
__global__ void test_kernel(int N, double *A, double *B, double *C)
{
int i,j,k;
// Template version uses only one thread, which does all the work
// This must be changed (and the launch parameters) to exploit GPU parallelism
// You can make any changes; only requirement is that correctness test passes
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
__shared__ double Bb[SIZE * SIZE], Ab[SIZE * SIZE];
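  // Tile over the k-dimension: each SIZE x SIZE block stages one tile of A and one tile of B
  // in shared memory per ks iteration, then every thread accumulates SIZE partial products
  // before the next tile is loaded.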
double sum = 0;
for(int ks = 0; ks < N; ks += SIZE){
Bb[ty * SIZE + tx] = B[(ks + tx) * N + j];
Ab[ty * SIZE + tx] = A[(ks + ty) * N + i];
__syncthreads();
for(k = 0; k < SIZE; k++){
sum += Ab[k * SIZE + tx] * Bb[ty * SIZE + k];
}
__syncthreads();
}
C[j * N + i] = sum;
}
void ref(void)
{
int i,j,k;
for (j=0;j<n;j++)
for (k=0;k<n;k++)
for (i=0;i<n;i++)
cref[j][i] += a[k][i]*b[k][j];
}
void init(void)
{
int i,j;
for(i=0;i<n;i++)
for(j=0;j<n;j++)
{ c[i][j] = 0.0;
cref[i][j] = 0.0;
a[i][j] = drand48();
b[i][j] = drand48();
}
}
void compare(int N, double *wref, double *w)
{
double maxdiff,this_diff;
int numdiffs;
int i,j;
numdiffs = 0;
maxdiff = 0;
for (i=0;i<N;i++)
for (j=0;j<N;j++)
{
this_diff = wref[i*N+j]-w[i*N+j];
if (this_diff < 0) this_diff = -1.0*this_diff;
if (this_diff>threshold)
{ numdiffs++;
if (this_diff > maxdiff) maxdiff=this_diff;
}
}
if (numdiffs > 0)
printf("%d Diffs found over threshold %f; Max Diff = %f\n",
numdiffs,threshold,maxdiff);
else
printf("No differences found between reference and test versions\n");
}
double rtclock(void)
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
|
7df5594600ee6164bc0667eed4614d91bb49f06f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include "scanImpl.cu"
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#include <iostream>
using namespace std;
#define TEST 1
#define utype unsigned long long
#define type long long
#define type_len (sizeof(type) * 8)
double ave_time ;
__global__ void static equal(type * a, int n, utype constC){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i<n; i+=stride){
a[i] = constC;
}
}
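// One bit-plane step of a bit-sliced comparison: constC is the key's bit broadcast across the
// word; for lanes still marked equal in eq, lt is set where the column bit is 1 and the key bit
// is 0, and eq is cleared wherever the column bit differs from the key bit.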
__global__ void static genScanFilter_int_lth_bit(type * col,int n, utype constC,type * lt, type * eq){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i < n; i+=stride){
lt[i] = lt[i] | (eq[i] & ~constC & col[i]);
eq[i] = eq[i] & ~(col[i] ^ constC);
//printf(" %d %u %u %u\n",i,lt[i],eq[i],col[i]);
}
}
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
inline type bit_constC(type where,int j){
type constC = ((((utype)1 << (type_len - 1 - j) )) & where)>>(type_len - 1 - j);
if(constC != 0)
constC = - 1;
return constC;
}
inline type ran(){
type x = rand();
if(sizeof(type) == 8) return (x << 32) + rand();
return x;
}
void profilebitscan(type *h_a,
type *h_b,
type *d,
type *lt,
type *eq,
int n,
utype where,
char *desc,
unsigned int loopTotal)
{
dim3 block(256);
dim3 grid(1024);
float time,stime;
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
stime=0;
utype bytes=n * sizeof(type);
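  // Each timed iteration: copy the bit-plane data to the device, initialise lt = 0 and
  // eq = all ones, run one genScanFilter pass per bit-plane (most significant plane first),
  // then copy the lt/eq result masks back to the host.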
for(int loop = 1; loop <= loopTotal; loop++){
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) );
checkCuda(hipDeviceSynchronize());
utype c = 0;
for(int i = 0;i < type_len;i++) c += ((utype)1 << i);
hipLaunchKernelGGL(( equal), dim3(grid),dim3(block), 0, 0, lt, n/type_len, 0) ;
hipLaunchKernelGGL(( equal), dim3(grid),dim3(block), 0, 0, eq, n/type_len, c) ;
checkCuda(hipDeviceSynchronize());
for(int j = 0; j < type_len; ++j){
int constC =bit_constC(where,j);
hipLaunchKernelGGL(( genScanFilter_int_lth_bit), dim3(grid),dim3(block), 0, 0, d + j * (n / type_len), n / type_len, constC, lt, eq);
checkCuda(hipDeviceSynchronize());
}
checkCuda( hipMemcpy(h_b, lt, n / type_len * sizeof(type), hipMemcpyDeviceToHost) );
checkCuda( hipMemcpy(h_b + n / type_len , eq, n / type_len * sizeof(type), hipMemcpyDeviceToHost) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
ave_time += time;
//printf("time=%f\n",stime);
}
//cerr<<stime<<endl;
//printf("%f\n" ,bytes * 1e-6/(stime / loopTotal));
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
}
int main(int argc, char ** argv)
{
#ifdef TEST
freopen("scan.in","r",stdin);
freopen("scan_bit.out","w",stdout);
#endif
int inputN;
sscanf(argv[1],"%d",&inputN);
utype nElements = inputN;
#ifdef TEST
 scanf("%llu",&nElements);
#endif
utype bytes = nElements * sizeof(type);
// host arrays
type *h_aPageable, *h_bPageable,*know_stop_constC_cpu;
type *h_bitPageable;
type *know_stop_len_cpu;
// device array
type *d_a;
type *lt,*eq;
// allocate and initialize
h_aPageable = (type*)malloc(bytes );
h_bPageable = (type*)malloc(bytes );
 h_bitPageable =(type *)calloc(nElements, sizeof(type) ); // zero-initialised: the bit planes below are accumulated with +=
know_stop_len_cpu = (type *)malloc(bytes );
know_stop_constC_cpu = (type *)malloc(bytes ); // host pageable
//checkCuda( hipHostMalloc((void**)&h_aPinned, bytes ) ); // host pinned
//checkCuda( hipHostMalloc((void**)&h_bPinned, bytes ) );
checkCuda( hipMalloc((void**)&d_a, bytes ) ); // device
 checkCuda( hipMalloc((void**)&lt, bytes ) ); // device return
checkCuda( hipMalloc((void**)&eq, bytes ) );
int early_size = 1024*1024;
sscanf(argv[2],"%d",&early_size);
srand(0);
for (int i = 0; i < nElements; ++i) h_aPageable[i] = ran()%((utype)1<<(type_len - 1));
#ifdef TEST
for (int i = 0; i < nElements; ++i){
if(sizeof(type)==4) scanf("%d",h_aPageable + i);
else scanf("%lld",h_aPageable + i);
// cerr<<h_aPageable[i]<<endl;
}
#endif
for (int i = 0; i < nElements; ++i)
for(int j = type_len - 1; j >= 0; --j){
h_bitPageable[i / type_len + (type_len - 1 - j)*(nElements/type_len)] += (((h_aPageable[i] &((utype)1<<j))>>j)<<(type_len - 1 - i % type_len));
//h_bitPageable[i / 32 + (31-j)*(nElements/32)] += 0;
}
// output device info and transfer size
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, 0) );
// printf("\nDevice: %s\n", prop.name);
// if(bytes< 1024){
// printf("scan size (B): %d\n", bytes);
// }else if (bytes < 1024 * 1024)
// {
// printf("scan size (KB): %d\n", bytes / (1024));
// }else{
// printf("scan size (MB): %d\n", bytes / (1024 * 1024));
// }
type constC = ran()%((utype)1<<(type_len - 1));
// perform scan eq
// profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20);
//profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20);
#ifdef TEST
int test_num = 0;
scanf("%d",&test_num);
for(int i = 0; i < test_num; i++){
if(sizeof(type)==4)
scanf("%d",&constC);
else scanf("%lld",&constC);
profilebitscan(h_bitPageable, h_bPageable, d_a, lt, eq, nElements, constC,"Pageable",1);
}
#else
constC = ran()%((utype)1<<(type_len - 1));
profilebitscan(h_bitPageable, h_bPageable, d_a, lt, eq, nElements, constC,"Pageable",1);
#endif
// for(int i = 0; i < nElements; i++) printf("%3u ",h_aPageable[i]);printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",h_bitPageable[i]);printf("\n");
#ifdef TEST
for(int i = 0; i < nElements; i++) {
int x =(h_bPageable[i/type_len] & ((utype)1 << (type_len - 1 - i % type_len)))>> (type_len - 1 - i % type_len);
int y =(h_bPageable[i/type_len + nElements/type_len] & ((utype)1 << (type_len - 1 - i % type_len)))>> (type_len - 1 - i % type_len);
if(x ==0 && y == 0) printf("%d\n",1);
else printf("%d\n", 0);
}
printf("%.6f\n",bytes* 1e-6 /(ave_time / test_num));
cerr<<bytes* 1e-6 /(ave_time / test_num)<<endl;
#endif
// cleanup
hipFree(lt);
hipFree(eq);
free(h_aPageable);
}
| 7df5594600ee6164bc0667eed4614d91bb49f06f.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include "scanImpl.cu"
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#include <iostream>
using namespace std;
#define TEST 1
#define utype unsigned long long
#define type long long
#define type_len (sizeof(type) * 8)
double ave_time ;
__global__ void static equal(type * a, int n, utype constC){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i<n; i+=stride){
a[i] = constC;
}
}
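// One bit-plane step of a bit-sliced comparison: constC is the key's bit broadcast across the
// word; for lanes still marked equal in eq, lt is set where the column bit is 1 and the key bit
// is 0, and eq is cleared wherever the column bit differs from the key bit.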
__global__ void static genScanFilter_int_lth_bit(type * col,int n, utype constC,type * lt, type * eq){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i < n; i+=stride){
lt[i] = lt[i] | (eq[i] & ~constC & col[i]);
eq[i] = eq[i] & ~(col[i] ^ constC);
//printf(" %d %u %u %u\n",i,lt[i],eq[i],col[i]);
}
}
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
inline type bit_constC(type where,int j){
type constC = ((((utype)1 << (type_len - 1 - j) )) & where)>>(type_len - 1 - j);
if(constC != 0)
constC = - 1;
return constC;
}
inline type ran(){
type x = rand();
if(sizeof(type) == 8) return (x << 32) + rand();
return x;
}
void profilebitscan(type *h_a,
type *h_b,
type *d,
type *lt,
type *eq,
int n,
utype where,
char *desc,
unsigned int loopTotal)
{
dim3 block(256);
dim3 grid(1024);
float time,stime;
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
stime=0;
utype bytes=n * sizeof(type);
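  // Each timed iteration: copy the bit-plane data to the device, initialise lt = 0 and
  // eq = all ones, run one genScanFilter pass per bit-plane (most significant plane first),
  // then copy the lt/eq result masks back to the host.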
for(int loop = 1; loop <= loopTotal; loop++){
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) );
checkCuda(cudaThreadSynchronize());
utype c = 0;
for(int i = 0;i < type_len;i++) c += ((utype)1 << i);
equal<<<grid,block>>>(lt, n/type_len, 0) ;
equal<<<grid,block>>>(eq, n/type_len, c) ;
checkCuda(cudaThreadSynchronize());
for(int j = 0; j < type_len; ++j){
int constC =bit_constC(where,j);
genScanFilter_int_lth_bit<<<grid,block>>>(d + j * (n / type_len), n / type_len, constC, lt, eq);
checkCuda(cudaThreadSynchronize());
}
checkCuda( cudaMemcpy(h_b, lt, n / type_len * sizeof(type), cudaMemcpyDeviceToHost) );
checkCuda( cudaMemcpy(h_b + n / type_len , eq, n / type_len * sizeof(type), cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
ave_time += time;
//printf("time=%f\n",stime);
}
//cerr<<stime<<endl;
//printf("%f\n" ,bytes * 1e-6/(stime / loopTotal));
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
}
int main(int argc, char ** argv)
{
#ifdef TEST
freopen("scan.in","r",stdin);
freopen("scan_bit.out","w",stdout);
#endif
int inputN;
sscanf(argv[1],"%d",&inputN);
utype nElements = inputN;
#ifdef TEST
 scanf("%llu",&nElements);
#endif
utype bytes = nElements * sizeof(type);
// host arrays
type *h_aPageable, *h_bPageable,*know_stop_constC_cpu;
type *h_bitPageable;
type *know_stop_len_cpu;
// device array
type *d_a;
type *lt,*eq;
// allocate and initialize
h_aPageable = (type*)malloc(bytes );
h_bPageable = (type*)malloc(bytes );
 h_bitPageable =(type *)calloc(nElements, sizeof(type) ); // zero-initialised: the bit planes below are accumulated with +=
know_stop_len_cpu = (type *)malloc(bytes );
know_stop_constC_cpu = (type *)malloc(bytes ); // host pageable
//checkCuda( cudaMallocHost((void**)&h_aPinned, bytes ) ); // host pinned
//checkCuda( cudaMallocHost((void**)&h_bPinned, bytes ) );
checkCuda( cudaMalloc((void**)&d_a, bytes ) ); // device
 checkCuda( cudaMalloc((void**)&lt, bytes ) ); // device return
checkCuda( cudaMalloc((void**)&eq, bytes ) );
int early_size = 1024*1024;
sscanf(argv[2],"%d",&early_size);
srand(0);
for (int i = 0; i < nElements; ++i) h_aPageable[i] = ran()%((utype)1<<(type_len - 1));
#ifdef TEST
for (int i = 0; i < nElements; ++i){
if(sizeof(type)==4) scanf("%d",h_aPageable + i);
else scanf("%lld",h_aPageable + i);
// cerr<<h_aPageable[i]<<endl;
}
#endif
for (int i = 0; i < nElements; ++i)
for(int j = type_len - 1; j >= 0; --j){
h_bitPageable[i / type_len + (type_len - 1 - j)*(nElements/type_len)] += (((h_aPageable[i] &((utype)1<<j))>>j)<<(type_len - 1 - i % type_len));
//h_bitPageable[i / 32 + (31-j)*(nElements/32)] += 0;
}
// output device info and transfer size
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, 0) );
// printf("\nDevice: %s\n", prop.name);
// if(bytes< 1024){
// printf("scan size (B): %d\n", bytes);
// }else if (bytes < 1024 * 1024)
// {
// printf("scan size (KB): %d\n", bytes / (1024));
// }else{
// printf("scan size (MB): %d\n", bytes / (1024 * 1024));
// }
type constC = ran()%((utype)1<<(type_len - 1));
// perform scan eq
// profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20);
//profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20);
#ifdef TEST
int test_num = 0;
scanf("%d",&test_num);
for(int i = 0; i < test_num; i++){
if(sizeof(type)==4)
scanf("%d",&constC);
else scanf("%lld",&constC);
profilebitscan(h_bitPageable, h_bPageable, d_a, lt, eq, nElements, constC,"Pageable",1);
}
#else
constC = ran()%((utype)1<<(type_len - 1));
profilebitscan(h_bitPageable, h_bPageable, d_a, lt, eq, nElements, constC,"Pageable",1);
#endif
// for(int i = 0; i < nElements; i++) printf("%3u ",h_aPageable[i]);printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",h_bitPageable[i]);printf("\n");
#ifdef TEST
for(int i = 0; i < nElements; i++) {
int x =(h_bPageable[i/type_len] & ((utype)1 << (type_len - 1 - i % type_len)))>> (type_len - 1 - i % type_len);
int y =(h_bPageable[i/type_len + nElements/type_len] & ((utype)1 << (type_len - 1 - i % type_len)))>> (type_len - 1 - i % type_len);
if(x ==0 && y == 0) printf("%d\n",1);
else printf("%d\n", 0);
}
printf("%.6f\n",bytes* 1e-6 /(ave_time / test_num));
cerr<<bytes* 1e-6 /(ave_time / test_num)<<endl;
#endif
// cleanup
cudaFree(lt);
cudaFree(eq);
free(h_aPageable);
}
|
65c763203d4a887b03295a47f48e1c52d9413c26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defs.h"
#include "kernel_prescan.cu"
__global__ void splitGPU(unsigned int*in_d, unsigned int *out_d, unsigned int in_size, int bit_shift) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int bit = 0;
if (index < in_size) {
bit = in_d[index] & (1 << bit_shift);
bit = (bit > 0) ? 1 : 0;
out_d[index] = 1 - bit;
}
}
__global__ void indexDefine(unsigned int *in_d, unsigned int *rev_bit_d, unsigned int in_size, unsigned int last_input) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
int total_falses = in_d[in_size - 1] + last_input;
__syncthreads();
if (index < in_size) {
if (rev_bit_d[index] == 0) {
int val = in_d[index];
in_d[index] = index + 1 - val + total_falses;
}
}
}
__global__ void scatterElements(unsigned int *in_d, unsigned int *index_d, unsigned int *out_d, unsigned int in_size) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in_size) {
unsigned int val = index_d[index];
if (val < in_size) {
out_d[val] = in_d[index];
}
}
}
void radix_sort(unsigned int *in_d, unsigned int *out_d, unsigned int *out_scan_d, unsigned int *in_h,unsigned int *out_scan_h, int num_elements) {
hipError_t ret;
unsigned int *temp;
dim3 dimThreadBlock;
dimThreadBlock.x = BLOCK_SIZE;
dimThreadBlock.y = 1;
dimThreadBlock.z = 1;
dim3 dimGrid;
dimGrid.x =(int)(ceil(num_elements/(1.0 * dimThreadBlock.x)));
dimGrid.y = 1;
dimGrid.z = 1;
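 // LSD radix sort: for each of the 32 bits, flag elements whose bit is 0 (splitGPU),
 // exclusive-scan the flags (preScan), convert the scan into scatter destinations
 // (indexDefine), scatter the keys (scatterElements), then swap the in/out buffers.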
for (int i =0;i<32;i++) {
hipLaunchKernelGGL(( splitGPU), dim3(dimGrid), dim3(dimThreadBlock), 0, 0, in_d,out_d,num_elements,i);
ret = hipDeviceSynchronize();
if(ret != hipSuccess) FATAL("Unable to launch kernel:splitGPU");
preScan(out_scan_d, out_d, num_elements);
ret = hipDeviceSynchronize();
if(ret != hipSuccess) FATAL("Unable to launch kernel");
#ifdef TEST_MODE
hipMemcpy(out_scan_h, out_scan_d, num_elements * sizeof(unsigned int),
hipMemcpyDeviceToHost);
ret = hipDeviceSynchronize();
 if(ret != hipSuccess) FATAL("Unable to copy memory from the device");
printf("after exclusive scan:\n");
for (int i = 0; i< num_elements;i++) {
printf("%u,",out_scan_h[i]);
}
printf("\n");
#endif
hipLaunchKernelGGL(( indexDefine), dim3(dimGrid), dim3(dimThreadBlock), 0, 0, out_scan_d, out_d, num_elements, in_h[num_elements - 1]);
ret = hipDeviceSynchronize();
if(ret != hipSuccess) FATAL("Unable to launch kernel");
#ifdef TEST_MODE
hipMemcpy(out_scan_h, out_scan_d, num_elements * sizeof(unsigned int),
hipMemcpyDeviceToHost);
ret = hipDeviceSynchronize();
 if(ret != hipSuccess) FATAL("Unable to copy memory from the device");
printf("after index define:\n");
for (int i = 0; i< num_elements;i++) {
printf("%u,",out_scan_h[i]);
}
printf("\n");
#endif
hipLaunchKernelGGL(( scatterElements), dim3(dimGrid), dim3(dimThreadBlock), 0, 0, in_d, out_scan_d, out_d, num_elements);
ret = hipDeviceSynchronize();
if(ret != hipSuccess) FATAL("Unable to launch kernel");
// swap pointers
temp = in_d;
in_d = out_d;
out_d = temp;
}
}
| 65c763203d4a887b03295a47f48e1c52d9413c26.cu | #include "defs.h"
#include "kernel_prescan.cu"
__global__ void splitGPU(unsigned int*in_d, unsigned int *out_d, unsigned int in_size, int bit_shift) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int bit = 0;
if (index < in_size) {
bit = in_d[index] & (1 << bit_shift);
bit = (bit > 0) ? 1 : 0;
out_d[index] = 1 - bit;
}
}
__global__ void indexDefine(unsigned int *in_d, unsigned int *rev_bit_d, unsigned int in_size, unsigned int last_input) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
int total_falses = in_d[in_size - 1] + last_input;
__syncthreads();
if (index < in_size) {
if (rev_bit_d[index] == 0) {
int val = in_d[index];
in_d[index] = index + 1 - val + total_falses;
}
}
}
__global__ void scatterElements(unsigned int *in_d, unsigned int *index_d, unsigned int *out_d, unsigned int in_size) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in_size) {
unsigned int val = index_d[index];
if (val < in_size) {
out_d[val] = in_d[index];
}
}
}
void radix_sort(unsigned int *in_d, unsigned int *out_d, unsigned int *out_scan_d, unsigned int *in_h,unsigned int *out_scan_h, int num_elements) {
cudaError_t ret;
unsigned int *temp;
dim3 dimThreadBlock;
dimThreadBlock.x = BLOCK_SIZE;
dimThreadBlock.y = 1;
dimThreadBlock.z = 1;
dim3 dimGrid;
dimGrid.x =(int)(ceil(num_elements/(1.0 * dimThreadBlock.x)));
dimGrid.y = 1;
dimGrid.z = 1;
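 // LSD radix sort: for each of the 32 bits, flag elements whose bit is 0 (splitGPU),
 // exclusive-scan the flags (preScan), convert the scan into scatter destinations
 // (indexDefine), scatter the keys (scatterElements), then swap the in/out buffers.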
for (int i =0;i<32;i++) {
splitGPU<<<dimGrid, dimThreadBlock>>>(in_d,out_d,num_elements,i);
ret = cudaDeviceSynchronize();
if(ret != cudaSuccess) FATAL("Unable to launch kernel:splitGPU");
preScan(out_scan_d, out_d, num_elements);
ret = cudaDeviceSynchronize();
if(ret != cudaSuccess) FATAL("Unable to launch kernel");
#ifdef TEST_MODE
cudaMemcpy(out_scan_h, out_scan_d, num_elements * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
ret = cudaDeviceSynchronize();
 if(ret != cudaSuccess) FATAL("Unable to copy memory from the device");
printf("after exclusive scan:\n");
for (int i = 0; i< num_elements;i++) {
printf("%u,",out_scan_h[i]);
}
printf("\n");
#endif
indexDefine<<<dimGrid, dimThreadBlock>>>(out_scan_d, out_d, num_elements, in_h[num_elements - 1]);
ret = cudaDeviceSynchronize();
if(ret != cudaSuccess) FATAL("Unable to launch kernel");
#ifdef TEST_MODE
cudaMemcpy(out_scan_h, out_scan_d, num_elements * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
ret = cudaDeviceSynchronize();
 if(ret != cudaSuccess) FATAL("Unable to copy memory from the device");
printf("after index define:\n");
for (int i = 0; i< num_elements;i++) {
printf("%u,",out_scan_h[i]);
}
printf("\n");
#endif
scatterElements<<<dimGrid, dimThreadBlock>>>(in_d, out_scan_d, out_d, num_elements);
ret = cudaDeviceSynchronize();
if(ret != cudaSuccess) FATAL("Unable to launch kernel");
// swap pointers
temp = in_d;
in_d = out_d;
out_d = temp;
}
}
|
8cece221db6efdaf43bdc5fe062ef60625a58c5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// cache filter in shared memory
const int filterCacheCol = threadIdx.x;
const int filterCacheRow = threadIdx.y;
const int filterCacheIndex = filterCacheRow * filterWidth + filterCacheCol;
extern __shared__ float filterCache[];
if (filterCacheIndex < filterWidth * filterWidth) {
filterCache[filterCacheIndex] = filter[filterCacheIndex];
}
__syncthreads();
// pixel(x,y)
const int x = (blockIdx.x * blockDim.x) + threadIdx.x; // column
const int y = (blockIdx.y * blockDim.y) + threadIdx.y; // row
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
if ( x >= numCols || y >= numRows )
{
return;
}
const auto i = y * numCols + x;
const auto filterHalf = filterWidth / 2;
float acc = 0.0f;
int imgRow;
int imgCol;
int imgIndex;
int filterIndex = 0;
// todo run 3 loops:
// 1. values y - filterHalf < 0 to 0
// 2. 0 < values y - filterHalf < numRows - 1
// 3. y - filterHalf > numRows - 1
for (auto row = 0; row < filterWidth; ++row) {
imgRow = numCols * min(max(y - filterHalf + row, 0), numRows - 1);
for (auto col = 0; col < filterWidth; ++col) {
imgCol = min(max(x - filterHalf + col , 0), numCols - 1);
imgIndex = imgRow + imgCol;
acc += inputChannel[imgIndex] * filterCache[filterIndex];
filterIndex++;
}
}
  outputChannel[i] = static_cast<unsigned char>(acc); // store as unsigned char, as noted above
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// pixel(x,y)
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
if ( x >= numCols || y >= numRows )
{
return;
}
const int i = x + y * numCols;
const uchar4 rgba = inputImageRGBA[i];
redChannel[i] = rgba.x;
greenChannel[i] = rgba.y;
blueChannel[i] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
  checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
// filter is a square, so it's total number of elements is filterWidth^2
const size_t nFilterElements = filterWidth * filterWidth;
const size_t nFilterBytes = sizeof(float) * nFilterElements;
checkCudaErrors(
hipMalloc(&d_filter, nFilterBytes)
);
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(
hipMemcpy(d_filter, h_filter, nFilterBytes, hipMemcpyHostToDevice)
);
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
const int MAX_THREADS = 1024;
const int thx = filterWidth * filterWidth;
const int thy = MAX_THREADS / thx;
// const int thz = filterWidth * filterWidth; // index into filter array
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(thx, thy); // = threadsPerBlock
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const int gridx = (numCols / blockSize.x) + 1;
const int gridy = (numRows / blockSize.y) + 1;
const dim3 gridSize(gridx, gridy); // = numBlocks
//Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
const auto sharedMemorySize = filterWidth * filterWidth * sizeof(float);
//Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sharedMemorySize, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sharedMemorySize, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sharedMemorySize, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 8cece221db6efdaf43bdc5fe062ef60625a58c5a.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
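//
// As a purely illustrative, host-side sketch of that AoS -> SoA separation
// (not part of the assignment; the function name and the flat byte view with
// a stride of 4 are assumptions made only for this example):
inline void separateChannelsCPU(const unsigned char* rgba, int numPixels,
                                unsigned char* red, unsigned char* green,
                                unsigned char* blue)
{
  for (int i = 0; i < numPixels; ++i) {
    red[i]   = rgba[4 * i + 0];  // R
    green[i] = rgba[4 * i + 1];  // G
    blue[i]  = rgba[4 * i + 2];  // B
    // rgba[4 * i + 3] is the alpha channel and is ignored, as in the kernels below.
  }
}
//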
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
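//
// A hedged, host-side sketch of exactly that weighted average, mirroring the
// clamping used by the kernel and reference solution below (the function name
// is made up for this example; it is not called anywhere in this file):
inline float blurPixelCPU(const unsigned char* channel, const float* filter,
                          int filterWidth, int numRows, int numCols,
                          int row, int col)
{
  const int half = filterWidth / 2;
  float acc = 0.0f;
  for (int fr = 0; fr < filterWidth; ++fr) {
    for (int fc = 0; fc < filterWidth; ++fc) {
      // clamp neighbor coordinates to the image bounds
      int ir = row - half + fr;
      int ic = col - half + fc;
      if (ir < 0) ir = 0; if (ir > numRows - 1) ir = numRows - 1;
      if (ic < 0) ic = 0; if (ic > numCols - 1) ic = numCols - 1;
      acc += channel[ir * numCols + ic] * filter[fr * filterWidth + fc];
    }
  }
  return acc;
}
//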
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// cache filter in shared memory
const int filterCacheCol = threadIdx.x;
const int filterCacheRow = threadIdx.y;
const int filterCacheIndex = filterCacheRow * filterWidth + filterCacheCol;
extern __shared__ float filterCache[];
if (filterCacheIndex < filterWidth * filterWidth) {
filterCache[filterCacheIndex] = filter[filterCacheIndex];
}
__syncthreads();
// pixel(x,y)
const int x = (blockIdx.x * blockDim.x) + threadIdx.x; // column
const int y = (blockIdx.y * blockDim.y) + threadIdx.y; // row
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
if ( x >= numCols || y >= numRows )
{
return;
}
const auto i = y * numCols + x;
const auto filterHalf = filterWidth / 2;
float acc = 0.0f;
int imgRow;
int imgCol;
int imgIndex;
int filterIndex = 0;
// todo run 3 loops:
// 1. values y - filterHalf < 0 to 0
// 2. 0 < values y - filterHalf < numRows - 1
// 3. y - filterHalf > numRows - 1
for (auto row = 0; row < filterWidth; ++row) {
imgRow = numCols * min(max(y - filterHalf + row, 0), numRows - 1);
for (auto col = 0; col < filterWidth; ++col) {
imgCol = min(max(x - filterHalf + col , 0), numCols - 1);
imgIndex = imgRow + imgCol;
acc += inputChannel[imgIndex] * filterCache[filterIndex];
filterIndex++;
}
}
outputChannel[i] = static_cast<unsigned char>(acc); // acc stays in [0, 255] for a normalized filter
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// pixel(x,y)
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
if ( x >= numCols || y >= numRows )
{
return;
}
const int i = x + y * numCols;
const uchar4 rgba = inputImageRGBA[i];
redChannel[i] = rgba.x;
greenChannel[i] = rgba.y;
blueChannel[i] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
// filter is a square, so its total number of elements is filterWidth^2
const size_t nFilterElements = filterWidth * filterWidth;
const size_t nFilterBytes = sizeof(float) * nFilterElements;
checkCudaErrors(
cudaMalloc(&d_filter, nFilterBytes)
);
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(
cudaMemcpy(d_filter, h_filter, nFilterBytes, cudaMemcpyHostToDevice)
);
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
const int MAX_THREADS = 1024;
const int thx = filterWidth * filterWidth;
const int thy = MAX_THREADS / thx;
// const int thz = filterWidth * filterWidth; // index into filter array
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(thx, thy); // = threadsPerBlock
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const int gridx = (numCols / blockSize.x) + 1;
const int gridy = (numRows / blockSize.y) + 1;
const dim3 gridSize(gridx, gridy); // = numBlocks
//Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
const auto sharedMemorySize = filterWidth * filterWidth * sizeof(float);
//Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize, sharedMemorySize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, sharedMemorySize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, sharedMemorySize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
8d17e1ea65ff75629d10a67df786ba01e6d12bff.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
//#define MATERIAL_SORT
#define STREAM_COMPACT
#define TIME
#define AA
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
static Triangle * dev_tris = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const int numtris = hst_scene->N_tris;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
hipMalloc(&dev_tris, numtris * sizeof(Triangle));
hipMemcpy(dev_tris, scene->tris.data(), numtris * sizeof(Triangle), hipMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
hipFree(dev_tris);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, pathSegments[index].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
float jitter_x = 0;
float jitter_y = 0;
#ifdef AA
jitter_x = u01(rng);
jitter_y = u01(rng);
#endif
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)(x + jitter_x) - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)(y + jitter_y) - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
, Triangle * tris
, int N_tris
, glm::vec3 box_min
, glm::vec3 box_max
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == TRIMESH)
{
t = triangleMeshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside,
tris, N_tris, box_min, box_max);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
intersections[path_index].materialId = -1;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths && pathSegments[idx].remainingBounces > 0)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
Ray r = pathSegments[idx].ray;
glm::vec3 rayPosIntersect = getPointOnRay(r, intersection.t);
scatterRay(pathSegments[idx], rayPosIntersect, intersection.surfaceNormal, material, rng);
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
//PerformanceTimer timer;
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const int numtris = hst_scene->N_tris;
const glm::vec3 box_min = hst_scene->box_min;
const glm::vec3 box_max = hst_scene->box_max;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool isFinished = false;
while (!isFinished) {
//printf("%d rays launched\n", num_paths);
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, dev_tris
, numtris
, box_min
, box_max
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
//timer.startGpuTimer();
#ifdef MATERIAL_SORT
thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, MCmp());
#endif
//timer.endGpuTimer();
//timer.printGPUTime(hst_scene->state.iterations, hst_scene->state.traceDepth);
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( shadeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
//// TODO: should be based off stream compaction results.
#ifdef STREAM_COMPACT
PathSegment* compact_point = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, IsNotZero());
num_paths = compact_point - dev_paths;
#endif
// stop once every path has terminated or the maximum trace depth is reached
isFinished = (num_paths <= 0) || (depth == traceDepth);
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 8d17e1ea65ff75629d10a67df786ba01e6d12bff.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
//#define MATERIAL_SORT
#define STREAM_COMPACT
#define TIME
#define AA
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
static Triangle * dev_tris = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const int numtris = hst_scene->N_tris;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
cudaMalloc(&dev_tris, numtris * sizeof(Triangle));
cudaMemcpy(dev_tris, scene->tris.data(), numtris * sizeof(Triangle), cudaMemcpyHostToDevice);
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
cudaFree(dev_tris);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, pathSegments[index].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
float jitter_x = 0;
float jitter_y = 0;
#ifdef AA
jitter_x = u01(rng);
jitter_y = u01(rng);
#endif
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)(x + jitter_x) - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)(y + jitter_y) - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
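// The comment above generateRayFromCamera() also lists a lens effect (depth of
// field) that this file does not implement. The helper below is only a minimal,
// hypothetical sketch of the idea: `lensRadius` and `focalDistance` are assumed
// parameters (they are not members of this project's Camera struct), the camera
// basis vectors are passed in explicitly, and nothing in this file calls it.
__device__ inline void jitterRayOnThinLens(Ray& ray,
                                           const glm::vec3& camRight, const glm::vec3& camUp,
                                           float lensRadius, float focalDistance,
                                           thrust::default_random_engine& rng)
{
    thrust::uniform_real_distribution<float> u01(0, 1);
    // sample a point on the lens disk (simple polar sampling)
    float r     = lensRadius * sqrtf(u01(rng));
    float theta = 2.0f * 3.14159265f * u01(rng);
    // the point the original ray hits on the plane of focus stays fixed
    glm::vec3 focalPoint = ray.origin + focalDistance * ray.direction;
    // shift the origin inside the aperture and re-aim at the focal point
    ray.origin += camRight * (r * cosf(theta)) + camUp * (r * sinf(theta));
    ray.direction = glm::normalize(focalPoint - ray.origin);
}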
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
, Triangle * tris
, int N_tris
, glm::vec3 box_min
, glm::vec3 box_max
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == TRIMESH)
{
t = triangleMeshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside,
tris, N_tris, box_min, box_max);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
intersections[path_index].materialId = -1;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths && pathSegments[idx].remainingBounces > 0)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
Ray r = pathSegments[idx].ray;
glm::vec3 rayPosIntersect = getPointOnRay(r, intersection.t);
scatterRay(pathSegments[idx], rayPosIntersect, intersection.surfaceNormal, material, rng);
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
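// The comment above shadeMaterial() stresses that it does not evaluate a BSDF
// itself; that work is delegated to scatterRay() from interactions.h. Purely as
// an illustration of what a diffuse bounce involves, here is a hedged sketch of
// cosine-weighted hemisphere sampling about a surface normal. It is not used by
// this file and is independent of whatever scatterRay() does internally.
__device__ inline glm::vec3 sampleCosineHemisphereSketch(const glm::vec3& normal,
                                                         thrust::default_random_engine& rng)
{
    thrust::uniform_real_distribution<float> u01(0, 1);
    float u = u01(rng);
    float v = u01(rng);
    float r = sqrtf(u);                  // radius on the unit disk
    float phi = 2.0f * 3.14159265f * v;  // angle on the unit disk
    // build an orthonormal basis around the normal
    glm::vec3 helper = (fabsf(normal.x) > 0.1f) ? glm::vec3(0, 1, 0) : glm::vec3(1, 0, 0);
    glm::vec3 tangent = glm::normalize(glm::cross(helper, normal));
    glm::vec3 bitangent = glm::cross(normal, tangent);
    // lift the disk sample onto the hemisphere (Malley's method)
    return glm::normalize(tangent * (r * cosf(phi)) +
                          bitangent * (r * sinf(phi)) +
                          normal * sqrtf(1.0f - u));
}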
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
//PerformanceTimer timer;
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
const int numtris = hst_scene->N_tris;
const glm::vec3 box_min = hst_scene->box_min;
const glm::vec3 box_max = hst_scene->box_max;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool isFinished = false;
while (!isFinished) {
//printf("%d rays launched\n", num_paths);
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, dev_tris
, numtris
, box_min
, box_max
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
//timer.startGpuTimer();
#ifdef MATERIAL_SORT
thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, MCmp());
#endif
//timer.endGpuTimer();
//timer.printGPUTime(hst_scene->state.iterations, hst_scene->state.traceDepth);
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
//// TODO: should be based off stream compaction results.
#ifdef STREAM_COMPACT
PathSegment* compact_point = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, IsNotZero());
num_paths = compact_point - dev_paths;
#endif
// stop once every path has terminated or the maximum trace depth is reached
isFinished = (num_paths <= 0) || (depth == traceDepth);
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
b0a8c7144f602c3130eeaedd8f819c126682b1ca.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief Breadth-first Search Top-Down test program
* @file
*/
#include "Static/BreadthFirstSearch/TopDown2.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <hip/hip_runtime_api.h> //--profile-from-start off
template <typename HornetGraph, typename BFS>
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv,false);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
Timer<DEVICE> TM;
hipProfilerStart();
TM.start();
HornetGraph hornet_graph(hornet_init);
TM.stop();
hipProfilerStop();
TM.print("Initialization Time:");
BFS bfs_top_down(hornet_graph);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
std::cout << "My root is " << root << std::endl;
bfs_top_down.set_parameters(root);
hipProfilerStart();
TM.start();
bfs_top_down.run();
TM.stop();
hipProfilerStop();
TM.print("TopDown2");
std::cout << "Number of levels is : " << bfs_top_down.getLevels() << std::endl;
auto is_correct = bfs_top_down.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
int main(int argc, char* argv[]) {
int ret = 0;
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system; if the initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
//ret = exec<hornets_nest::HornetDynamicGraph, hornets_nest::BfsTopDown2Dynamic>(argc, argv);
ret = exec<hornets_nest::HornetStaticGraph, hornets_nest::BfsTopDown2Static >(argc, argv);
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
return ret;
}
| b0a8c7144f602c3130eeaedd8f819c126682b1ca.cu | /**
* @brief Breadth-first Search Top-Down test program
* @file
*/
#include "Static/BreadthFirstSearch/TopDown2.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <cuda_profiler_api.h> //--profile-from-start off
template <typename HornetGraph, typename BFS>
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv,false);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
Timer<DEVICE> TM;
cudaProfilerStart();
TM.start();
HornetGraph hornet_graph(hornet_init);
TM.stop();
cudaProfilerStop();
TM.print("Initialization Time:");
BFS bfs_top_down(hornet_graph);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
std::cout << "My root is " << root << std::endl;
bfs_top_down.set_parameters(root);
cudaProfilerStart();
TM.start();
bfs_top_down.run();
TM.stop();
cudaProfilerStop();
TM.print("TopDown2");
std::cout << "Number of levels is : " << bfs_top_down.getLevels() << std::endl;
auto is_correct = bfs_top_down.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
int main(int argc, char* argv[]) {
int ret = 0;
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system; if the initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
//ret = exec<hornets_nest::HornetDynamicGraph, hornets_nest::BfsTopDown2Dynamic>(argc, argv);
ret = exec<hornets_nest::HornetStaticGraph, hornets_nest::BfsTopDown2Static >(argc, argv);
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
return ret;
}
|
ee35416b1d93b5d413bb7ecbdc26a658f6c787cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if defined(__HIPCC__)
#include "rocblas.h"
#include <hipsparse.h>
#include <rocblas.h>
#endif
#include "dtts_merging.incl"
#define TILE_WIDTH 16
#if defined(__HIPCC__)
__global__
void sinterpolate_gpu(point_t* points,seaminfo* seaminf, int nsize, int bsize, float drange){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i<bsize && j<bsize && points[i+j*bsize].x>-1e3 ){
node_t p(i,j);
float wtot=0, wsrcx=0, wsrcy=0;
for (unsigned int k=0; k<nsize; k++)
{
float d = sqrtf((p.x-seaminf[k].pt.x)*(p.x-seaminf[k].pt.x) + (p.y-seaminf[k].pt.y)*(p.y-seaminf[k].pt.y));
float w = powf((drange-d)/(drange*d),4); //powf(((d*d)/(drange*drange))-1.,2);
wtot = wtot+w;
if (d <= drange)
{
wsrcx += w*(seaminf[k].diffx);
wsrcy += w*(seaminf[k].diffy);
}
}
points[i+j*bsize].x = (wsrcx/wtot);
points[i+j*bsize].y = (wsrcy/wtot);
}
}
#endif
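// A host-side sketch mirroring the kernel above, kept only for reference. (The
// code further down mentions a sinterpolate_cpu() that presumably lives in
// dtts_merging.incl; this sketch is not that function and is never called.)
inline void sinterpolate_host_sketch(point_t* points, seaminfo* seaminf,
                                     int nsize, int bsize, float drange)
{
    for (int j = 0; j < bsize; j++)
        for (int i = 0; i < bsize; i++) {
            if (points[i + j * bsize].x <= -1e3) continue;   // masked-out pixel
            float wtot = 0.f, wsrcx = 0.f, wsrcy = 0.f;
            for (int k = 0; k < nsize; k++) {
                float ddx = i - seaminf[k].pt.x;
                float ddy = j - seaminf[k].pt.y;
                float d = sqrtf(ddx * ddx + ddy * ddy);
                float w = powf((drange - d) / (drange * d), 4);   // Shepard-style weight
                wtot += w;
                if (d <= drange) {
                    wsrcx += w * seaminf[k].diffx;
                    wsrcy += w * seaminf[k].diffy;
                }
            }
            points[i + j * bsize].x = wsrcx / wtot;
            points[i + j * bsize].y = wsrcy / wtot;
        }
}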
#if defined(__HIPCC__)
void cgrad (int N, int nnz, float* vals, int* colind, int* rowptr, float* X, float* B, int* niter, float* epsilon){
hipblasInit();
//for (int k=0; k<N; k++){
// X[k] = 0.0;
//cout<<b[k]<<" ";
//}
//cout<<endl;
float* vals_dev; hipblasAlloc(nnz, sizeof(float), (void**) &vals_dev);
int* colind_dev; hipblasAlloc(nnz, sizeof(int), (void**) &colind_dev);
int * rowptr_dev; hipblasAlloc(N+1, sizeof(int), (void**) &rowptr_dev);
float * X_dev; hipblasAlloc(N, sizeof(float), (void**) &X_dev);
float * B_dev; hipblasAlloc(N, sizeof(float), (void**) &B_dev);
//int* niter_dev; hipblasAlloc(1, sizeof(int), (void**) &niter_dev);
//float* epsilon_dev; hipblasAlloc(1, sizeof(float), (void**) &epsilon_dev);
hipblasSetVector (nnz, sizeof(float),vals, 1, vals_dev, 1);
hipblasSetVector (nnz, sizeof(int),colind, 1, colind_dev, 1);
hipblasSetVector (N+1, sizeof(int),rowptr, 1, rowptr_dev, 1);
hipblasSetVector (N, sizeof(float),X, 1, X_dev, 1);
hipblasSetVector (N, sizeof(float),B, 1, B_dev, 1);
//*niter = 0;
/*
hipDeviceProp_t deviceProp;
int devID = 0;
if (devID < 0) {
printf("exiting...\n");
exit(0);
}
hipGetDeviceProperties(&deviceProp, devID) ;
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if(version < 0x11)
{
printf("Requires a minimum CUDA compute 1.1 capability\n");
printf("PASSED");
hipDeviceReset();
}*/
//sicl_gscsrcg_seq( N, vals, colind, rowptr, X, B,P_NONE,niter,epsilon);
sicl_gscsrcg( N, vals_dev, colind_dev, rowptr_dev, X_dev, B_dev,P_NONE,niter,epsilon);
//bicgstab_kernel( N, vals_dev, colind_dev, rowptr_dev, X_dev, B_dev,P_NONE,niter,epsilon);
//sicl_gscsrmv( N, vals_dev, colind_dev, rowptr_dev, X_dev, B_dev);
/*int max_iter =10000;
hipsparseHandle_t handle = 0;
hipsparseStatus_t status;
status = hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE initialization error\n" );
return ;
}
hipsparseMatDescr_t descr = 0;
status = hipsparseCreateMatDescr(&descr);
if (status != HIPSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE hipsparseCreateMatDescr error\n" );
return ;
}
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
float a, b, r0, r1;
float *d_Ax;
float *d_p;
hipMalloc((void**)&d_p, N*sizeof(float));
hipMalloc((void**)&d_Ax, N*sizeof(float));
hipsparseScsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, 1.0, descr, vals_dev, rowptr_dev, colind_dev, X_dev, 0.0, d_Ax);
hipblasSaxpy(N, -1.0, d_Ax, 1, B_dev, 1);
r1 = hipblasSdot(N, B_dev, 1, B_dev, 1);
int k = 1;
const float tol = 1e-5;
while (r1 > tol*tol && k <= max_iter) {
if (k > 1) {
b = r1 / r0;
hipblasSscal(N, b, d_p, 1);
hipblasSaxpy(N, 1.0, B_dev, 1, d_p, 1);
} else {
hipblasScopy(N, B_dev, 1, d_p, 1);
}
hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, 1.0, descr, vals_dev, rowptr_dev, colind_dev,d_p, 0.0, d_Ax);
a = r1 / hipblasSdot(N, d_p, 1, d_Ax, 1);
hipblasSaxpy(N, a, d_p, 1, X_dev, 1);
hipblasSaxpy(N, -a, d_Ax, 1, B_dev, 1);
r0 = r1;
r1 = hipblasSdot(N, B_dev, 1, B_dev, 1);
hipDeviceSynchronize();
//shrLog("iteration = %3d, residual = %e\n", k, sqrtf(r1));
k++;
}
hipFree(d_p);
hipFree(d_Ax);
*/ hipblasGetVector (N, sizeof(float),X_dev, 1, X, 1);
hipblasFree(vals_dev);
hipblasFree(colind_dev);
hipblasFree(rowptr_dev);
hipblasFree(X_dev);
hipblasFree(B_dev);
//hipblasFree(niter_dev);
//hipblasFree(epsilon_dev);
hipblasShutdown();
//cout<<"Niter: "<<*niter<<" Epsilon: "<<*epsilon<<endl;
//for (int k=0; k<N; k++) cout<<X[k]<<" ";
//cout<<endl;
}
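// A minimal usage sketch for cgrad(): solve a tiny symmetric positive-definite
// tridiagonal system stored in CSR format. The wrapper name and the example
// matrix are made up purely for illustration; nothing in this file calls it.
inline void cgrad_example()
{
    // 3x3 matrix [ 4 -1 0; -1 4 -1; 0 -1 4 ] in CSR form
    float vals[] = { 4.f, -1.f, -1.f, 4.f, -1.f, -1.f, 4.f };
    int colind[] = { 0, 1, 0, 1, 2, 1, 2 };
    int rowptr[] = { 0, 2, 5, 7 };
    float B[] = { 1.f, 2.f, 3.f };   // right-hand side
    float X[] = { 0.f, 0.f, 0.f };   // initial guess, overwritten with the solution
    int niter = 0;
    float epsilon = 0.f;
    cgrad(3, 7, vals, colind, rowptr, X, B, &niter, &epsilon);
}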
#if defined(__HIPCC__)
void patch_merging(Image* dest_f, Image* patch_f, int dx, int dy, int nlevel, float drange)
{
int w = (*patch_f).width(), h = patch_f->height();
Image* mask = graphCut(dest_f,patch_f,dx,dy); //Tested and severe=false performs the best
//mask->savePGM("/tmp/mask.pgm");
Gradient pdest_g(Image(w,h),Image(w,h));
Gradient patch_g = get_gradient(*patch_f);
vector<node_t> seam;
vector<float> xdiff, ydiff;
vector<seaminfo> seaminf;
for (int i=0; i<w; i++)
for (int j=0; j<h; j++){
if ( (*mask)(i,j) >= vsSINK){
float h = dest_f->getPixelXY(i+dx, j+dy);
float h1 = dest_f->getPixelXY(i+dx-1, j+dy);
float h2 = dest_f->getPixelXY(i+dx, j+dy-1);
if (h1>BG) (pdest_g.first)(i,j)= h - h1;
if (h2>BG) (pdest_g.second)(i,j)= h - h2;
}
}
for (int j=0; j<patch_f->height(); j++)
for (int i=0; i<patch_f->width(); i++){
if ( (*mask)(i,j) == vsSINK ){
//seam.push_back(node_t(i,j));
//xdiff.push_back(pdest_g.first(i,j)-patch_g.first(i,j));
//ydiff.push_back(pdest_g.second(i,j)-patch_g.second(i,j));
seam.push_back(node_t(i,j));
seaminfo tmp(node_t(i,j), pdest_g.first(i,j)-patch_g.first(i,j), pdest_g.second(i,j)-patch_g.second(i,j) );
seaminf.push_back(tmp);
}
if ( (*mask)(i,j) >= vsSINK ){
patch_g.first(i,j) = pdest_g.first(i,j);
patch_g.second(i,j) = pdest_g.second(i,j);
(*patch_f)(i,j) = dest_f->getPixelXY(i+dx, j+dy);
}
}
//patch_g.first.savePGM("/tmp/patch_gx_baf.pgm",20); patch_g.second.savePGM("/tmp/patch_gy_baf.pgm",20);
//pdest_g.first.savePGM("/tmp/patch_gx_bcf.pgm",20); pdest_g.second.savePGM("/tmp/patch_gy_bcf.pgm",20);
if (seam.size()>0)
{
{
int bsize = mask->width();
//patch_g.first.savePGM("/tmp/patch_gx_bef.pgm",20); patch_g.second.savePGM("/tmp/patch_gy_bef.pgm",20);
// Shepard Interpolation
point_t* points = new point_t[bsize*bsize];
for (int j=0; j<patch_f->height(); j++)
for (int i=0; i<patch_f->width(); i++){
point_t tmp(-1e5,-1e5);
if ( (*mask)(i,j) < vsSINK )
tmp = point_t(0,0);
points[i+j*bsize] = tmp;
}
//sinterpolate_cpu(points, &seaminf[0], seaminf.size(), bsize, drange);
point_t* points_dev; hipMalloc( (void**) &points_dev, sizeof(point_t)*bsize*bsize );
seaminfo* seaminf_dev; hipMalloc( (void**) &seaminf_dev, sizeof(seaminfo)*seaminf.size() );
hipMemcpy(points_dev, points, sizeof(point_t)*bsize*bsize, hipMemcpyHostToDevice );
hipMemcpy(seaminf_dev, &seaminf[0], sizeof(seaminfo)*seaminf.size(), hipMemcpyHostToDevice );
dim3 dimGrid( (bsize/TILE_WIDTH)+1, (bsize/TILE_WIDTH)+1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH);
hipLaunchKernelGGL(( sinterpolate_gpu), dim3(dimGrid),dim3(dimBlock), 0, 0, points_dev, seaminf_dev, seaminf.size(), bsize, drange);
hipMemcpy(points, points_dev, sizeof(point_t)*bsize*bsize, hipMemcpyDeviceToHost );
hipFree(points_dev);
hipFree(seaminf_dev);
for (int j=0; j<patch_f->height(); j++)
for (int i=0; i<patch_f->width(); i++)
if ( (*mask)(i,j) < vsSINK ){
patch_g.first(i,j) = patch_g.first(i,j) + points[i+j*bsize].x; //(wsrcx/wtot);
patch_g.second(i,j) = patch_g.second(i,j) + points[i+j*bsize].y; //(wsrcy/wtot);
}
delete [] points;
}
//patch_g.first.savePGM("/tmp/patch_gx_bef.pgm",20); patch_g.second.savePGM("/tmp/patch_gy_bef.pgm",20);
//sinterpolate_g(patch_g,*mask,seam,xdiff,ydiff,drange);
//patch_g.first.savePGM("/tmp/patch_gx.pgm",20); patch_g.second.savePGM("/tmp/patch_gy.pgm",20);
Image div= get_divergent(patch_g);
//div.savePGM("/tmp/patch_div.pgm",15);
int *pos = new int [w*h];
uint N = 0;
for (int y=0; y<h; y++)
for (int x=0; x<w; x++)
{
pos[x+y*w] = N;
N++;
}
//cin.get();
//patch_f->savePGM("/tmp/res_cand1.pgm",dest_f->maxval);
poissonsolve(dest_f,patch_f,div,pos,N,dx,dy);
//patch_f->savePGM("/tmp/res_cand2.pgm",dest_f->maxval);
//cin.get();
delete [] pos;
}
for (int i=0; i<patch_f->width(); i++)
for (int j=0; j<patch_f->height(); j++)
{
if (dest_f->inBounds(i+dx,j+dy))
{
(*dest_f)( i+dx,j+dy) = (*patch_f)(i,j) ;
}
}
delete mask;
}
#endif
#endif // closes the #if defined(__HIPCC__) opened above cgrad()
| ee35416b1d93b5d413bb7ecbdc26a658f6c787cc.cu | #if defined(__CUDACC__)
#include "cublas.h"
#include <cusparse.h>
#include <cublas.h>
#endif
#include "dtts_merging.incl"
#define TILE_WIDTH 16
#if defined(__CUDACC__)
__global__
void sinterpolate_gpu(point_t* points,seaminfo* seaminf, int nsize, int bsize, float drange){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i<bsize && j<bsize && points[i+j*bsize].x>-1e3 ){
node_t p(i,j);
float wtot=0, wsrcx=0, wsrcy=0;
for (unsigned int k=0; k<nsize; k++)
{
float d = sqrtf((p.x-seaminf[k].pt.x)*(p.x-seaminf[k].pt.x) + (p.y-seaminf[k].pt.y)*(p.y-seaminf[k].pt.y));
float w = powf((drange-d)/(drange*d),4); //powf(((d*d)/(drange*drange))-1.,2);
wtot = wtot+w;
if (d <= drange)
{
wsrcx += w*(seaminf[k].diffx);
wsrcy += w*(seaminf[k].diffy);
}
}
points[i+j*bsize].x = (wsrcx/wtot);
points[i+j*bsize].y = (wsrcy/wtot);
}
}
#endif
#if defined(__CUDACC__)
void cgrad (int N, int nnz, float* vals, int* colind, int* rowptr, float* X, float* B, int* niter, float* epsilon){
cublasInit();
//for (int k=0; k<N; k++){
// X[k] = 0.0;
//cout<<b[k]<<" ";
//}
//cout<<endl;
float* vals_dev; cublasAlloc(nnz, sizeof(float), (void**) &vals_dev);
int* colind_dev; cublasAlloc(nnz, sizeof(int), (void**) &colind_dev);
int * rowptr_dev; cublasAlloc(N+1, sizeof(int), (void**) &rowptr_dev);
float * X_dev; cublasAlloc(N, sizeof(float), (void**) &X_dev);
float * B_dev; cublasAlloc(N, sizeof(float), (void**) &B_dev);
//int* niter_dev; cublasAlloc(1, sizeof(int), (void**) &niter_dev);
//float* epsilon_dev; cublasAlloc(1, sizeof(float), (void**) &epsilon_dev);
cublasSetVector (nnz, sizeof(float),vals, 1, vals_dev, 1);
cublasSetVector (nnz, sizeof(int),colind, 1, colind_dev, 1);
cublasSetVector (N+1, sizeof(int),rowptr, 1, rowptr_dev, 1);
cublasSetVector (N, sizeof(float),X, 1, X_dev, 1);
cublasSetVector (N, sizeof(float),B, 1, B_dev, 1);
//*niter = 0;
/*
cudaDeviceProp deviceProp;
int devID = 0;
if (devID < 0) {
printf("exiting...\n");
exit(0);
}
cudaGetDeviceProperties(&deviceProp, devID) ;
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if(version < 0x11)
{
printf("Requires a minimum CUDA compute 1.1 capability\n");
printf("PASSED");
cudaThreadExit();
}*/
//sicl_gscsrcg_seq( N, vals, colind, rowptr, X, B,P_NONE,niter,epsilon);
sicl_gscsrcg( N, vals_dev, colind_dev, rowptr_dev, X_dev, B_dev,P_NONE,niter,epsilon);
//bicgstab_kernel( N, vals_dev, colind_dev, rowptr_dev, X_dev, B_dev,P_NONE,niter,epsilon);
//sicl_gscsrmv( N, vals_dev, colind_dev, rowptr_dev, X_dev, B_dev);
/*int max_iter =10000;
cusparseHandle_t handle = 0;
cusparseStatus_t status;
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE initialization error\n" );
return ;
}
cusparseMatDescr_t descr = 0;
status = cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
fprintf( stderr, "!!!! CUSPARSE cusparseCreateMatDescr error\n" );
return ;
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
float a, b, r0, r1;
float *d_Ax;
float *d_p;
cudaMalloc((void**)&d_p, N*sizeof(float));
cudaMalloc((void**)&d_Ax, N*sizeof(float));
cusparseScsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, 1.0, descr, vals_dev, rowptr_dev, colind_dev, X_dev, 0.0, d_Ax);
cublasSaxpy(N, -1.0, d_Ax, 1, B_dev, 1);
r1 = cublasSdot(N, B_dev, 1, B_dev, 1);
int k = 1;
const float tol = 1e-5;
while (r1 > tol*tol && k <= max_iter) {
if (k > 1) {
b = r1 / r0;
cublasSscal(N, b, d_p, 1);
cublasSaxpy(N, 1.0, B_dev, 1, d_p, 1);
} else {
cublasScopy(N, B_dev, 1, d_p, 1);
}
cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, 1.0, descr, vals_dev, rowptr_dev, colind_dev,d_p, 0.0, d_Ax);
a = r1 / cublasSdot(N, d_p, 1, d_Ax, 1);
cublasSaxpy(N, a, d_p, 1, X_dev, 1);
cublasSaxpy(N, -a, d_Ax, 1, B_dev, 1);
r0 = r1;
r1 = cublasSdot(N, B_dev, 1, B_dev, 1);
cudaThreadSynchronize();
//shrLog("iteration = %3d, residual = %e\n", k, sqrtf(r1));
k++;
}
cudaFree(d_p);
cudaFree(d_Ax);
*/ cublasGetVector (N, sizeof(float),X_dev, 1, X, 1);
cublasFree(vals_dev);
cublasFree(colind_dev);
cublasFree(rowptr_dev);
cublasFree(X_dev);
cublasFree(B_dev);
//cublasFree(niter_dev);
//cublasFree(epsilon_dev);
cublasShutdown();
//cout<<"Niter: "<<*niter<<" Epsilon: "<<*epsilon<<endl;
//for (int k=0; k<N; k++) cout<<X[k]<<" ";
//cout<<endl;
}
#if defined(__CUDACC__)
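// Merges a candidate patch into the destination height field:
// 1) graphCut() selects the seam mask, 2) destination gradients replace patch gradients on
// the sink side, 3) the gradient mismatch along the seam is spread over the patch by the
// sinterpolate_gpu kernel, 4) the divergence of the blended gradient field is fed to
// poissonsolve(), and 5) the solved patch is copied back into dest_f at offset (dx,dy).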
void patch_merging(Image* dest_f, Image* patch_f, int dx, int dy, int nlevel, float drange)
{
int w = (*patch_f).width(), h = patch_f->height();
Image* mask = graphCut(dest_f,patch_f,dx,dy); //Tested and severe=false performs the best
//mask->savePGM("/tmp/mask.pgm");
Gradient pdest_g(Image(w,h),Image(w,h));
Gradient patch_g = get_gradient(*patch_f);
vector<node_t> seam;
vector<float> xdiff, ydiff;
vector<seaminfo> seaminf;
for (int i=0; i<w; i++)
for (int j=0; j<h; j++){
if ( (*mask)(i,j) >= vsSINK){
float h = dest_f->getPixelXY(i+dx, j+dy);
float h1 = dest_f->getPixelXY(i+dx-1, j+dy);
float h2 = dest_f->getPixelXY(i+dx, j+dy-1);
if (h1>BG) (pdest_g.first)(i,j)= h - h1;
if (h2>BG) (pdest_g.second)(i,j)= h - h2;
}
}
for (int j=0; j<patch_f->height(); j++)
for (int i=0; i<patch_f->width(); i++){
if ( (*mask)(i,j) == vsSINK ){
//seam.push_back(node_t(i,j));
//xdiff.push_back(pdest_g.first(i,j)-patch_g.first(i,j));
//ydiff.push_back(pdest_g.second(i,j)-patch_g.second(i,j));
seam.push_back(node_t(i,j));
seaminfo tmp(node_t(i,j), pdest_g.first(i,j)-patch_g.first(i,j), pdest_g.second(i,j)-patch_g.second(i,j) );
seaminf.push_back(tmp);
}
if ( (*mask)(i,j) >= vsSINK ){
patch_g.first(i,j) = pdest_g.first(i,j);
patch_g.second(i,j) = pdest_g.second(i,j);
(*patch_f)(i,j) = dest_f->getPixelXY(i+dx, j+dy);
}
}
//patch_g.first.savePGM("/tmp/patch_gx_baf.pgm",20); patch_g.second.savePGM("/tmp/patch_gy_baf.pgm",20);
//pdest_g.first.savePGM("/tmp/patch_gx_bcf.pgm",20); pdest_g.second.savePGM("/tmp/patch_gy_bcf.pgm",20);
if (seam.size()>0)
{
{
int bsize = mask->width();
//patch_g.first.savePGM("/tmp/patch_gx_bef.pgm",20); patch_g.second.savePGM("/tmp/patch_gy_bef.pgm",20);
// Shepard Interpolation
point_t* points = new point_t[bsize*bsize];
for (int j=0; j<patch_f->height(); j++)
for (int i=0; i<patch_f->width(); i++){
point_t tmp(-1e5,-1e5);
if ( (*mask)(i,j) < vsSINK )
tmp = point_t(0,0);
points[i+j*bsize] = tmp;
}
//sinterpolate_cpu(points, &seaminf[0], seaminf.size(), bsize, drange);
point_t* points_dev; cudaMalloc( (void**) &points_dev, sizeof(point_t)*bsize*bsize );
seaminfo* seaminf_dev; cudaMalloc( (void**) &seaminf_dev, sizeof(seaminfo)*seaminf.size() );
cudaMemcpy(points_dev, points, sizeof(point_t)*bsize*bsize, cudaMemcpyHostToDevice );
cudaMemcpy(seaminf_dev, &seaminf[0], sizeof(seaminfo)*seaminf.size(), cudaMemcpyHostToDevice );
dim3 dimGrid( (bsize/TILE_WIDTH)+1, (bsize/TILE_WIDTH)+1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH);
sinterpolate_gpu<<<dimGrid,dimBlock>>>(points_dev, seaminf_dev, seaminf.size(), bsize, drange);
cudaMemcpy(points, points_dev, sizeof(point_t)*bsize*bsize, cudaMemcpyDeviceToHost );
cudaFree(points_dev);
cudaFree(seaminf_dev);
for (int j=0; j<patch_f->height(); j++)
for (int i=0; i<patch_f->width(); i++)
if ( (*mask)(i,j) < vsSINK ){
patch_g.first(i,j) = patch_g.first(i,j) + points[i+j*bsize].x; //(wsrcx/wtot);
patch_g.second(i,j) = patch_g.second(i,j) + points[i+j*bsize].y; //(wsrcy/wtot);
}
delete [] points;
}
//patch_g.first.savePGM("/tmp/patch_gx_bef.pgm",20); patch_g.second.savePGM("/tmp/patch_gy_bef.pgm",20);
//sinterpolate_g(patch_g,*mask,seam,xdiff,ydiff,drange);
//patch_g.first.savePGM("/tmp/patch_gx.pgm",20); patch_g.second.savePGM("/tmp/patch_gy.pgm",20);
Image div= get_divergent(patch_g);
//div.savePGM("/tmp/patch_div.pgm",15);
int *pos = new int [w*h];
uint N = 0;
for (int y=0; y<h; y++)
for (int x=0; x<w; x++)
{
pos[x+y*w] = N;
N++;
}
//cin.get();
//patch_f->savePGM("/tmp/res_cand1.pgm",dest_f->maxval);
poissonsolve(dest_f,patch_f,div,pos,N,dx,dy);
//patch_f->savePGM("/tmp/res_cand2.pgm",dest_f->maxval);
//cin.get();
delete [] pos;
}
for (int i=0; i<patch_f->width(); i++)
for (int j=0; j<patch_f->height(); j++)
{
if (dest_f->inBounds(i+dx,j+dy))
{
(*dest_f)( i+dx,j+dy) = (*patch_f)(i,j) ;
}
}
delete mask;
}
#endif
|
338b667b2104601e09b70d5a2272d08513d9b2b7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
void PRINT_FIELD(int, double*);
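// Prolongation (coarse -> fine) for the multigrid V-cycle: fine points coinciding with a
// coarse node copy its value, points on a coarse edge average the two adjacent coarse
// values, and cell centres average the four surrounding coarse values (bilinear).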
__global__ void INTERPOLATE_2D(int dimension, double* field_coarse, double* field_fine)
{
int N_fine = dimension;
int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_fine<N_fine&&idx_y_fine<N_fine)
{
int idx_fine = idx_x_fine + N_fine*idx_y_fine;
int N_coarse = (N_fine-1)/2 + 1;
int idx_x_coarse = idx_x_fine/2;
int idx_y_coarse = idx_y_fine/2;
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
if (idx_x_fine%2==0&&idx_y_fine%2==0)
field_fine[idx_fine] = field_coarse[idx_coarse];
else if (idx_x_fine%2==1&&idx_y_fine%2==0)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]);
else if (idx_x_fine%2==0&&idx_y_fine%2==1)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]);
else
field_fine[idx_fine] = 0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]);
}
}
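// Restriction (fine -> coarse): interior coarse points take a weighted average of nearby
// fine-grid values (weights 1/4, 1/8 and 1/16, summing to one); boundary points are
// injected, i.e. copied directly from the coincident fine point.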
__global__ void RESTRICT_2D(int dimension, double* field_fine, double* field_coarse)
{
int N_coarse = dimension;
int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse)
{
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
int N_fine = (N_coarse-1)*2 + 1;
int idx_x_fine = idx_x_coarse*2;
int idx_y_fine = idx_y_coarse*2;
int idx_fine = idx_x_fine + idx_y_fine*N_fine;
if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1)
field_coarse[idx_coarse] = 1./16.*(field_fine[idx_fine-4]+field_fine[idx_fine-2]+field_fine[idx_fine+2]+field_fine[idx_fine+4]) + 1./8.*(field_fine[idx_fine-3]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+3]) + 1./4.*field_fine[idx_fine];
else
field_coarse[idx_coarse] = field_fine[idx_fine];
// printf("%d\t%.4e\t", idx_coarse, field_coarse[idx_coarse]);
}
}
int main(void)
{
int N, N_level;
int tpb_x, tpb_y, bpg_x, bpg_y;
int *dimension_level;
double **field_level;
printf("Test the interpolate and restrict for multi-grid by GPU.\n\n");
printf("Enter the latttice size (N,N) .");
scanf("%d", &N);
printf("The lattice size is (%d,%d).\n", N, N);
// printf("Set the depth of the V process level.\n");
// scanf("%d", &N_level);
printf("The depth of the V process level will be set automatically.\n");
N_level = (int)(log2((N-1)/4.));
printf("The depth of the V process is %d .\n", N_level);
// printf("Set the photon mass.\n");
// scanf("%lf", &photon_mass);
// printf("The photon mass is %.4e .\n", photon_mass);
printf("Set the GPU threads per block (tx,ty). \n");
scanf("%d %d", &tpb_x, &tpb_y);
printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y);
printf("The block per grid will be set automatically.");
bpg_x = (N+tpb_x-1)/tpb_x;
bpg_y = (N+tpb_y-1)/tpb_y;
printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y);
printf("\n");
hipSetDevice(0);
dim3 tpb(tpb_x,tpb_y);
dim3 bpg(bpg_x,bpg_y);
hipMallocManaged(&dimension_level, (N_level+1)*sizeof(int));
field_level = (double**)malloc((N_level+1)*sizeof(double*));
int dimension = N-1;
for (int level=0; level<=N_level; level++)
{
hipMallocManaged(&field_level[level], (dimension+1)*(dimension+1)*sizeof(double));
dimension_level[level] = dimension + 1;
dimension /= 2;
}
for (int i=0; i<dimension_level[0]*dimension_level[0]; i++)
// field_level[0][i] = 1.0;
field_level[0][i] = i;
//hipLaunchKernelGGL(( RESTRICT_2D), dim3(bpg),dim3(tpb), 0, 0, dimension_level[1], field_level[0], field_level[1]);
//hipLaunchKernelGGL(( INTERPOLATE_2D), dim3(bpg),dim3(tpb), 0, 0, dimension_level[0], field_level[1], field_level[0]);
// hipDeviceSynchronize();
for (int i=0; i<N_level; i++)
{
hipLaunchKernelGGL(( RESTRICT_2D), dim3(bpg),dim3(tpb), 0, 0, dimension_level[i+1], field_level[i], field_level[i+1]);
hipDeviceSynchronize();
}
for (int j=0; j<N_level; j++)
{
for (int i=0; i<dimension_level[j]*dimension_level[j]; i++)
field_level[j][i] = 0.0;
}
for (int i=N_level; i>=1; i--)
{
hipLaunchKernelGGL(( INTERPOLATE_2D), dim3(bpg),dim3(tpb), 0, 0, dimension_level[i-1], field_level[i], field_level[i-1]);
hipDeviceSynchronize();
}
// PRINT_FIELD(dimension_level[1], field_level[1]);
PRINT_FIELD(dimension_level[0], field_level[0]);
free(field_level);
hipFree(dimension_level);
return EXIT_SUCCESS;
}
void PRINT_FIELD(int dimension, double* field)
{
for (int j=0; j<dimension*dimension; j++)
// printf("%.4e\n", field[j]);
printf("%.2f\n", field[j]);
}
| 338b667b2104601e09b70d5a2272d08513d9b2b7.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
void PRINT_FIELD(int, double*);
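// Prolongation (coarse -> fine) for the multigrid V-cycle: fine points coinciding with a
// coarse node copy its value, points on a coarse edge average the two adjacent coarse
// values, and cell centres average the four surrounding coarse values (bilinear).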
__global__ void INTERPOLATE_2D(int dimension, double* field_coarse, double* field_fine)
{
int N_fine = dimension;
int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_fine<N_fine&&idx_y_fine<N_fine)
{
int idx_fine = idx_x_fine + N_fine*idx_y_fine;
int N_coarse = (N_fine-1)/2 + 1;
int idx_x_coarse = idx_x_fine/2;
int idx_y_coarse = idx_y_fine/2;
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
if (idx_x_fine%2==0&&idx_y_fine%2==0)
field_fine[idx_fine] = field_coarse[idx_coarse];
else if (idx_x_fine%2==1&&idx_y_fine%2==0)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]);
else if (idx_x_fine%2==0&&idx_y_fine%2==1)
field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]);
else
field_fine[idx_fine] = 0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]);
}
}
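// Restriction (fine -> coarse): interior coarse points take a weighted average of nearby
// fine-grid values (weights 1/4, 1/8 and 1/16, summing to one); boundary points are
// injected, i.e. copied directly from the coincident fine point.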
__global__ void RESTRICT_2D(int dimension, double* field_fine, double* field_coarse)
{
int N_coarse = dimension;
int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x;
int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y;
if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse)
{
int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse;
int N_fine = (N_coarse-1)*2 + 1;
int idx_x_fine = idx_x_coarse*2;
int idx_y_fine = idx_y_coarse*2;
int idx_fine = idx_x_fine + idx_y_fine*N_fine;
if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1)
field_coarse[idx_coarse] = 1./16.*(field_fine[idx_fine-4]+field_fine[idx_fine-2]+field_fine[idx_fine+2]+field_fine[idx_fine+4]) + 1./8.*(field_fine[idx_fine-3]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+3]) + 1./4.*field_fine[idx_fine];
else
field_coarse[idx_coarse] = field_fine[idx_fine];
// printf("%d\t%.4e\t", idx_coarse, field_coarse[idx_coarse]);
}
}
int main(void)
{
int N, N_level;
int tpb_x, tpb_y, bpg_x, bpg_y;
int *dimension_level;
double **field_level;
printf("Test the interpolate and restrict for multi-grid by GPU.\n\n");
printf("Enter the latttice size (N,N) .");
scanf("%d", &N);
printf("The lattice size is (%d,%d).\n", N, N);
// printf("Set the depth of the V process level.\n");
// scanf("%d", &N_level);
printf("The depth of the V process level will be set automatically.\n");
N_level = (int)(log2((N-1)/4.));
printf("The depth of the V process is %d .\n", N_level);
// printf("Set the photon mass.\n");
// scanf("%lf", &photon_mass);
// printf("The photon mass is %.4e .\n", photon_mass);
printf("Set the GPU threads per block (tx,ty). \n");
scanf("%d %d", &tpb_x, &tpb_y);
printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y);
printf("The block per grid will be set automatically.");
bpg_x = (N+tpb_x-1)/tpb_x;
bpg_y = (N+tpb_y-1)/tpb_y;
printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y);
printf("\n");
cudaSetDevice(0);
dim3 tpb(tpb_x,tpb_y);
dim3 bpg(bpg_x,bpg_y);
cudaMallocManaged(&dimension_level, (N_level+1)*sizeof(int));
field_level = (double**)malloc((N_level+1)*sizeof(double*));
int dimension = N-1;
for (int level=0; level<=N_level; level++)
{
cudaMallocManaged(&field_level[level], (dimension+1)*(dimension+1)*sizeof(double));
dimension_level[level] = dimension + 1;
dimension /= 2;
}
for (int i=0; i<dimension_level[0]*dimension_level[0]; i++)
// field_level[0][i] = 1.0;
field_level[0][i] = i;
// RESTRICT_2D<<<bpg,tpb>>>(dimension_level[1], field_level[0], field_level[1]);
// INTERPOLATE_2D<<<bpg,tpb>>>(dimension_level[0], field_level[1], field_level[0]);
// cudaDeviceSynchronize();
for (int i=0; i<N_level; i++)
{
RESTRICT_2D<<<bpg,tpb>>>(dimension_level[i+1], field_level[i], field_level[i+1]);
cudaDeviceSynchronize();
}
for (int j=0; j<N_level; j++)
{
for (int i=0; i<dimension_level[j]*dimension_level[j]; i++)
field_level[j][i] = 0.0;
}
for (int i=N_level; i>=1; i--)
{
INTERPOLATE_2D<<<bpg,tpb>>>(dimension_level[i-1], field_level[i], field_level[i-1]);
cudaDeviceSynchronize();
}
// PRINT_FIELD(dimension_level[1], field_level[1]);
PRINT_FIELD(dimension_level[0], field_level[0]);
free(field_level);
cudaFree(dimension_level);
return EXIT_SUCCESS;
}
void PRINT_FIELD(int dimension, double* field)
{
for (int j=0; j<dimension*dimension; j++)
// printf("%.4e\n", field[j]);
printf("%.2f\n", field[j]);
}
|
002b1d4227a54f415c296274a0d2502e95162567.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <assert.h>
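// Each thread sums a contiguous block of `num` harmonic terms starting at index*num + 1,
// so the whole grid covers terms 1 .. threads*num without overlap.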
__global__ void partial_sum(long num, double *out) {
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y, index = x + y*blockDim.x*gridDim.x;
double sum = 0.0;
double cur = index*num + 1;
for (long i = 0; i < num; ++i) {
sum += 1.0/cur;
cur += 1.0;
}
out[index] = sum;
}
__global__ void add_harmonics(double start, double *partials, long num) {
partials[num] = start;
for (long i = 0; i < num; ++i) {
partials[num] += partials[i];
}
}
int main(int argc, char **argv) {
if (argc < 2) {
printf("usage:\n%s <N_ITERATIONS>\n", *argv);
return -1;
}
dim3 block(32, 8);
long threads_per_block = block.x * block.y, block_w = 6, block_h = 2, blocks = block_w * block_h, threads = threads_per_block*blocks;
long terms = (long)strtod(argv[1], 0), iterations_per_thread = terms/threads, iterations_left = terms%threads;
long bytes = (threads+1) * sizeof(double); // last elem is sum of all
dim3 grid(block_w, block_h);
double *partials, harmonics = 0.0;
for (long i = terms-iterations_left+1; i <= terms; ++i) { // GPU threads cover terms 1..terms-iterations_left; start one past that to avoid double counting
harmonics += 1.0/i;
}
hipMalloc(&partials, bytes);
hipLaunchKernelGGL(( partial_sum) , dim3(grid), dim3(block), 0, 0, iterations_per_thread, partials);
hipDeviceSynchronize();
hipLaunchKernelGGL(( add_harmonics) , dim3(1), dim3(1), 0, 0, harmonics, partials, threads); // we want to compute the sum of partial sums on the device
hipMemcpy(&harmonics, partials+threads, sizeof(double), hipMemcpyDeviceToHost);
hipFree(partials);
double gamma = harmonics - log(terms);
printf("%.17f\n", gamma);
return 0;
}
| 002b1d4227a54f415c296274a0d2502e95162567.cu | #include <stdio.h>
#include <math.h>
#include <assert.h>
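// Each thread sums a contiguous block of `num` harmonic terms starting at index*num + 1,
// so the whole grid covers terms 1 .. threads*num without overlap.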
__global__ void partial_sum(long num, double *out) {
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y, index = x + y*blockDim.x*gridDim.x;
double sum = 0.0;
double cur = index*num + 1;
for (long i = 0; i < num; ++i) {
sum += 1.0/cur;
cur += 1.0;
}
out[index] = sum;
}
__global__ void add_harmonics(double start, double *partials, long num) {
partials[num] = start;
for (long i = 0; i < num; ++i) {
partials[num] += partials[i];
}
}
int main(int argc, char **argv) {
if (argc < 2) {
printf("usage:\n%s <N_ITERATIONS>\n", *argv);
return -1;
}
dim3 block(32, 8);
long threads_per_block = block.x * block.y, block_w = 6, block_h = 2, blocks = block_w * block_h, threads = threads_per_block*blocks;
long terms = (long)strtod(argv[1], 0), iterations_per_thread = terms/threads, iterations_left = terms%threads;
long bytes = (threads+1) * sizeof(double); // last elem is sum of all
dim3 grid(block_w, block_h);
double *partials, harmonics = 0.0;
for (long i = terms-iterations_left+1; i <= terms; ++i) { // GPU threads cover terms 1..terms-iterations_left; start one past that to avoid double counting
harmonics += 1.0/i;
}
cudaMalloc(&partials, bytes);
partial_sum <<<grid, block>>> (iterations_per_thread, partials);
cudaDeviceSynchronize();
add_harmonics <<<1, 1>>> (harmonics, partials, threads); // we want to compute the sum of partial sums on the device
cudaMemcpy(&harmonics, partials+threads, sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(partials);
double gamma = harmonics - log(terms);
printf("%.17f\n", gamma);
return 0;
}
|
31658f137b8394348168143f5d1b59f558b97aab.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Authors: Giovanni Balduzzi ([email protected])
//
// This file implements the device methods of G0Interpolation<GPU>.
#include "dca/phys/dca_step/cluster_solver/ctint/walker/tools/g0_interpolation_gpu.hpp"
#include <hip/hip_runtime.h>
#include "dca/linalg/util/error_cuda.hpp"
#include "dca/util/cuda_blocks.hpp"
namespace dca {
namespace phys {
namespace solver {
namespace ctint {
// dca::phys::solver::ctint::
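// Single-thread helper kernel used by the host-side accessor below: it evaluates the
// interpolated G0(tau, lindex) on the device and writes the scalar result to *result.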
template <typename Real>
__global__ void g0InterpolationTestKernel(Real tau, const int lindex,
DeviceInterpolationData<Real> g0, Real* result) {
*result = g0(tau, lindex);
}
template <typename Real>
Real G0Interpolation<linalg::GPU, Real>::operator()(Real tau, int lindex) const {
Real* d_result;
Real result;
hipMalloc((void**)&d_result, sizeof(Real));
hipLaunchKernelGGL(( g0InterpolationTestKernel), dim3(1), dim3(1), 0, 0, tau, lindex, *this, d_result);
assert(hipSuccess == hipPeekAtLastError());
hipMemcpy(&result, d_result, sizeof(Real), hipMemcpyDeviceToHost);
hipFree(d_result);
return result;
}
template class G0Interpolation<linalg::GPU, float>;
template class G0Interpolation<linalg::GPU, double>;
} // namespace ctint
} // namespace solver
} // namespace phys
} // namespace dca
| 31658f137b8394348168143f5d1b59f558b97aab.cu | // Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Authors: Giovanni Balduzzi ([email protected])
//
// This file implements the device methods of G0Interpolation<GPU>.
#include "dca/phys/dca_step/cluster_solver/ctint/walker/tools/g0_interpolation_gpu.hpp"
#include <cuda_runtime.h>
#include "dca/linalg/util/error_cuda.hpp"
#include "dca/util/cuda_blocks.hpp"
namespace dca {
namespace phys {
namespace solver {
namespace ctint {
// dca::phys::solver::ctint::
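// Single-thread helper kernel used by the host-side accessor below: it evaluates the
// interpolated G0(tau, lindex) on the device and writes the scalar result to *result.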
template <typename Real>
__global__ void g0InterpolationTestKernel(Real tau, const int lindex,
DeviceInterpolationData<Real> g0, Real* result) {
*result = g0(tau, lindex);
}
template <typename Real>
Real G0Interpolation<linalg::GPU, Real>::operator()(Real tau, int lindex) const {
Real* d_result;
Real result;
cudaMalloc((void**)&d_result, sizeof(Real));
g0InterpolationTestKernel<<<1, 1>>>(tau, lindex, *this, d_result);
assert(cudaSuccess == cudaPeekAtLastError());
cudaMemcpy(&result, d_result, sizeof(Real), cudaMemcpyDeviceToHost);
cudaFree(d_result);
return result;
}
template class G0Interpolation<linalg::GPU, float>;
template class G0Interpolation<linalg::GPU, double>;
} // namespace ctint
} // namespace solver
} // namespace phys
} // namespace dca
|
4e97b3822ca41cdf46470cc71addc29048fc6b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CombineScreen(float* d_postEdge1, float* d_postEdge2, float* d_postGradient1, float* d_postGradient2, float* d_postGradient3, float* d_postSobel3LR, float* d_postSobel3UD, float* d_postSmooth31, float* d_output){
int id = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = 0; i < 73; ++i){
d_output[i + id * 73 + 73 * 73 * 0] = d_postEdge1[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 1] = d_postEdge2[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 2] = d_postGradient1[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 3] = d_postGradient2[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 4] = d_postGradient3[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 5] = d_postSobel3LR[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 6] = d_postSobel3UD[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 7] = d_postSmooth31[id * 73 + i];
}
} | 4e97b3822ca41cdf46470cc71addc29048fc6b0d.cu | #include "includes.h"
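// Packs eight 73x73 post-processed feature planes into one contiguous buffer: plane p for
// thread `id` is written at output[i + id*73 + 73*73*p].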
__global__ void CombineScreen(float* d_postEdge1, float* d_postEdge2, float* d_postGradient1, float* d_postGradient2, float* d_postGradient3, float* d_postSobel3LR, float* d_postSobel3UD, float* d_postSmooth31, float* d_output){
int id = threadIdx.x + blockDim.x * blockIdx.x;
for (int i = 0; i < 73; ++i){
d_output[i + id * 73 + 73 * 73 * 0] = d_postEdge1[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 1] = d_postEdge2[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 2] = d_postGradient1[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 3] = d_postGradient2[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 4] = d_postGradient3[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 5] = d_postSobel3LR[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 6] = d_postSobel3UD[id * 73 + i];
d_output[i + id * 73 + 73 * 73 * 7] = d_postSmooth31[id * 73 + i];
}
} |
8297b4545f2252a1c2372c567e3b1d15ac331d10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/elementwise_div_op.h"
#include <algorithm>
#include <functional>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_ops_utils.h"
#include "caffe2/utils/fixed_divisor.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
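// Gradient of broadcasted Div w.r.t. A: each dA element reduces dC / B over the broadcast
// (inner) dimensions using a block-wide reduction per outer index; the FixedDivisor arrays
// map the linear A index back to the corresponding C and B offsets.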
template <typename TGrad, typename TIn, int D>
__global__ void ComputeDivAGradientCUDAKernel(
const int outer_size,
const int inner_size,
const SimpleArray<FixedDivisor<int>, D> C_dims,
const SimpleArray<int, D> C_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FixedDivisor<int>, D> A_dims,
const TGrad* dC,
const TIn* B,
TGrad* dA) {
__shared__ typename BlockReduce<TGrad>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
TGrad sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int A_index = i * inner_size + j;
int C_index = 0;
int A_index_val = A_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
A_dims.data[d].DivMod(A_index_val, &A_index_val, &r);
C_index += r * C_strides.data[d];
}
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
C_dims.data[d].DivMod(C_index_val, &C_index_val, &r);
B_index += r * B_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
sum += __ldg(dC + C_index) / __ldg(B + B_index);
#else
sum += dC[C_index] / B[B_index];
#endif
}
sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, hipcub::Sum());
if (threadIdx.x == 0) {
dA[i] = sum;
}
__syncthreads();
}
}
template <typename TGrad, typename TIn, typename TOut>
__global__ void ComputeSimpleDivBGradientCUDAKernel(
const int size,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB) {
CUDA_1D_KERNEL_LOOP(i, size) {
#if __CUDA_ARCH__ >= 350
dB[i] = -__ldg(dC + i) * __ldg(C + i) / __ldg(B + i);
#else
dB[i] = -dC[i] * C[i] / B[i];
#endif
}
}
template <typename TGrad, typename TIn, typename TOut, int D>
__global__ void ComputeDivBGradientCUDAKernel(
const int outer_size,
const int inner_size,
const SimpleArray<int, D> C_strides,
const SimpleArray<FixedDivisor<int>, D> B_dims,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB) {
__shared__ typename BlockReduce<TGrad>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
TGrad sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int C_index = 0;
int B_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
B_dims.data[d].DivMod(B_index, &B_index, &r);
C_index += r * C_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
sum += -__ldg(dC + C_index) * __ldg(C + C_index) / __ldg(B + i);
#else
sum += -dC[C_index] * C[C_index] / B[i];
#endif
}
sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, hipcub::Sum());
if (threadIdx.x == 0) {
dB[i] = sum;
}
__syncthreads();
}
}
template <typename TGrad, typename TIn, int D>
void ComputeDivAGradientCUDAImpl(
const int outer_size,
const int inner_size,
const int* C_dims,
const int* B_dims,
const int* A_axes,
const TGrad* dC,
const TIn* B,
TGrad* dA,
CUDAContext* context) {
SimpleArray<FixedDivisor<int>, D> C_dims_arr;
SimpleArray<int, D> C_strides_arr;
SimpleArray<int, D> B_strides_arr;
SimpleArray<FixedDivisor<int>, D> A_dims_arr;
for (int i = 0; i < D; ++i) {
C_dims_arr.data[i] = FixedDivisor<int>(C_dims[i]);
A_dims_arr.data[i] = FixedDivisor<int>(C_dims[A_axes[i]]);
}
math::utils::ComputeTransposedStrides(D, C_dims, A_axes, C_strides_arr.data);
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
B_strides_arr.data[i] = B_dims[i] == 1 ? 0 : cur_stride;
cur_stride *= B_dims[i];
}
hipLaunchKernelGGL(( ComputeDivAGradientCUDAKernel<TGrad, TIn, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
C_dims_arr,
C_strides_arr,
B_strides_arr,
A_dims_arr,
dC,
B,
dA);
}
template <typename TGrad, typename TIn, typename TOut, int D>
void ComputeDivBGradientCUDAImpl(
const int outer_size,
const int inner_size,
const int* C_dims,
const int* B_axes,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB,
CUDAContext* context) {
SimpleArray<int, D> C_strides_arr;
SimpleArray<FixedDivisor<int>, D> B_dims_arr;
math::utils::ComputeTransposedStrides(D, C_dims, B_axes, C_strides_arr.data);
for (int i = 0; i < D; ++i) {
B_dims_arr.data[i] = FixedDivisor<int>(C_dims[B_axes[i]]);
}
hipLaunchKernelGGL(( ComputeDivBGradientCUDAKernel<TGrad, TIn, TOut, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, C_strides_arr, B_dims_arr, dC, B, C, dB);
}
template <typename TGrad, typename TIn>
void ComputeDivAGradientCUDA(
const std::vector<int>& C_dims,
const std::vector<int>& B_dims,
const std::vector<int>& A_axes,
const TGrad* dC,
const TIn* B,
TGrad* dA,
CUDAContext* context) {
CAFFE_ENFORCE_EQ(C_dims.size(), B_dims.size());
const int ndim = C_dims.size();
std::vector<int> A_transpose_axes(ndim);
math::utils::ComputeTransposeAxesForReduceOp(
ndim, A_axes.size(), A_axes.data(), A_transpose_axes.data());
const int pivot = ndim - A_axes.size();
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= C_dims[A_transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < ndim; ++i) {
inner_size *= C_dims[A_transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
ndim,
ComputeDivAGradientCUDAImpl,
TGrad,
TIn,
outer_size,
inner_size,
C_dims.data(),
B_dims.data(),
A_transpose_axes.data(),
dC,
B,
dA,
context);
} else if (outer_size > 0) {
math::Set<TGrad, CUDAContext>(outer_size, TGrad(0), dA, context);
}
}
template <typename TGrad, typename TIn, typename TOut>
void ComputeDivBGradientCUDA(
const std::vector<int>& C_dims,
const std::vector<int>& B_axes,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB,
CUDAContext* context) {
const int ndim = C_dims.size();
std::vector<int> B_transpose_axes(ndim);
math::utils::ComputeTransposeAxesForReduceOp(
ndim, B_axes.size(), B_axes.data(), B_transpose_axes.data());
const int pivot = ndim - B_axes.size();
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= C_dims[B_transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < ndim; ++i) {
inner_size *= C_dims[B_transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
ComputeDivBGradientCUDAImpl,
TGrad,
TIn,
TOut,
outer_size,
inner_size,
C_dims.data(),
B_transpose_axes.data(),
dC,
B,
C,
dB,
context);
} else if (outer_size > 0) {
math::Set<TGrad, CUDAContext>(outer_size, TGrad(0), dB, context);
}
}
} // namespace
template <>
template <typename TGrad, typename TIn, typename TOut>
bool DivFunctor<CUDAContext>::Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC,
const TIn* /* A */,
const TIn* B,
const TOut* C,
TGrad* dA,
TGrad* dB,
CUDAContext* context) const {
if (A_dims == B_dims) {
const int size = std::accumulate(
A_dims.cbegin(), A_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( ComputeSimpleDivBGradientCUDAKernel<TGrad, TIn, TOut>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, dC, B, C, dB);
math::Div(size, dC, B, dA, context);
return true;
}
const int ndim = ::max(A_dims.size(), B_dims.size());
std::vector<int> A_broadcast_dims(ndim);
std::vector<int> B_broadcast_dims(ndim);
std::vector<int> C_broadcast_dims(ndim);
math::utils::ComputeBroadcastBinaryOpDims(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A_broadcast_dims.data(),
B_broadcast_dims.data(),
C_broadcast_dims.data());
std::vector<int> A_axes;
std::vector<int> B_axes;
elementwise_ops_utils::ComputeBinaryBroadcastBackwardAxes(
A_dims, B_dims, &A_axes, &B_axes);
ComputeDivBGradientCUDA<TGrad, TIn, TOut>(
C_broadcast_dims, B_axes, dC, B, C, dB, context);
ComputeDivAGradientCUDA<TGrad, TIn>(
C_broadcast_dims, B_broadcast_dims, A_axes, dC, B, dA, context);
return true;
}
template <>
class BinaryElementwiseWithArgsGradientOp<
NumericTypes,
CUDAContext,
BinaryFunctorWithDefaultCtor<DivFunctor<CUDAContext>>,
SameTypeAsInput,
SameTypeAsInput>
final : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
BinaryElementwiseWithArgsGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<CUDAContext>(operator_def, ws),
OP_SINGLE_ARG(bool, "broadcast", legacy_broadcast_, false),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW"),
functor_(*this) {
if (legacy_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1, "Unsupported axis string", axis_str_);
const size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.empty(),
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
}
bool RunOnDevice() override {
return DispatchHelper<NumericTypes>::call(this, Input(1));
}
template <typename T>
bool DoRunWithType() {
auto* dA = Output(0);
auto* dB = Output(1);
const T* dC_data = nullptr;
const T* A_data = nullptr;
const T* B_data = nullptr;
const T* C_data = nullptr;
std::vector<int> A_dims;
std::vector<int> B_dims;
if (InputSize() == 3) {
const auto& B = Input(0);
const auto& C = Input(1);
const auto& dC = Input(2);
if (legacy_broadcast_) {
if (B.size() == 1) {
A_dims = {static_cast<int>(C.size())};
B_dims = {1};
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(C, B, axis_);
A_dims = {static_cast<int>(pre),
static_cast<int>(n),
static_cast<int>(post)};
B_dims = {static_cast<int>(n), 1};
}
} else {
std::copy(
C.sizes().cbegin(), C.sizes().cend(), std::back_inserter(A_dims));
std::copy(
B.sizes().cbegin(), B.sizes().cend(), std::back_inserter(B_dims));
}
B_data = B.template data<T>();
C_data = C.template data<T>();
dC_data = dC.template data<T>();
dA->ResizeLike(C);
dB->ResizeLike(B);
} else {
const auto& dC = Input(0);
const auto& A = Input(1);
const auto& B = Input(2);
const auto& C = Input(3);
if (legacy_broadcast_) {
if (B.size() == 1) {
A_dims = {static_cast<int>(A.size())};
B_dims = {1};
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(A, B, axis_);
A_dims = {static_cast<int>(pre),
static_cast<int>(n),
static_cast<int>(post)};
B_dims = {static_cast<int>(n), 1};
}
} else {
std::copy(
A.sizes().cbegin(), A.sizes().cend(), std::back_inserter(A_dims));
std::copy(
B.sizes().cbegin(), B.sizes().cend(), std::back_inserter(B_dims));
}
dC_data = dC.template data<T>();
A_data = A.template data<T>();
B_data = B.template data<T>();
C_data = C.template data<T>();
dA->ResizeLike(A);
dB->ResizeLike(B);
}
auto* dA_data = dA->template mutable_data<T>();
auto* dB_data = dB->template mutable_data<T>();
return functor_.Backward(
A_dims,
B_dims,
dC_data,
A_data,
B_data,
C_data,
dA_data,
dB_data,
&context_);
}
private:
const bool legacy_broadcast_;
int axis_;
const std::string axis_str_;
const std::string order_;
BinaryFunctorWithDefaultCtor<DivFunctor<CUDAContext>> functor_;
};
REGISTER_CUDA_OPERATOR(
Div,
BinaryElementwiseOp<NumericTypes, CUDAContext, DivFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
DivGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CUDAContext,
DivFunctor<CUDAContext>>);
} // namespace caffe2
| 8297b4545f2252a1c2372c567e3b1d15ac331d10.cu | #include "caffe2/operators/elementwise_div_op.h"
#include <algorithm>
#include <functional>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_ops_utils.h"
#include "caffe2/utils/fixed_divisor.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
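// Gradient of broadcasted Div w.r.t. A: each dA element reduces dC / B over the broadcast
// (inner) dimensions using a block-wide reduction per outer index; the FixedDivisor arrays
// map the linear A index back to the corresponding C and B offsets.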
template <typename TGrad, typename TIn, int D>
__global__ void ComputeDivAGradientCUDAKernel(
const int outer_size,
const int inner_size,
const SimpleArray<FixedDivisor<int>, D> C_dims,
const SimpleArray<int, D> C_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FixedDivisor<int>, D> A_dims,
const TGrad* dC,
const TIn* B,
TGrad* dA) {
__shared__ typename BlockReduce<TGrad>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
TGrad sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int A_index = i * inner_size + j;
int C_index = 0;
int A_index_val = A_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
A_dims.data[d].DivMod(A_index_val, &A_index_val, &r);
C_index += r * C_strides.data[d];
}
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
C_dims.data[d].DivMod(C_index_val, &C_index_val, &r);
B_index += r * B_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
sum += __ldg(dC + C_index) / __ldg(B + B_index);
#else
sum += dC[C_index] / B[B_index];
#endif
}
sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, cub::Sum());
if (threadIdx.x == 0) {
dA[i] = sum;
}
__syncthreads();
}
}
template <typename TGrad, typename TIn, typename TOut>
__global__ void ComputeSimpleDivBGradientCUDAKernel(
const int size,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB) {
CUDA_1D_KERNEL_LOOP(i, size) {
#if __CUDA_ARCH__ >= 350
dB[i] = -__ldg(dC + i) * __ldg(C + i) / __ldg(B + i);
#else
dB[i] = -dC[i] * C[i] / B[i];
#endif
}
}
template <typename TGrad, typename TIn, typename TOut, int D>
__global__ void ComputeDivBGradientCUDAKernel(
const int outer_size,
const int inner_size,
const SimpleArray<int, D> C_strides,
const SimpleArray<FixedDivisor<int>, D> B_dims,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB) {
__shared__ typename BlockReduce<TGrad>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
TGrad sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int C_index = 0;
int B_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
B_dims.data[d].DivMod(B_index, &B_index, &r);
C_index += r * C_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
sum += -__ldg(dC + C_index) * __ldg(C + C_index) / __ldg(B + i);
#else
sum += -dC[C_index] * C[C_index] / B[i];
#endif
}
sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, cub::Sum());
if (threadIdx.x == 0) {
dB[i] = sum;
}
__syncthreads();
}
}
template <typename TGrad, typename TIn, int D>
void ComputeDivAGradientCUDAImpl(
const int outer_size,
const int inner_size,
const int* C_dims,
const int* B_dims,
const int* A_axes,
const TGrad* dC,
const TIn* B,
TGrad* dA,
CUDAContext* context) {
SimpleArray<FixedDivisor<int>, D> C_dims_arr;
SimpleArray<int, D> C_strides_arr;
SimpleArray<int, D> B_strides_arr;
SimpleArray<FixedDivisor<int>, D> A_dims_arr;
for (int i = 0; i < D; ++i) {
C_dims_arr.data[i] = FixedDivisor<int>(C_dims[i]);
A_dims_arr.data[i] = FixedDivisor<int>(C_dims[A_axes[i]]);
}
math::utils::ComputeTransposedStrides(D, C_dims, A_axes, C_strides_arr.data);
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
B_strides_arr.data[i] = B_dims[i] == 1 ? 0 : cur_stride;
cur_stride *= B_dims[i];
}
ComputeDivAGradientCUDAKernel<TGrad, TIn, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
C_dims_arr,
C_strides_arr,
B_strides_arr,
A_dims_arr,
dC,
B,
dA);
}
template <typename TGrad, typename TIn, typename TOut, int D>
void ComputeDivBGradientCUDAImpl(
const int outer_size,
const int inner_size,
const int* C_dims,
const int* B_axes,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB,
CUDAContext* context) {
SimpleArray<int, D> C_strides_arr;
SimpleArray<FixedDivisor<int>, D> B_dims_arr;
math::utils::ComputeTransposedStrides(D, C_dims, B_axes, C_strides_arr.data);
for (int i = 0; i < D; ++i) {
B_dims_arr.data[i] = FixedDivisor<int>(C_dims[B_axes[i]]);
}
ComputeDivBGradientCUDAKernel<TGrad, TIn, TOut, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, C_strides_arr, B_dims_arr, dC, B, C, dB);
}
template <typename TGrad, typename TIn>
void ComputeDivAGradientCUDA(
const std::vector<int>& C_dims,
const std::vector<int>& B_dims,
const std::vector<int>& A_axes,
const TGrad* dC,
const TIn* B,
TGrad* dA,
CUDAContext* context) {
CAFFE_ENFORCE_EQ(C_dims.size(), B_dims.size());
const int ndim = C_dims.size();
std::vector<int> A_transpose_axes(ndim);
math::utils::ComputeTransposeAxesForReduceOp(
ndim, A_axes.size(), A_axes.data(), A_transpose_axes.data());
const int pivot = ndim - A_axes.size();
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= C_dims[A_transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < ndim; ++i) {
inner_size *= C_dims[A_transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
ndim,
ComputeDivAGradientCUDAImpl,
TGrad,
TIn,
outer_size,
inner_size,
C_dims.data(),
B_dims.data(),
A_transpose_axes.data(),
dC,
B,
dA,
context);
} else if (outer_size > 0) {
math::Set<TGrad, CUDAContext>(outer_size, TGrad(0), dA, context);
}
}
template <typename TGrad, typename TIn, typename TOut>
void ComputeDivBGradientCUDA(
const std::vector<int>& C_dims,
const std::vector<int>& B_axes,
const TGrad* dC,
const TIn* B,
const TOut* C,
TGrad* dB,
CUDAContext* context) {
const int ndim = C_dims.size();
std::vector<int> B_transpose_axes(ndim);
math::utils::ComputeTransposeAxesForReduceOp(
ndim, B_axes.size(), B_axes.data(), B_transpose_axes.data());
const int pivot = ndim - B_axes.size();
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= C_dims[B_transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < ndim; ++i) {
inner_size *= C_dims[B_transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
ComputeDivBGradientCUDAImpl,
TGrad,
TIn,
TOut,
outer_size,
inner_size,
C_dims.data(),
B_transpose_axes.data(),
dC,
B,
C,
dB,
context);
} else if (outer_size > 0) {
math::Set<TGrad, CUDAContext>(outer_size, TGrad(0), dB, context);
}
}
} // namespace
template <>
template <typename TGrad, typename TIn, typename TOut>
bool DivFunctor<CUDAContext>::Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC,
const TIn* /* A */,
const TIn* B,
const TOut* C,
TGrad* dA,
TGrad* dB,
CUDAContext* context) const {
if (A_dims == B_dims) {
const int size = std::accumulate(
A_dims.cbegin(), A_dims.cend(), 1, std::multiplies<int>());
ComputeSimpleDivBGradientCUDAKernel<TGrad, TIn, TOut>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dC, B, C, dB);
math::Div(size, dC, B, dA, context);
return true;
}
const int ndim = std::max(A_dims.size(), B_dims.size());
std::vector<int> A_broadcast_dims(ndim);
std::vector<int> B_broadcast_dims(ndim);
std::vector<int> C_broadcast_dims(ndim);
math::utils::ComputeBroadcastBinaryOpDims(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A_broadcast_dims.data(),
B_broadcast_dims.data(),
C_broadcast_dims.data());
std::vector<int> A_axes;
std::vector<int> B_axes;
elementwise_ops_utils::ComputeBinaryBroadcastBackwardAxes(
A_dims, B_dims, &A_axes, &B_axes);
ComputeDivBGradientCUDA<TGrad, TIn, TOut>(
C_broadcast_dims, B_axes, dC, B, C, dB, context);
ComputeDivAGradientCUDA<TGrad, TIn>(
C_broadcast_dims, B_broadcast_dims, A_axes, dC, B, dA, context);
return true;
}
template <>
class BinaryElementwiseWithArgsGradientOp<
NumericTypes,
CUDAContext,
BinaryFunctorWithDefaultCtor<DivFunctor<CUDAContext>>,
SameTypeAsInput,
SameTypeAsInput>
final : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
BinaryElementwiseWithArgsGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<CUDAContext>(operator_def, ws),
OP_SINGLE_ARG(bool, "broadcast", legacy_broadcast_, false),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW"),
functor_(*this) {
if (legacy_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1, "Unsupported axis string", axis_str_);
const size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.empty(),
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
}
bool RunOnDevice() override {
return DispatchHelper<NumericTypes>::call(this, Input(1));
}
template <typename T>
bool DoRunWithType() {
auto* dA = Output(0);
auto* dB = Output(1);
const T* dC_data = nullptr;
const T* A_data = nullptr;
const T* B_data = nullptr;
const T* C_data = nullptr;
std::vector<int> A_dims;
std::vector<int> B_dims;
if (InputSize() == 3) {
const auto& B = Input(0);
const auto& C = Input(1);
const auto& dC = Input(2);
if (legacy_broadcast_) {
if (B.size() == 1) {
A_dims = {static_cast<int>(C.size())};
B_dims = {1};
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(C, B, axis_);
A_dims = {static_cast<int>(pre),
static_cast<int>(n),
static_cast<int>(post)};
B_dims = {static_cast<int>(n), 1};
}
} else {
std::copy(
C.sizes().cbegin(), C.sizes().cend(), std::back_inserter(A_dims));
std::copy(
B.sizes().cbegin(), B.sizes().cend(), std::back_inserter(B_dims));
}
B_data = B.template data<T>();
C_data = C.template data<T>();
dC_data = dC.template data<T>();
dA->ResizeLike(C);
dB->ResizeLike(B);
} else {
const auto& dC = Input(0);
const auto& A = Input(1);
const auto& B = Input(2);
const auto& C = Input(3);
if (legacy_broadcast_) {
if (B.size() == 1) {
A_dims = {static_cast<int>(A.size())};
B_dims = {1};
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(A, B, axis_);
A_dims = {static_cast<int>(pre),
static_cast<int>(n),
static_cast<int>(post)};
B_dims = {static_cast<int>(n), 1};
}
} else {
std::copy(
A.sizes().cbegin(), A.sizes().cend(), std::back_inserter(A_dims));
std::copy(
B.sizes().cbegin(), B.sizes().cend(), std::back_inserter(B_dims));
}
dC_data = dC.template data<T>();
A_data = A.template data<T>();
B_data = B.template data<T>();
C_data = C.template data<T>();
dA->ResizeLike(A);
dB->ResizeLike(B);
}
auto* dA_data = dA->template mutable_data<T>();
auto* dB_data = dB->template mutable_data<T>();
return functor_.Backward(
A_dims,
B_dims,
dC_data,
A_data,
B_data,
C_data,
dA_data,
dB_data,
&context_);
}
private:
const bool legacy_broadcast_;
int axis_;
const std::string axis_str_;
const std::string order_;
BinaryFunctorWithDefaultCtor<DivFunctor<CUDAContext>> functor_;
};
REGISTER_CUDA_OPERATOR(
Div,
BinaryElementwiseOp<NumericTypes, CUDAContext, DivFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
DivGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CUDAContext,
DivFunctor<CUDAContext>>);
} // namespace caffe2
|
9427be11899c95b39264b55390f01d2fc188a66a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Includes, system
#define ulong4 uint4
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
// includes, kernels
#include "common.hip"
#include "mummergpu.h"
#include "mummergpu_kernel.cu"
#define BLOCKSIZE 256
#define CUDA_SAFE_CALL( call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C"
void getReferenceString(const char * filename, char** refstr, size_t* reflen);
extern "C"
void createTreeTexture(const char * filename,
PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture,
unsigned int* width, unsigned int* height,
AuxiliaryNodeData** aux_data,
int* num_nodes,
const char * dotfilename,
const char * texfilename);
extern "C"
void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
size_t device_memory_avail,
int min_match_length,
bool rc);
void printAlignments(char* ref,
ReferencePage* page,
char* query,
int qrylen,
int nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
// Timer management
struct Timer_t
{
struct timeval start_m;
struct timeval end_m;
};
void createTimer(struct Timer_t ** timer)
{
struct Timer_t * ptr = (struct Timer_t *) malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
*timer = ptr;
}
void startTimer(struct Timer_t * ptr)
{
gettimeofday(&(ptr->start_m), NULL);
}
void stopTimer(struct Timer_t * ptr)
{
gettimeofday(&(ptr->end_m), NULL);
}
float getTimerValue(struct Timer_t * ptr)
{
Timer_t * timer = ptr;
if (timer == NULL)
{
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) { stopTimer(ptr); }
return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(struct Timer_t * ptr)
{
free(ptr);
}
extern "C"
int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
getReferenceString(fromFile, &(ref->str), &(ref->len));
return 0;
}
extern "C"
int destroyReference(Reference* ref)
{
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
free(ref->h_ref_tex_array);
free(ref->aux_data);
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C"
int createQuerySet(const char* fromFile, QuerySet* queries) {
// fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1) {
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit (1);
}
queries->qfile = qfile;
return 0;
}
extern "C"
int destroyQuerySet(QuerySet* queries) {
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C"
void printStringForError(int err)
{
}
extern "C"
int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
MUMMERGPU_OPTIONS options,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
MatchContext* ctx)
{
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
// break out options here
ctx->on_cpu = options & ON_CPU;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
return 0;
}
extern "C"
int destroyMatchContext(MatchContext* ctx)
{
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
return 0;
}
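// Builds the device-texture representation of the reference substring [begin, end): the
// string is wrapped with a leading 's' and a trailing '$' sentinel, createTreeTexture
// flattens the tree into node/children pixel arrays, and the raw reference characters are
// repacked into a pitch x numrows layout (interleaved in blocks of 4 rows) for 2D texture
// lookups.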
void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end) {
// fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren * childrenTexture = NULL;
unsigned int height = 0;
unsigned int width = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width, &height,
&aux_data,
&num_nodes,
NULL,
NULL);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_height = height;
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = width * height * (sizeof(PixelOfNode) + sizeof(PixelOfChildren));
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
ref->h_ref_tex_array = (char *) malloc(numrows*refpitch);
ref->bytes_on_board += numrows*refpitch;
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) { ref->h_ref_tex_array[z] = 'Z'; }
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++)
{
int bigx = i % (block_dim);
int bigy = i / (block_dim);
y = bigy*blocksize+bigx%blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_tex_array[y*refpitch+x] = refstr[i];
if (x > maxx) { maxx = x; }
if (y > maxy) { maxy = y; }
}
if ((maxx >= refpitch) || (maxy >= numrows))
{
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
}
void loadReferenceTexture(MatchContext* ctx) {
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
hipChannelFormatDesc refTextureDesc =
hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned);
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
// fprintf(stderr, "allocating reference texture\n");
CUDA_SAFE_CALL(hipMallocArray( (hipArray**)(&ref->d_ref_tex_array),
&refTextureDesc,
ref->pitch,
numrows));
//ref->bytes_on_board += ref->pitch * numrows;
CUDA_SAFE_CALL(hipMemcpyToArray( (hipArray*)(ref->d_ref_tex_array),
0,
0,
ref->h_ref_tex_array,
numrows*ref->pitch,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.addressMode[1] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray( reftex, (hipArray*)ref->d_ref_tex_array, refTextureDesc));
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
else {
ref->d_ref_tex_array = NULL;
}
// fprintf(stderr,"done\n");
}
void unloadReferenceTexture(Reference* ref)
{
CUDA_SAFE_CALL(hipUnbindTexture( reftex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_tex_array)));
ref->d_ref_tex_array = NULL;
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx) {
Reference* ref = ctx->ref;
//ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
hipChannelFormatDesc nodeTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_SAFE_CALL( hipMallocArray( (hipArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_height ));
//ref->bytes_on_board += ref->tex_width * ref->tex_height * (sizeof(PixelOfNode));
CUDA_SAFE_CALL( hipMemcpyToArray( (hipArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.addressMode[1] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
CUDA_SAFE_CALL( hipBindTextureToArray( nodetex,
(hipArray*)ref->d_node_tex_array,
nodeTextureDesc));
hipChannelFormatDesc childrenTextureDesc =
hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_SAFE_CALL( hipMallocArray( (hipArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_height ));
//ref->bytes_on_board += ref->tex_width * ref->tex_height * sizeof(PixelOfNode);
CUDA_SAFE_CALL( hipMemcpyToArray((hipArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.addressMode[1] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
CUDA_SAFE_CALL( hipBindTextureToArray( childrentex,
(hipArray*)(ref->d_children_tex_array),
childrenTextureDesc));
// fprintf(stderr, "done\n");
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
else
{
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void unloadReference(MatchContext* ctx)
{
Reference* ref = ctx->ref;
CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array)));
ref->d_node_tex_array = NULL;
CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) );
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array)));
ref->d_children_tex_array = NULL;
unloadReferenceTexture(ctx->ref);
}
void loadQueries(MatchContext* ctx) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
// fprintf(stderr, "loadQueries on GPU: Allocating device memory for queries...\n");
CUDA_SAFE_CALL( hipMalloc((void**) &queries->d_tex_array, queries->texlen));
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMalloc((void**) &queries->d_addrs_tex_array,
numQueries * sizeof(int)));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMalloc((void**) &queries->d_lengths_array,
numQueries * sizeof(int)));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
// fprintf(stderr, "loadQueries on GPU: allocated %ld bytes done\n", 2 * numQueries*sizeof(int) + queries->texlen);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
// fprintf(stderr, "loadQueries on CPU: allocated %ld bytes done\n", numQueries*sizeof(int) + queries->texlen);
}
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
void unloadQueries(MatchContext* ctx) {
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(hipFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
void loadResultBuffer(MatchContext* ctx) {
unsigned int numQueries = ctx->queries->count;
assert (numQueries);
int match_length = ctx->min_match_length;
unsigned int numCoords = 0;
numCoords = ctx->queries->texlen - numQueries * (match_length + 1);
ctx->results.numCoords = numCoords;
// fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...", numQueries, (int)(numCoords*sizeof(MatchCoord)));
ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord));
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
ctx->results.bytes_on_board = 0;
CUDA_SAFE_CALL( hipMalloc( (void**) &ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord)));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL( hipMemset( (void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
else {
ctx->results.d_match_coords = NULL;
}
// fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx) {
CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
ctx->results.bytes_on_board = 0;
}
void freeResults(MatchContext* ctx, ReferencePage pages[], unsigned int num_pages) {
for (int i = 0; i < num_pages; ++i) {
free(pages[i].results.h_match_coords);
}
}
void transferResultsFromDevice(MatchContext* ctx) {
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * fromboardtimer = 0;
createTimer(&fromboardtimer);
startTimer(fromboardtimer);
#endif
CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
hipMemcpyDeviceToHost) );
#ifdef TIMING
stopTimer(fromboardtimer);
ctx->statistics.t_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
#endif
}
}
int flushOutput();
int addToBuffer(char* string);
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) {
return qry_addrs - qryid * (match_length + 1);
}
#define MAX_QUERY_LEN 8192
struct packed_slot {
unsigned short page;
unsigned short qpos;
MatchCoord coord;
};
struct packed_slot_array {
packed_slot* slots;
unsigned int num_slots;
};
void addPackedOutput(MatchContext* ctx, packed_slot_array** curr_output, packed_slot_array slot_array[]) {
unsigned int numQueries = ctx->queries->count;
if (*curr_output == NULL) {
*curr_output = slot_array;
}
else {
for (int i = 0; i < numQueries; i++) {
if (slot_array[i].num_slots) {
//packed_slot_array* slots = &(slot_array[i]);
(*curr_output)[i].slots = (packed_slot*)realloc((*curr_output)[i].slots, ((*curr_output)[i].num_slots + slot_array[i].num_slots) * sizeof(packed_slot));
memcpy((*curr_output)[i].slots + (*curr_output)[i].num_slots, slot_array[i].slots,
slot_array[i].num_slots * sizeof(packed_slot));
(*curr_output)[i].num_slots += slot_array[i].num_slots;
free(slot_array[i].slots);
}
}
free(slot_array);
}
}
char numbuffer[32];
void printRCSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) {
char* h_tex_array = ctx->queries->h_tex_array;
int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array;
int qrylen = ctx->queries->h_lengths_array[qry];
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + qry));
addToBuffer(" Reverse");
if (ctx->show_query_length) {
addToBuffer(" Len = ");
sprintf(numbuffer, "%d", qrylen);
addToBuffer(numbuffer);
}
addToBuffer("\n");
for (int j = 0; j < slots->num_slots; ++j) {
packed_slot slot = slots->slots[j];
if (slot.coord.edge_match_length & FRMASK) {
printAlignments(ctx->full_ref,
&(pages[slot.page]),
h_tex_array + h_addrs_tex_array[qry],
qrylen,
slot.coord.node,
slot.qpos,
(slot.coord.edge_match_length & FRUMASK),
ctx->min_match_length,
1,
ctx->forwardcoordinates);
}
}
}
int FOO;
void printForwardSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) {
char* h_tex_array = ctx->queries->h_tex_array;
int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array;
int qrylen = ctx->queries->h_lengths_array[qry];
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + qry));
if (ctx->show_query_length)
{
addToBuffer(" Len = ");
sprintf(numbuffer, "%d", qrylen);
addToBuffer(numbuffer);
}
addToBuffer("\n");
for (int j = 0; j < slots->num_slots; ++j)
{
packed_slot slot = slots->slots[j];
if (!(slot.coord.edge_match_length & FRMASK))
{
printAlignments(ctx->full_ref,
&(pages[slot.page]),
h_tex_array + h_addrs_tex_array[qry],
qrylen,
slot.coord.node,
slot.qpos,
slot.coord.edge_match_length,
ctx->min_match_length,
0,
ctx->forwardcoordinates);
}
}
FOO += slots->num_slots;
}
void printPackedResults(MatchContext* ctx, ReferencePage pages[], packed_slot_array slot_array[]) {
unsigned int numQueries = ctx->queries->count;
FOO = 0;
for (int qry = 0; qry < numQueries; qry++) {
packed_slot_array* slots = &(slot_array[qry]);
if (ctx->reverse) {
printRCSlots(ctx, pages, qry, slots);
}
else {
printForwardSlots(ctx, pages, qry, slots);
if (ctx->forwardreverse) {
printRCSlots(ctx, pages, qry, slots);
}
}
}
printf("FOO = %d\n", FOO);
flushOutput();
}
void packSlots(MatchContext* ctx, MatchResults* results, unsigned int page_num, packed_slot_array** slot_arrays, bool rc) {
unsigned int numQueries = ctx->queries->count;
int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array;
int match_length = ctx->min_match_length;
*slot_arrays = (packed_slot_array*)calloc(numQueries, sizeof(packed_slot_array));
for (int i = 0; i < numQueries; i++) {
int qlen;
if (i == numQueries - 1)
qlen = ctx->queries->texlen - h_addrs_tex_array[i] - match_length;
else
qlen = h_addrs_tex_array[i + 1] - h_addrs_tex_array[i] - match_length;
packed_slot* qslots = (packed_slot*)calloc(qlen, sizeof(packed_slot));
int filled = 0;
for (int p = 0; p < qlen; ++p) {
MatchCoord* coords = results->h_match_coords;
int query_coord_begin = match_coord_addrs(i, h_addrs_tex_array[i], match_length);
int query_coord_end = i < numQueries - 1 ?
match_coord_addrs(i + 1, h_addrs_tex_array[i + 1], match_length) : results->numCoords;
int query_coord = query_coord_begin + p;
if ((query_coord < query_coord_end) &&
(coords[query_coord].node > 1) &&
(!(coords[query_coord].edge_match_length & FRMASK) == !rc))
{
packed_slot s;
s.page = page_num;
s.qpos = p;
s.coord = coords[query_coord];
qslots[filled++] = s;
}
}
if (filled)
{
packed_slot* pslots = (packed_slot*)calloc(filled, sizeof(packed_slot));
memcpy(pslots, qslots, (filled)*sizeof(packed_slot));
(*slot_arrays)[i].slots = pslots;
(*slot_arrays)[i].num_slots = filled;
}
else
{
(*slot_arrays)[i].slots = NULL;
(*slot_arrays)[i].num_slots = 0;
}
free(qslots);
}
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
QuerySet* queries = ctx->queries;
char * queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
size_t queryLen;
char** names;
struct Timer_t * queryreadtimer = 0;
createTimer(&queryreadtimer);
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_query_read += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
return numQueries;
}
void destroyQueryBlock(QuerySet* queries) {
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void writeStatisticsFile(MatchContext* ctx, char* stats_filename) {
if (!stats_filename)
return;
FILE* f = fopen(stats_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
return;
}
fprintf(f, "Total,%f\n", ctx->statistics.t_total);
fprintf(f, "Kernel,%f\n", ctx->statistics.t_kernel);
fprintf(f, "Print matches,%f\n", ctx->statistics.t_output);
fprintf(f, "Copy queries to GPU,%f\n", ctx->statistics.t_to_board);
fprintf(f, "Copy output from GPU,%f\n", ctx->statistics.t_from_board);
fprintf(f, "Copy suffix tree to GPU,%f\n", ctx->statistics.t_moving_tree_pages);
fprintf(f, "Read queries from disk,%f\n", ctx->statistics.t_query_read);
fprintf(f, "Suffix tree constructions,%f\n", ctx->statistics.t_construction);
fprintf(f, "Minimum substring length, %d\n", ctx->min_match_length);
fprintf(f, "Average query length, %f\n", ctx->statistics.bp_avg_query_length);
fclose(f);
}
int matchSubset(MatchContext* ctx,
int query_block_offset,
ReferencePage pages[],
unsigned int num_pages) {
loadQueries(ctx);
packed_slot_array* packed_slots = NULL;
for (unsigned int i = 0; i < num_pages; ++i) {
ctx->ref = &(pages[i].ref);
loadReference(ctx);
loadResultBuffer(ctx);
#ifdef TIMING
struct Timer_t * ktimer = 0;
createTimer(&ktimer);
#endif
unsigned int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize,1,1);
dim3 dimGrid(ceil(numQueries/(float)BLOCKSIZE), 1, 1);
if (!ctx->on_cpu) {
fprintf(stderr,"[INFO] Blocks(%d, %d, %d)\n",
dimBlock.x, dimBlock.y, dimBlock.z);
fprintf(stderr,"[INFO] Grid(%d, %d, %d)\n",
dimGrid.x, dimGrid.y, dimGrid.z);
/*
fprintf(stderr,"Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
(int)ctx->queries->bytes_on_board,
(int)ctx->ref->bytes_on_board,
(int)ctx->results.bytes_on_board);
*/
}
#ifdef TIMING
startTimer(ktimer);
#endif
bool alignRC = ctx->reverse;
if (ctx->on_cpu) {
if (alignRC) {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
else {
if (alignRC) {
hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
else
{
hipLaunchKernelGGL(( mummergpuKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
}
hipDeviceSynchronize();
// printf("check if kernel execution generated an error\n");
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef TIMING
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_kernel += ktime;
fprintf(stderr,"[STATS] kernel time = %f\n", ktime);
deleteTimer(ktimer);
#endif
transferResultsFromDevice(ctx);
pages[i].results = ctx->results;
packed_slot_array* packed;
packSlots(ctx, &(pages[i].results), i, &packed, ctx->reverse);
addPackedOutput(ctx, &packed_slots, packed);
// printf("compute the reverse matches\n");
if (ctx->forwardreverse) {
#ifdef TIMING
struct Timer_t * rctimer = 0;
createTimer(&rctimer);
startTimer(rctimer);
#endif
if (ctx->on_cpu) {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
hipDeviceSynchronize();
}
#ifdef TIMING
stopTimer(rctimer);
float rctime = getTimerValue(rctimer);
ctx->statistics.t_kernel += rctime;
fprintf(stderr,"rc kernel time= %f\n", rctime);
deleteTimer(rctimer);
#endif
transferResultsFromDevice(ctx);
pages[i].results = ctx->results;
packed_slot_array* packed;
packSlots(ctx, &(pages[i].results), i, &packed, 1);
addPackedOutput(ctx, &packed_slots, packed);
}// end if (ctx->forwardreverse)
free(pages[i].results.h_match_coords);
pages[i].results.h_match_coords = NULL;
unloadReference(ctx);
unloadResultBuffer(ctx);
}// end for
#ifdef TIMING
struct Timer_t * otimer = 0;
createTimer(&otimer);
startTimer(otimer);
// printPackedResults(ctx, pages, packed_slots);
stopTimer(otimer);
ctx->statistics.t_output += getTimerValue(otimer);
deleteTimer(otimer);
#endif
for (int i = 0; i < ctx->queries->count; ++i) {
free(packed_slots[i].slots);
}
free(packed_slots);
unloadQueries(ctx);
return 0;
}
#define BREATHING_ROOM (64 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 7500000
#define CHUMP_CHANGE 1500000
extern "C"
int matchQueries(MatchContext* ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(ulong4));
assert(sizeof(struct PixelOfChildren) == sizeof(ulong4));
ctx->statistics.t_kernel = 0.0;
ctx->statistics.t_output = 0.0;
ctx->statistics.t_to_board = 0.0;
ctx->statistics.t_from_board = 0.0;
ctx->statistics.t_moving_tree_pages = 0.0;
ctx->statistics.t_query_read = 0.0;
ctx->statistics.t_total = 0.0;
ctx->statistics.t_construction = 0.0;
ctx->statistics.bp_avg_query_length = 0.0;
#ifdef TIMING
struct Timer_t * ttimer = 0;
createTimer(&ttimer);
startTimer(ttimer);
struct Timer_t * ctimer = 0;
createTimer(&ctimer);
startTimer(ctimer);
#endif
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = bases_in_ref / page_size;
ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages, sizeof(ReferencePage));
unsigned int page_overlap = MAX_QUERY_LEN + 1;
pages[0].begin = 1;
pages[0].end = pages[0].begin +
page_size +
ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end);
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i-1].end;
pages[i].id = i;
buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end);
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end);
}
#ifdef TIMING
stopTimer(ctimer);
ctx->statistics.t_construction += getTimerValue(ctimer);
deleteTimer(ctimer);
#endif
///*
hipDeviceProp_t props;
if (!ctx->on_cpu) {
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if (deviceCount != 1) {
//fprintf(stderr, "Fatal error: no CUDA-capable device found, exiting\n");
//return -1;
}
hipGetDeviceProperties(&props, 0);
// fprintf(stderr, "[BENCH] Running under CUDA %d.%d\n", props.major, props.minor);
// fprintf(stderr, "[BENCH] CUDA device has %d bytes of memory\n", (int)props.totalGlobalMem);
}
else {
props.totalGlobalMem = 804585472; // pretend we are on a 8800 GTX
}
//*/
size_t mem_avail = 0;
for (int i = 0; i < num_reference_pages; ++i) {
mem_avail = max((unsigned int)pages[i].ref.bytes_on_board,
(unsigned int)mem_avail);
}
mem_avail = props.totalGlobalMem - mem_avail;
// fprintf(stderr, "[BENCH] There are %d bytes left on the board\n", (int)mem_avail);
mem_avail -= BREATHING_ROOM;
// printf("[DEBUG] matchQueries 1\n");
while (getQueryBlock(ctx, mem_avail)) {
matchSubset(ctx, 0, pages, num_reference_pages);
ctx->statistics.bp_avg_query_length = ctx->queries->texlen/(float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
hipDeviceReset();
}
// printf("[DEBUG] matchQueries 2\n");
for (int i = 0; i < num_reference_pages; ++i) {
destroyReference(&(pages[i].ref));
}
free(pages);
#ifdef TIMING
stopTimer(ttimer);
ctx->statistics.t_total += getTimerValue(ttimer);
deleteTimer(ttimer);
#endif
writeStatisticsFile(ctx, ctx->stats_file);
// printf("[DEBUG] matchQueries end\n");
return 0;
}
| 9427be11899c95b39264b55390f01d2fc188a66a.cu | // Includes, system
#define ulong4 uint4
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
// includes, kernels
#include "common.cu"
#include "mummergpu.h"
#include "mummergpu_kernel.cu"
#define BLOCKSIZE 256
#define CUDA_SAFE_CALL( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C"
void getReferenceString(const char * filename, char** refstr, size_t* reflen);
extern "C"
void createTreeTexture(const char * filename,
PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture,
unsigned int* width, unsigned int* height,
AuxiliaryNodeData** aux_data,
int* num_nodes,
const char * dotfilename,
const char * texfilename);
extern "C"
void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
size_t device_memory_avail,
int min_match_length,
bool rc);
void printAlignments(char* ref,
ReferencePage* page,
char* query,
int qrylen,
int nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
// Timer management
struct Timer_t
{
struct timeval start_m;
struct timeval end_m;
};
void createTimer(struct Timer_t ** timer)
{
struct Timer_t * ptr = (struct Timer_t *) malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
*timer = ptr;
}
void startTimer(struct Timer_t * ptr)
{
gettimeofday(&(ptr->start_m), NULL);
}
void stopTimer(struct Timer_t * ptr)
{
gettimeofday(&(ptr->end_m), NULL);
}
float getTimerValue(struct Timer_t * ptr)
{
Timer_t * timer = ptr;
if (timer == NULL)
{
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) { stopTimer(ptr); }
return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(struct Timer_t * ptr)
{
free(ptr);
}
extern "C"
int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
getReferenceString(fromFile, &(ref->str), &(ref->len));
return 0;
}
extern "C"
int destroyReference(Reference* ref)
{
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
free(ref->h_ref_tex_array);
free(ref->aux_data);
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C"
int createQuerySet(const char* fromFile, QuerySet* queries) {
// fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1) {
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit (1);
}
queries->qfile = qfile;
return 0;
}
extern "C"
int destroyQuerySet(QuerySet* queries) {
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C"
void printStringForError(int err)
{
}
extern "C"
int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
MUMMERGPU_OPTIONS options,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
MatchContext* ctx)
{
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
// break out options here
ctx->on_cpu = options & ON_CPU;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
return 0;
}
extern "C"
int destroyMatchContext(MatchContext* ctx)
{
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
return 0;
}
void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end) {
// fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren * childrenTexture = NULL;
unsigned int height = 0;
unsigned int width = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width, &height,
&aux_data,
&num_nodes,
NULL,
NULL);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_height = height;
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = width * height * (sizeof(PixelOfNode) + sizeof(PixelOfChildren));
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
ref->h_ref_tex_array = (char *) malloc(numrows*refpitch);
ref->bytes_on_board += numrows*refpitch;
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) { ref->h_ref_tex_array[z] = 'Z'; }
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++)
{
int bigx = i % (block_dim);
int bigy = i / (block_dim);
y = bigy*blocksize+bigx%blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_tex_array[y*refpitch+x] = refstr[i];
if (x > maxx) { maxx = x; }
if (y > maxy) { maxy = y; }
}
if ((maxx >= refpitch) || (maxy >= numrows))
{
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
}
void loadReferenceTexture(MatchContext* ctx) {
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
cudaChannelFormatDesc refTextureDesc =
cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned);
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
// fprintf(stderr, "allocating reference texture\n");
CUDA_SAFE_CALL(cudaMallocArray( (cudaArray**)(&ref->d_ref_tex_array),
&refTextureDesc,
ref->pitch,
numrows));
//ref->bytes_on_board += ref->pitch * numrows;
CUDA_SAFE_CALL(cudaMemcpyToArray( (cudaArray*)(ref->d_ref_tex_array),
0,
0,
ref->h_ref_tex_array,
numrows*ref->pitch,
cudaMemcpyHostToDevice));
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.addressMode[1] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray( reftex, (cudaArray*)ref->d_ref_tex_array, refTextureDesc));
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
else {
ref->d_ref_tex_array = NULL;
}
// fprintf(stderr,"done\n");
}
void unloadReferenceTexture(Reference* ref)
{
CUDA_SAFE_CALL(cudaUnbindTexture( reftex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_tex_array)));
ref->d_ref_tex_array = NULL;
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx) {
Reference* ref = ctx->ref;
//ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
cudaChannelFormatDesc nodeTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_SAFE_CALL( cudaMallocArray( (cudaArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_height ));
//ref->bytes_on_board += ref->tex_width * ref->tex_height * (sizeof(PixelOfNode));
CUDA_SAFE_CALL( cudaMemcpyToArray( (cudaArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.addressMode[1] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
CUDA_SAFE_CALL( cudaBindTextureToArray( nodetex,
(cudaArray*)ref->d_node_tex_array,
nodeTextureDesc));
cudaChannelFormatDesc childrenTextureDesc =
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_SAFE_CALL( cudaMallocArray( (cudaArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_height ));
//ref->bytes_on_board += ref->tex_width * ref->tex_height * sizeof(PixelOfNode);
CUDA_SAFE_CALL( cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.addressMode[1] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
CUDA_SAFE_CALL( cudaBindTextureToArray( childrentex,
(cudaArray*)(ref->d_children_tex_array),
childrenTextureDesc));
// fprintf(stderr, "done\n");
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
else
{
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void unloadReference(MatchContext* ctx)
{
Reference* ref = ctx->ref;
CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array)));
ref->d_node_tex_array = NULL;
CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) );
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array)));
ref->d_children_tex_array = NULL;
unloadReferenceTexture(ctx->ref);
}
void loadQueries(MatchContext* ctx) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
// fprintf(stderr, "loadQueries on GPU: Allocating device memory for queries...\n");
CUDA_SAFE_CALL( cudaMalloc((void**) &queries->d_tex_array, queries->texlen));
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMalloc((void**) &queries->d_addrs_tex_array,
numQueries * sizeof(int)));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMalloc((void**) &queries->d_lengths_array,
numQueries * sizeof(int)));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
// fprintf(stderr, "loadQueries on GPU: allocated %ld bytes done\n", 2 * numQueries*sizeof(int) + queries->texlen);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
// fprintf(stderr, "loadQueries on CPU: allocated %ld bytes done\n", numQueries*sizeof(int) + queries->texlen);
}
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
void unloadQueries(MatchContext* ctx) {
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(cudaFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
void loadResultBuffer(MatchContext* ctx) {
unsigned int numQueries = ctx->queries->count;
assert (numQueries);
int match_length = ctx->min_match_length;
unsigned int numCoords = 0;
numCoords = ctx->queries->texlen - numQueries * (match_length + 1);
ctx->results.numCoords = numCoords;
// fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...", numQueries, (int)(numCoords*sizeof(MatchCoord)));
ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord));
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * toboardtimer = 0;
createTimer(&toboardtimer);
startTimer(toboardtimer);
#endif
ctx->results.bytes_on_board = 0;
CUDA_SAFE_CALL( cudaMalloc( (void**) &ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord)));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL( cudaMemset( (void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#ifdef TIMING
stopTimer(toboardtimer);
ctx->statistics.t_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
#endif
}
else {
ctx->results.d_match_coords = NULL;
}
// fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx) {
CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
ctx->results.bytes_on_board = 0;
}
void freeResults(MatchContext* ctx, ReferencePage pages[], unsigned int num_pages) {
for (int i = 0; i < num_pages; ++i) {
free(pages[i].results.h_match_coords);
}
}
void transferResultsFromDevice(MatchContext* ctx) {
if (!ctx->on_cpu) {
#ifdef TIMING
struct Timer_t * fromboardtimer = 0;
createTimer(&fromboardtimer);
startTimer(fromboardtimer);
#endif
CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
cudaMemcpyDeviceToHost) );
#ifdef TIMING
stopTimer(fromboardtimer);
ctx->statistics.t_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
#endif
}
}
int flushOutput();
int addToBuffer(char* string);
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) {
return qry_addrs - qryid * (match_length + 1);
}
#define MAX_QUERY_LEN 8192
struct packed_slot {
unsigned short page;
unsigned short qpos;
MatchCoord coord;
};
struct packed_slot_array {
packed_slot* slots;
unsigned int num_slots;
};
void addPackedOutput(MatchContext* ctx, packed_slot_array** curr_output, packed_slot_array slot_array[]) {
unsigned int numQueries = ctx->queries->count;
if (*curr_output == NULL) {
*curr_output = slot_array;
}
else {
for (int i = 0; i < numQueries; i++) {
if (slot_array[i].num_slots) {
//packed_slot_array* slots = &(slot_array[i]);
(*curr_output)[i].slots = (packed_slot*)realloc((*curr_output)[i].slots, ((*curr_output)[i].num_slots + slot_array[i].num_slots) * sizeof(packed_slot));
memcpy((*curr_output)[i].slots + (*curr_output)[i].num_slots, slot_array[i].slots,
slot_array[i].num_slots * sizeof(packed_slot));
(*curr_output)[i].num_slots += slot_array[i].num_slots;
free(slot_array[i].slots);
}
}
free(slot_array);
}
}
char numbuffer[32];
void printRCSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) {
char* h_tex_array = ctx->queries->h_tex_array;
int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array;
int qrylen = ctx->queries->h_lengths_array[qry];
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + qry));
addToBuffer(" Reverse");
if (ctx->show_query_length) {
addToBuffer(" Len = ");
sprintf(numbuffer, "%d", qrylen);
addToBuffer(numbuffer);
}
addToBuffer("\n");
for (int j = 0; j < slots->num_slots; ++j) {
packed_slot slot = slots->slots[j];
if (slot.coord.edge_match_length & FRMASK) {
printAlignments(ctx->full_ref,
&(pages[slot.page]),
h_tex_array + h_addrs_tex_array[qry],
qrylen,
slot.coord.node,
slot.qpos,
(slot.coord.edge_match_length & FRUMASK),
ctx->min_match_length,
1,
ctx->forwardcoordinates);
}
}
}
int FOO;
void printForwardSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) {
char* h_tex_array = ctx->queries->h_tex_array;
int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array;
int qrylen = ctx->queries->h_lengths_array[qry];
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + qry));
if (ctx->show_query_length)
{
addToBuffer(" Len = ");
sprintf(numbuffer, "%d", qrylen);
addToBuffer(numbuffer);
}
addToBuffer("\n");
for (int j = 0; j < slots->num_slots; ++j)
{
packed_slot slot = slots->slots[j];
if (!(slot.coord.edge_match_length & FRMASK))
{
printAlignments(ctx->full_ref,
&(pages[slot.page]),
h_tex_array + h_addrs_tex_array[qry],
qrylen,
slot.coord.node,
slot.qpos,
slot.coord.edge_match_length,
ctx->min_match_length,
0,
ctx->forwardcoordinates);
}
}
FOO += slots->num_slots;
}
void printPackedResults(MatchContext* ctx, ReferencePage pages[], packed_slot_array slot_array[]) {
unsigned int numQueries = ctx->queries->count;
FOO = 0;
for (int qry = 0; qry < numQueries; qry++) {
packed_slot_array* slots = &(slot_array[qry]);
if (ctx->reverse) {
printRCSlots(ctx, pages, qry, slots);
}
else {
printForwardSlots(ctx, pages, qry, slots);
if (ctx->forwardreverse) {
printRCSlots(ctx, pages, qry, slots);
}
}
}
printf("FOO = %d\n", FOO);
flushOutput();
}
void packSlots(MatchContext* ctx, MatchResults* results, unsigned int page_num, packed_slot_array** slot_arrays, bool rc) {
unsigned int numQueries = ctx->queries->count;
int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array;
int match_length = ctx->min_match_length;
*slot_arrays = (packed_slot_array*)calloc(numQueries, sizeof(packed_slot_array));
for (int i = 0; i < numQueries; i++) {
int qlen;
if (i == numQueries - 1)
qlen = ctx->queries->texlen - h_addrs_tex_array[i] - match_length;
else
qlen = h_addrs_tex_array[i + 1] - h_addrs_tex_array[i] - match_length;
packed_slot* qslots = (packed_slot*)calloc(qlen, sizeof(packed_slot));
int filled = 0;
for (int p = 0; p < qlen; ++p) {
MatchCoord* coords = results->h_match_coords;
int query_coord_begin = match_coord_addrs(i, h_addrs_tex_array[i], match_length);
int query_coord_end = i < numQueries - 1 ?
match_coord_addrs(i + 1, h_addrs_tex_array[i + 1], match_length) : results->numCoords;
int query_coord = query_coord_begin + p;
if ((query_coord < query_coord_end) &&
(coords[query_coord].node > 1) &&
(!(coords[query_coord].edge_match_length & FRMASK) == !rc))
{
packed_slot s;
s.page = page_num;
s.qpos = p;
s.coord = coords[query_coord];
qslots[filled++] = s;
}
}
if (filled)
{
packed_slot* pslots = (packed_slot*)calloc(filled, sizeof(packed_slot));
memcpy(pslots, qslots, (filled)*sizeof(packed_slot));
(*slot_arrays)[i].slots = pslots;
(*slot_arrays)[i].num_slots = filled;
}
else
{
(*slot_arrays)[i].slots = NULL;
(*slot_arrays)[i].num_slots = 0;
}
free(qslots);
}
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
QuerySet* queries = ctx->queries;
char * queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
size_t queryLen;
char** names;
struct Timer_t * queryreadtimer = 0;
createTimer(&queryreadtimer);
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_query_read += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
return numQueries;
}
void destroyQueryBlock(QuerySet* queries) {
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void writeStatisticsFile(MatchContext* ctx, char* stats_filename) {
if (!stats_filename)
return;
FILE* f = fopen(stats_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
return;
}
fprintf(f, "Total,%f\n", ctx->statistics.t_total);
fprintf(f, "Kernel,%f\n", ctx->statistics.t_kernel);
fprintf(f, "Print matches,%f\n", ctx->statistics.t_output);
fprintf(f, "Copy queries to GPU,%f\n", ctx->statistics.t_to_board);
fprintf(f, "Copy output from GPU,%f\n", ctx->statistics.t_from_board);
fprintf(f, "Copy suffix tree to GPU,%f\n", ctx->statistics.t_moving_tree_pages);
fprintf(f, "Read queries from disk,%f\n", ctx->statistics.t_query_read);
fprintf(f, "Suffix tree constructions,%f\n", ctx->statistics.t_construction);
fprintf(f, "Minimum substring length, %d\n", ctx->min_match_length);
fprintf(f, "Average query length, %f\n", ctx->statistics.bp_avg_query_length);
fclose(f);
}
int matchSubset(MatchContext* ctx,
int query_block_offset,
ReferencePage pages[],
unsigned int num_pages) {
loadQueries(ctx);
packed_slot_array* packed_slots = NULL;
for (unsigned int i = 0; i < num_pages; ++i) {
ctx->ref = &(pages[i].ref);
loadReference(ctx);
loadResultBuffer(ctx);
#ifdef TIMING
struct Timer_t * ktimer = 0;
createTimer(&ktimer);
#endif
unsigned int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize,1,1);
dim3 dimGrid(ceil(numQueries/(float)BLOCKSIZE), 1, 1);
if (!ctx->on_cpu) {
fprintf(stderr,"[INFO] Blocks(%d, %d, %d)\n",
dimBlock.x, dimBlock.y, dimBlock.z);
fprintf(stderr,"[INFO] Grid(%d, %d, %d)\n",
dimGrid.x, dimGrid.y, dimGrid.z);
/*
fprintf(stderr,"Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
(int)ctx->queries->bytes_on_board,
(int)ctx->ref->bytes_on_board,
(int)ctx->results.bytes_on_board);
*/
}
#ifdef TIMING
startTimer(ktimer);
#endif
bool alignRC = ctx->reverse;
if (ctx->on_cpu) {
if (alignRC) {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
else {
if (alignRC) {
mummergpuRCKernel<<< dimGrid, dimBlock, 0 >>>(ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
else
{
mummergpuKernel<<< dimGrid, dimBlock, 0 >>>(ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
}
cudaThreadSynchronize();
// printf("check if kernel execution generated an error\n");
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
#ifdef TIMING
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_kernel += ktime;
fprintf(stderr,"[STATS] kernel time = %f\n", ktime);
deleteTimer(ktimer);
#endif
transferResultsFromDevice(ctx);
pages[i].results = ctx->results;
packed_slot_array* packed;
packSlots(ctx, &(pages[i].results), i, &packed, ctx->reverse);
addPackedOutput(ctx, &packed_slots, packed);
// printf("compute the reverse matches\n");
if (ctx->forwardreverse) {
#ifdef TIMING
struct Timer_t * rctimer = 0;
createTimer(&rctimer);
startTimer(rctimer);
#endif
if (ctx->on_cpu) {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
mummergpuRCKernel<<< dimGrid, dimBlock, 0 >>>(ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
cudaThreadSynchronize();
}
#ifdef TIMING
stopTimer(rctimer);
float rctime = getTimerValue(rctimer);
ctx->statistics.t_kernel += rctime;
fprintf(stderr,"rc kernel time= %f\n", rctime);
deleteTimer(rctimer);
#endif
transferResultsFromDevice(ctx);
pages[i].results = ctx->results;
packed_slot_array* packed;
packSlots(ctx, &(pages[i].results), i, &packed, 1);
addPackedOutput(ctx, &packed_slots, packed);
}// end if (ctx->forwardreverse)
free(pages[i].results.h_match_coords);
pages[i].results.h_match_coords = NULL;
unloadReference(ctx);
unloadResultBuffer(ctx);
}// end for
#ifdef TIMING
struct Timer_t * otimer = 0;
createTimer(&otimer);
startTimer(otimer);
// printPackedResults(ctx, pages, packed_slots);
stopTimer(otimer);
ctx->statistics.t_output += getTimerValue(otimer);
deleteTimer(otimer);
#endif
for (int i = 0; i < ctx->queries->count; ++i) {
free(packed_slots[i].slots);
}
free(packed_slots);
unloadQueries(ctx);
return 0;
}
#define BREATHING_ROOM (64 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 7500000
#define CHUMP_CHANGE 1500000
extern "C"
int matchQueries(MatchContext* ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(ulong4));
assert(sizeof(struct PixelOfChildren) == sizeof(ulong4));
ctx->statistics.t_kernel = 0.0;
ctx->statistics.t_output = 0.0;
ctx->statistics.t_to_board = 0.0;
ctx->statistics.t_from_board = 0.0;
ctx->statistics.t_moving_tree_pages = 0.0;
ctx->statistics.t_query_read = 0.0;
ctx->statistics.t_total = 0.0;
ctx->statistics.t_construction = 0.0;
ctx->statistics.bp_avg_query_length = 0.0;
#ifdef TIMING
struct Timer_t * ttimer = 0;
createTimer(&ttimer);
startTimer(ttimer);
struct Timer_t * ctimer = 0;
createTimer(&ctimer);
startTimer(ctimer);
#endif
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = bases_in_ref / page_size;
ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages, sizeof(ReferencePage));
unsigned int page_overlap = MAX_QUERY_LEN + 1;
pages[0].begin = 1;
pages[0].end = pages[0].begin +
page_size +
ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end);
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i-1].end;
pages[i].id = i;
buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end);
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end);
}
#ifdef TIMING
stopTimer(ctimer);
ctx->statistics.t_construction += getTimerValue(ctimer);
deleteTimer(ctimer);
#endif
///*
cudaDeviceProp props;
if (!ctx->on_cpu) {
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if (deviceCount != 1) {
//fprintf(stderr, "Fatal error: no CUDA-capable device found, exiting\n");
//return -1;
}
cudaGetDeviceProperties(&props, 0);
// fprintf(stderr, "[BENCH] Running under CUDA %d.%d\n", props.major, props.minor);
// fprintf(stderr, "[BENCH] CUDA device has %d bytes of memory\n", (int)props.totalGlobalMem);
}
else {
props.totalGlobalMem = 804585472; // pretend we are on a 8800 GTX
}
//*/
size_t mem_avail = 0;
for (int i = 0; i < num_reference_pages; ++i) {
mem_avail = max((unsigned int)pages[i].ref.bytes_on_board,
(unsigned int)mem_avail);
}
mem_avail = props.totalGlobalMem - mem_avail;
// fprintf(stderr, "[BENCH] There are %d bytes left on the board\n", (int)mem_avail);
mem_avail -= BREATHING_ROOM;
// printf("[DEBUG] matchQueries 1\n");
while (getQueryBlock(ctx, mem_avail)) {
matchSubset(ctx, 0, pages, num_reference_pages);
ctx->statistics.bp_avg_query_length = ctx->queries->texlen/(float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
cudaThreadExit();
}
// printf("[DEBUG] matchQueries 2\n");
for (int i = 0; i < num_reference_pages; ++i) {
destroyReference(&(pages[i].ref));
}
free(pages);
#ifdef TIMING
stopTimer(ttimer);
ctx->statistics.t_total += getTimerValue(ttimer);
deleteTimer(ttimer);
#endif
writeStatisticsFile(ctx, ctx->stats_file);
// printf("[DEBUG] matchQueries end\n");
return 0;
}
|
0c4792f7a6fb6e879aeaef8bff4239db8efa570e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* -----------------------------------------------------------------------------
* ----- TRACER_KERNEL.CU -----
* ----- REALTIME RAY TRACER -----
* -----------------------------------------------------------------------------
*
* File Description:
* This is the kernel file of the Ray Tracer. It implements the Ray Tracing
* logic and binary tree stack (needed for recursion).
*
* Table of Contents:
* pushStack - push a value unto the stack.
* popStack - pop a value from the stack
* copyMats - copy properties from one Material object to another
* copyRay - copy properties from one Ray object to another
* intersectSphere - intersect a ray with a sphere
* intersectPlane - intersect a ray with a plane
* pointInPlane - check if a point is in a plane
* intersectBox - intersect a ray with a box
* intersectCylinder - intersect a ray with a capless cylinder
 * intersectCone - intersect a ray with a capless cone
* get_first_intersection - get first intersection of a ray in the scene
* get_point_color - get color at a point
* initStack - initialize stack
* updateNextLevel - update the next level of the stack's binary tree
* trace_ray - trace a ray
* (float3) mul - transform vector by matrix (no translation)
* (float4) mul - transform vector by matrix with translation
* rgbaFloatToInt - convert rgba color to unsigned integer
* rFloatToInt - convert float to 32-bit unsigned int
 * d_render - perform volume rendering
*
* Objects in module:
* float3x4 - 3x4 float matrix representation
* Material - scene object material properties
* Object - generic scene object
* Intersect - a Ray and Object's intersection point's properties
* Light - Light object
* Ray - Light Ray object
* Stack4 - Ray tracer's binary tree (BinTree) stack
*
* Assumptions:
* Using a machine with a CUDA-capable NVIDIA GPU
*
* Limitations:
* - CUDA doesn't officially support C++. Don't want to fight a battle with
* classes so I'll go ahead and define structs in place of objects (hopefully
* efficiently).
* - CUDA also doesn't support recursion. So have to use a binary tree based
* stack to implement recursion here.
 * + A binary tree stack is necessary as a ray tracer recursively follows
* the path that light rays take as they intersect objects in the scene and
 * result in two new rays: a reflected and a refracted ray.
*
* References: (Had to learn this from somewhere)
* http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtrace0.htm
* http://www.cs.unc.edu/~rademach/xroads-RT/RTarticle.html
* http://fuzzyphoton.tripod.com/howtowrt.htm
* http://www.visualization.hpc.mil/wiki/Raytracing
*
* Compiler:
* NVIDIA's CUDA Compiler (NVCC)
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
#ifndef _TRACER_KERNEL_H_
#define _TRACER_KERNEL_H_
#include "cutil_math.h"
/*
* Raytracers have to deal with the finite precision of computer calculations.
* Let us consider a situation in which a ray hits an object and is reflected.
* We have to find the nearest object intersected by the reflected ray. Now it
* is perfectly possible that this ray goes and hits another part of the same
* object. Therefore this object must also be tested. Since this origin of the
* reflected ray lies on the surface of the object, there will be an
* intersection point at zero (0) distance. If computers had infinite precision,
* we would indeed get 0 as one of the distances and we could safely ignore it.
* But this is not the case. There will almost always be a small error in the
* distance. Thus the position of the point of reflection will be slightly
* different from the ideal value, and the distance that should be 0 will more
 * likely be some small quantity like 0.000001 (even if the point of reflection
* is ideally positioned). Yet we know that this result must be omitted. To
* handle this case, raytracers use a small value known as an "epsilon". All
* intersection distances less than the epsilon value are ignored. A good
* raytracer tries to make its epsilon setting as small as possible by reducing
* chances of numerical error, so that small details near the surface are not
* missed out
*/
#define EPSILON 0.00001f // higher precision epsilon value
#define INTERSECT_EPSILON 0.01f // lower precision epsilon value
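/*
 * Illustrative sketch (an assumption of this write-up, not referenced by the
 * tracer below): how the epsilon values above are typically applied when
 * deciding whether to accept a candidate intersection distance t. Hits closer
 * than the epsilon are rejected as numerical self-intersection noise, and hits
 * farther than the nearest intersection found so far are occluded anyway.
 */
__device__ inline int validIntersection(float t, float tNearest)
{
    return (t > INTERSECT_EPSILON) && (t < tNearest);
}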
/*
* A binary tree based stack is used to track the recursive path of the light
* rays that intersect the objects in the scene and consequenttly reflect and
* refract.
*
* Of course these rays can bounce along the scene infinitely. It's only
* practical to stop following these rays after a certain number of
* intersections. So we define a maximum ray tree depth (MAX_RAY_TREE_DEPTH)
*
* This binary tree stack will be represented as an array, just like we would
* represent any binary tree as an array.
 * - The binary tree will have [2^(MAX_RAY_TREE_DEPTH+1) -1] nodes
* - There will be 1 empty node at index 0
* - Therefore total array size is: 2^(MAX_RAY_TREE_DEPTH+1)
*/
#define MAX_RAY_TREE_DEPTH 3 // max number of ray intersections to follow
#define MAX_STACK_DEPTH 16 // 2^(MAX_RAY_TREE_DEPTH+1) -1 nodes in BinTree
// (+1 empty node at index 0)
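/*
 * Illustrative sketch of the array <-> binary tree indexing implied above.
 * These macros are an assumption for illustration only and are not used by
 * the kernels below: with index 0 left empty, the root ray sits at index 1,
 * the two child rays spawned at node i's intersection (one reflected, one
 * refracted) sit at 2*i and 2*i + 1, and the parent of node i is i/2.
 */
#define BINTREE_ROOT 1
#define BINTREE_LEFT(i) (2 * (i))
#define BINTREE_RIGHT(i) (2 * (i) + 1)
#define BINTREE_PARENT(i) ((i) / 2)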
/*
* Supported scene object types
*/
#define SPHERE 1
#define PLANE 2
#define BOX 3
#define CYLINDER 4
#define CONE 5
///////////////////////////////////////////////////////////////////////////////
// Structs that represent module objects
///////////////////////////////////////////////////////////////////////////////
typedef struct { // 3x4 matrix representation
float4 m[3];
} float3x4;
/* material properties come up a lot, so best to define a struct for them */
typedef struct{ // material
float4 amb; // material ambient color (RGBA)
float4 diff; // material diffuse color
float4 spec; // material specular color
float shiny; // material shininess
float n; // refractive index
float kr; // reflection coefficients
float kt; // transmission coefficient
} Material;
typedef struct { // struct defining a generic object
int type; // object type -- see defines above
Material mat; // material properties
/* defined only if type == SPHERE */
float3 c; // sphere centre
float r; // sphere radius -- also works for cylinder and cone
/* defined only if type == PLANE */
float4 pl; // Plane is defined by equation:A*x + By + Cz+D
// So in this case:
// plane.x*x + plane.y*y + plane.z*z + plane.w = 0
/* defined only if type == BOX */
float3 min; // min and max vertices define a box... think about it
float3 max;
/* defined only if type == CYLINDER OR CONE */
float ymin; // ymax-ymin gives length of cylinder
float ymax;
} Object;
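/*
 * Worked example (an assumption, for illustration only): with a plane stored
 * as pl.x*x + pl.y*y + pl.z*z + pl.w = 0, a horizontal ground plane at y = -1
 * could be described by
 *     ground.type = PLANE; ground.pl = make_float4(0.0f, 1.0f, 0.0f, 1.0f);
 * and a unit sphere at the origin by
 *     ball.type = SPHERE; ball.c = make_float3(0.0f, 0.0f, 0.0f); ball.r = 1.0f;
 */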
typedef struct { // struct defining an intersection point's properties
float3 normal; // normal at intersection point
float3 p; // position of intersection
Material mat; // material properties here
} Intersect;
typedef struct { // Light object
float4 amb; // ambient color (RGBA)
float4 diff; // diffuse color
float4 spec; // specular color
float3 pos; // light source position
} Light;
struct Ray { // Ray for RayTracing
float3 o; // origin
float3 d; // direction
float t; // minimum distance travelled so far before intersection
Intersect pt; // intersection point
int intersected; // did ray already intersect object? 1 = yes, 0 =no
int entering; // ray will be entering or leaving an object?
};
typedef struct { // a BinTree "stack" very specific to this ray tracer
float4 body[MAX_STACK_DEPTH]; // color at a point
Ray r[MAX_STACK_DEPTH]; // ray that hit this point
int size; // number of slots in array/ nodes in BinTree
int max_level; // the maximum number of levels
int level; // points to last filled level
int top; // points to next location to push to...so one
// above top data element
} Stack4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
///////////////////////////////////////////////////////////////////////////////
// Variables/CONSTANTS that define the scene
///////////////////////////////////////////////////////////////////////////////
#define NUM_OBJECTS 10
#define NUM_LIGHTS 2
/* objects in scene */
__device__ Object d_object[NUM_OBJECTS];
Object object[NUM_OBJECTS];
/* background color */
__device__ float4 d_bgnd_col;
float4 bgnd_col;
/* lights */
__device__ Light d_light[NUM_LIGHTS];
Light light[NUM_LIGHTS];
///////////////////////////////////////////////////////////////////////////////
// functions for Ray Tracing
///////////////////////////////////////////////////////////////////////////////
/*
* pushStack
* Description:
* Push value unto the stack.
*
* Arguments:
* stack: BinTree stack
* val: Value to push onto stack.
*
* Return:
* - 1 if successful
* 0 if failure
*
* Operation:
* - if stack is full, return failure
* - else, add value to stack and increment top of stack.
*
* Assumption:
* The stack top pointer already points to an empty slot
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int pushStack(Stack4 &stack, float4 val)
{
// if stack is full, return failure
if(stack.top >= stack.size)
return 0;
// stack not full, so add value, increment stack's top, and return success
stack.body[stack.top++] = val;
return 1;
}
/*
* popStack
* Description:
* pop value from the stack.
*
* Arguments:
* stack: BinTree stack
 *   val:   Receives the value popped from the stack.
*
* Return:
* - 1 if successful
* 0 if failure
*
* Operation:
* - if stack is empty, return failure
* - else, decrement top of stack and get value.
*
* Assumption:
* The stack's top pointer needs to be decremented to get actual data.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int popStack(Stack4 &stack, float4 &val)
{
// if stack is empty, return failure
if(stack.top <= 0)
return 0;
// stack not empty, so decrement stack's top, get value, and return success
val = stack.body[--stack.top];
return 1;
}
/*
* copyMats
* Description:
* copy properties from one Material object to another
*
* Arguments:
* m_dest: Material object to copy properties to
* m_source: Material object to copy properties from
*
* Return:
* None
*
* Operation:
* assign all object properties in m_dest to the value of corresponding
* properties in m_source.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void copyMats(Material &m_dest, Material m_source)
{
m_dest.amb = m_source.amb;
m_dest.diff = m_source.diff;
m_dest.spec = m_source.spec;
m_dest.shiny = m_source.shiny;
m_dest.n = m_source.n;
m_dest.kr = m_source.kr;
m_dest.kt = m_source.kt;
return;
}
/*
* copyRay
* Description:
* copy properties from one Ray object to another
*
* Arguments:
* dest: Ray object to copy properties to
* source: Ray object to copy properties from
*
* Return:
* None
*
* Operation:
* assign all object properties in dest to the value of corresponding
* properties in source. Use copyMats() to copy the Ray's Intersection Material
* property.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void copyRay(Ray &dest, Ray source)
{
dest.o = source.o;
dest.d = source.d;
dest.t = source.t;
dest.intersected = source.intersected;
dest.entering = source.entering;
copyMats(dest.pt.mat, source.pt.mat);
dest.pt.normal = source.pt.normal;
dest.pt.p = source.pt.p;
}
/*
* intersectSphere
* Description:
* intersect a ray with a sphere
*
* Arguments:
* r: Incident Ray object
* s: Sphere object under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects sphere
* - If there is an intersection then there will actually be two intersections
* - Take the closest intersection point that is further than the ray origin
* and in front of the eye-point (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.cs.unc.edu/~rademach/xroads-RT/RTarticle.html
* - http://fuzzyphoton.tripod.com/howtowrt.htm
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectSphere(Ray &r, /* incident ray */
const Object &s /* sphere under consideration */)
{
float3 E0 = s.c-r.o;
float v = dot(E0, r.d);
float disc = s.r*s.r - (dot(E0,E0) - v*v);
if (disc < 0) // check if ray actually intersects sphere
return 0; // no intersection
// take the closest intersection point that is further than the ray origin
  // and in front of the eye-point (i.e. positive ray direction)
// that is why we compare t0 and t1 to INTERSECT_EPSILON.
// Why not 0.0f? Simply because there is a high risk that given our limited
// precision (in float), after a reflection from a point we find that our
// ray intersects the same object around the same point when it shouldn't
// with an infinite precision. By taking our starting point at a reasonable
// distance but close enough to not cause "gaps" we can avoid some artifacts
float t0 = v - sqrt(disc);
float t1 = v + sqrt(disc);
float t = 0.0;
int retVal = 0;
if ((t0>INTERSECT_EPSILON) && (t0 < t1))
{
t = t0;
retVal = 1;
}
if ((t1>INTERSECT_EPSILON) && (t1 < t0))
{
t = t1;
retVal = 1;
}
// if no intersection, end this
if (retVal == 0)
return 0;
  // test if t isn't the nearest intersection noted
if (r.intersected && (t > r.t))
return 0;
// this is a legitimate intersection... save the properties
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = r.o + t*r.d; // update point of intersection
r.pt.normal = normalize(r.pt.p-s.c); // update normal at point
copyMats(r.pt.mat, s.mat); // COPY material properties
return 1; // there was an intersection!
}
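/*
 * Worked example (numbers are illustrative only): for a ray with
 * o = (0,0,0), d = (0,0,-1) and a sphere with c = (0,0,-5), r = 1:
 *   E0 = c - o = (0,0,-5),  v = dot(E0,d) = 5
 *   disc = r*r - (dot(E0,E0) - v*v) = 1 - (25 - 25) = 1
 *   t0 = v - sqrt(disc) = 4,  t1 = v + sqrt(disc) = 6
 * The nearer root t0 = 4 is kept, giving the intersection point (0,0,-4).
 */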
/*
* intersectPlane
* Description:
* intersect a ray with a plane
*
* Arguments:
* r: Incident Ray object
* pln: Plane object under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects plane (i.e. not parallel to plane)
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.siggraph.org/education/materials/HyperGraph/raytrace/rayplane_intersection.htm
* - equation of a plane through 3 points at:
* http://www.math.oregonstate.edu/home/programs/undergrad/CalculusQuestStudyGuides/vcalc/lineplane/lineplane.html
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectPlane(Ray &r, /* incident ray */
const Object &pln /* plane under consideration */)
{
float3 Pn = make_float3(pln.pl); // unit normal
float Vd = dot(r.d,Pn);
if (Vd == 0) // if Vd == 0 then ray is parallel to plane so no intersection
return 0;
float V0 = -(dot(r.o,Pn) + pln.pl.w);
float t = V0/Vd;
// test if ray intersects behind eye point (i.e. not in positive ray
  // direction), and if it isn't the nearest intersection noted
if ((t <= INTERSECT_EPSILON) || (r.intersected && (t > r.t)))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = r.o + t*r.d; // update point of intersection
r.pt.normal = normalize(Pn); // update normal at point of intersection
copyMats(r.pt.mat, pln.mat); // COPY material properties
return 1; // there was an intersection!
}
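/*
 * Worked example (numbers are illustrative only): the plane z = -10 is
 * stored as pl = (0,0,1,10). For a ray with o = (0,0,0), d = (0,0,-1):
 *   Vd = dot(d,Pn) = -1 (not parallel)
 *   V0 = -(dot(o,Pn) + pl.w) = -10
 *   t  = V0/Vd = 10
 * so the ray hits the plane at (0,0,-10) with normal (0,0,1).
 */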
/*
* pointInPlane
* Description:
* Check if a point is in a plane. This is a helper function for intersectBox
*
* Arguments:
* n: plane's normal vector
* pp: point already established to be in a plane
* p: point under consideration
*
* Return:
* 1: point is in a plane
* 0: otherwise
*
* Operation:
* to check if a point (px,py,pz) is on a plane, check the equation
* nx*(ppx-px) + ny*(ppy-py) + nz*(ppz-pz) == 0.0
* - note that equality to 0.0 isn't going to happen so use an EPSILON check,
* i.e. check if the absolute value is less than EPSILON
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int pointInPlane(float3 n, /* normal*/
float3 pp, /* point_in_plane */
float3 p /* point under consideration */)
{
int inPlane = 0;
float3 vecInPlane = p-pp;
float res = dot(n, vecInPlane);
if (res < 0.0f)
res *= -1.0f;
if (res < EPSILON)
inPlane = 1;
return inPlane;
}
/*
* intersectBox
* Description:
* Intersect a ray with a box
*
* Arguments:
* r: Incident Ray object
* box: Box object under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects box
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.visualization.hpc.mil/wiki/Adding_in_a_Box_Object
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectBox(Ray &r, /* incident ray */
const Object &box /* box under consideration */)
{
float tmp, tnear = -1.0e6, tfar=1.0e6;
float3 tmin = (box.min - r.o)/r.d;
float3 tmax = (box.max - r.o)/r.d;
if(tmin.x>tmax.x) { tmp=tmin.x; tmin.x=tmax.x; tmax.x=tmp;}
if(tmin.y>tmax.y) { tmp=tmin.y; tmin.y=tmax.y; tmax.y=tmp;}
if(tmin.z>tmax.z) { tmp=tmin.z; tmin.z=tmax.z; tmax.z=tmp;}
tnear=max(tmin.z,max(tmin.y,max(tmin.x,tnear)));
tfar =min(tmax.z,min(tmax.y,min(tmax.x,tfar )));
if(tnear>tfar) return 0; // The box is missed.
if(tfar<INTERSECT_EPSILON) return 0; // The box is behind us.
if(tnear<INTERSECT_EPSILON) {return 0; } // We are inside the box.
// have an intersection
float t = tnear;
// check this is nearest intersection noted
if (r.intersected && (t>r.t))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = r.o + t*r.d; // update point of intersection
// update normal at point of intersection
float3 distance = box.min - r.pt.p;
float min_dist=abs(distance.x);
int min=0;
if(abs(distance.y) < min_dist) { min_dist=abs(distance.y); min=2; }
if(abs(distance.z) < min_dist) { min_dist=abs(distance.z); min=4; }
distance = box.max - r.pt.p;
if(abs(distance.x) < min_dist) { min_dist=abs(distance.x); min=1; }
if(abs(distance.y) < min_dist) { min_dist=abs(distance.y); min=3; }
if(abs(distance.z) < min_dist) { min_dist=abs(distance.z); min=5; }
r.pt.normal = make_float3(0, 0, 1);
if (min==0) {r.pt.normal = make_float3(-1, 0, 0);}
if (min==1) {r.pt.normal = make_float3( 1, 0, 0);}
if (min==2) {r.pt.normal = make_float3( 0,-1, 0);}
if (min==3) {r.pt.normal = make_float3( 0, 1, 0);}
if (min==4) {r.pt.normal = make_float3( 0, 0,-1);}
if (dot(r.pt.normal, r.d) > 0) // normal and ray must be in opposite dirs
r.pt.normal *= -1;
copyMats(r.pt.mat, box.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* intersectCylinder
* Description:
* Intersect a ray with a capless cylinder
*
* Arguments:
 *   r:   Incident Ray object
* cyl: Cylinder under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects cylinder
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.visualization.hpc.mil/wiki/Adding_in_a_Cylinder_Object
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectCylinder(Ray &r, /* incident ray */
const Object &cyl /* cylinder under consideration */)
{
float3 pvt_center = make_float3(0.0, (cyl.ymin+cyl.ymax)/2, 0.0);
float3 Rd=r.d;
float3 Ro=pvt_center - r.o;
float3 pnt_intrsct;
float a = Rd.x*Rd.x + Rd.z*Rd.z;
float b = Ro.x*Rd.x + Ro.z*Rd.z;
float c = Ro.x*Ro.x + Ro.z*Ro.z - (cyl.r*cyl.r);
float disc = b*b - a*c;
float t, d, root1, root2;
int return_value = 0;
// If the discriminant is less than 0, then we totally miss the cylinder.
if (disc > 0.0)
{
d = sqrt(disc);
root2 = (b + d)/a;
root1 = (b - d)/a;
// If root2 < 0, then root1 is also < 0, so they are both misses.
if (root2 > INTERSECT_EPSILON)
{
// If root2 > 0, and root1 < 0, we are inside the cylinder.
if(root1 < INTERSECT_EPSILON)
{
return_value=0;
        // If root2 > 0, and root1 > 0, we hit the cylinder.
} else {
t=root1; return_value= 1;
}
}
pnt_intrsct = r.o + Rd*t ;
// Limit the y values
if((pnt_intrsct.y>cyl.ymax)||(pnt_intrsct.y<cyl.ymin)) {
pnt_intrsct = r.o + Rd*root2 ;
t = root2;
// Are we too high in our first hit, but hit the back wall later
if((pnt_intrsct.y>cyl.ymax)||(pnt_intrsct.y<cyl.ymin)) {
return_value = 0;
}
}
}
if (!return_value)
return 0;
// have intersection
// check this is nearest intersection noted
if (r.intersected && (t>r.t))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = pnt_intrsct; // update point of intersection
//update normal
r.pt.normal = normalize(r.pt.p - make_float3(0.0,r.pt.p.y,0.0));
copyMats(r.pt.mat, cyl.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* intersectCone
* Description:
* Intersect a ray with a capless cone
*
* Arguments:
 *   r:   Incident Ray object
* con: Cone under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects cone
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.visualization.hpc.mil/wiki/Adding_in_a_Cone_Object
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectCone(Ray &r, /* incident ray */
const Object &con /* cone under consideration */)
{
float3 pvt_center = make_float3(0.0, con.ymin, 0.0);
float pvt_height = con.ymax-con.ymin;
float pvt_e = -(con.r*con.r)/(pvt_height*pvt_height);
float3 Rd=r.d;
float3 Ro=r.o;
float3 omc=pvt_center - Ro;
omc.y = pvt_center.y - Ro.y + pvt_height;
float3 pnt_intrsct;
float a = Rd.x*Rd.x + pvt_e*Rd.y*Rd.y + Rd.z*Rd.z;
float b = omc.x*Rd.x + pvt_e*omc.y*Rd.y + omc.z*Rd.z;
float c = omc.x*omc.x + pvt_e*omc.y*omc.y + omc.z*omc.z;
float disc = b*b - a*c;
float t, d, root1, root2;
int return_value = 0;
// If the discriminant is less than 0, then we totally miss the cone.
if (disc > 0.0) {
d = sqrt(disc);
root2 = (b + d)/a;
root1 = (b - d)/a;
// If root2 < 0, then root1 is also < 0, so they are both misses.
if (root2 > INTERSECT_EPSILON) {
// If root2 > 0, and root1 < 0, we are inside the cone.
if(root1 < INTERSECT_EPSILON) {
return_value=0;
        // If root2 > 0, and root1 > 0, we hit the cone.
} else {
t=root1; return_value= 1;
}
}
pnt_intrsct = Ro + Rd*t ;
// Limit the y values: ymin <= y <= ymax
// If the point of intersection is too low or too high, record it as a
// miss.
if((pnt_intrsct.y>(pvt_center.y+pvt_height))||
(pnt_intrsct.y<pvt_center.y)) {
pnt_intrsct = Ro + Rd*root2 ;
t = root2;
// Are we too high in our first hit, but hit the back wall later
if((pnt_intrsct.y>(pvt_center.y+pvt_height))||
(pnt_intrsct.y<pvt_center.y)) {
return_value = 0;
}
}
}
if (!return_value)
return 0;
// have intersection
// check this is nearest intersection noted
if (r.intersected && (t>r.t))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = pnt_intrsct; // update point of intersection
//update normal
a = r.pt.p.x - pvt_center.x;
b = r.pt.p.y - pvt_center.y - pvt_height;
c = r.pt.p.z - pvt_center.z;
r.pt.normal=normalize(make_float3(a,pvt_e*b,c));
copyMats(r.pt.mat, con.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* get_first_intersection
* Description:
* get first intersection of a ray in the scene.
*
* Arguments:
 *   r:   Incident Ray object
*
* Return:
* None
*
* Operation:
* Loop through all objects in the scene, calling the appropriate
* intersect{Object} function for each object type. At the end of the loop
* the Ray object is updated with the details of the nearest/first intersection
* point.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void get_first_intersection(Ray &r)
{
// this loop goes through all objects in our space and at the end
// the Ray is updated with the information of the nearest/first
// intersection
for(int i=0; i < NUM_OBJECTS; i++)
{
if(d_object[i].type == SPHERE) // call the right intersect
intersectSphere(r, d_object[i]); // function for each object type
else if(d_object[i].type == PLANE)
intersectPlane(r, d_object[i]);
else if(d_object[i].type == BOX)
intersectBox(r, d_object[i]);
else if(d_object[i].type == CYLINDER)
intersectCylinder(r, d_object[i]);
else if(d_object[i].type == CONE)
intersectCone(r, d_object[i]);
}
return; /* intersected object info is contained in ray */
}
/*
* get_point_color
* Description:
* get color at a point, accounting for all light sources and shadows.
*
* Arguments:
 *   r:   Incident Ray object
*
* Return:
* rgba color
*
* Operation:
* - Initialize the point color to an rgba of 0,0,0,0.
* - There will be a shadow variable that determines if a point is in the
* shadow of a light source. This variable will have a [0.0, 1.0] scale
* with 0 meaning shadow and 1 meaning no shadow.
* - Start by assuming the point is not in the shadows.
 * - If the ray didn't intersect an object, return the background color
* - To add in the contributions from all light sources, loop through them all
* and for each light source:
* + check if a point is in the shadow of this light source. If so, get the
* shadow factor
* + scale the diffuse+specular colors by this shadow factor before
* accumulating these colors into the point color.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float4 get_point_color(Ray &r)
{
float4 col4 = make_float4(0.0f); // will accumulate the colors
// the amount of diffuse+specular color to be accumulated is scaled by
// the shadow variable. Start by assuming this point is not in a shadow
float shadow = 1.;
  if (!r.intersected) // ray didn't intersect an object, so use background
return d_bgnd_col;
// need to add in the contributions from all the light sources
for(int i = 0; i< NUM_LIGHTS; i++)
{
float3 lightDir = normalize(d_light[i].pos -r.pt.p); // from p to light
col4 += r.pt.mat.amb * d_light[i].amb; // add in ambient color
/* check if point is in shadow for this light source */
Ray shadowCheck; // create a shadow ray from intersection
shadowCheck.o = r.pt.p; // pt to light, then check if it
shadowCheck.d = lightDir; // intersects any objects in the scene.
shadowCheck.intersected = 0; // if it does, then shadow factor is set
get_first_intersection(shadowCheck); // to transmission coefficient of
if(shadowCheck.intersected) // that object. If object is opaque,
shadow = shadowCheck.pt.mat.kt; // shadow factor becomes 0
/* compute the dot product between normal and normalized lightdir */
float NdotL = max(dot(r.pt.normal,lightDir), 0.0);
float3 R = 2*r.pt.normal*NdotL - lightDir; // R = light ray's reflection
R = normalize(R);
float3 Eye = normalize(-r.d);
/* compute the dot product between reflection and eye-view */
float RdotE = max(dot(R,Eye),0.0);
if (NdotL > 0.0)
{
/* add diffuse component */
col4 += shadow*(r.pt.mat.diff * d_light[i].diff * NdotL);
}
/* add specular component */
col4 += shadow*(r.pt.mat.spec * pow(RdotE, r.pt.mat.shiny));
}
return col4;
}
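/*
 * Summary of the local shading model exactly as coded above, per light i:
 *
 *   col += mat.amb * light[i].amb
 *        + shadow * mat.diff * light[i].diff * max(N.L, 0)
 *        + shadow * mat.spec * max(R.E, 0)^shiny
 *
 * Note that, as written, the specular term uses only the material's
 * specular color and is added even when N.L <= 0.
 */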
/*
* initStack
* Description:
* Initialize BinTree stack.
*
* Arguments:
* stack: BinTree stack to be initialized
* val: rgba point color value to be put at root node
 *   r:     Ray that hit the point whose color is in the root node.
*
* Return:
* None
*
* Operation:
 * - Initialize the stack by setting up the root node and other properties of the
* stack.
* - Also setup all rays to a non-intersected state and set all colors
* to blank. This way we can just loop through and add up colors easily.
* - Note that the root is initialized at index 1. This is just how array
* implementations of binary trees work. Index 0 is purposely left blank.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void initStack(Stack4 &stack, float4 val, Ray &r)
{
stack.size = MAX_STACK_DEPTH;
stack.max_level = MAX_RAY_TREE_DEPTH;
stack.top = 2;
stack.level = 0;
for(int i=0; i < stack.size ;i++)
{
stack.body[i] = make_float4(0.);
stack.r[i].intersected = 0;
}
stack.body[1] = val;
copyRay(stack.r[1], r);
}
/*
* updateNextLevel
* Description:
 *   Update the next level of the stack's binary tree.
 *   This way of updating the next level is very specific to the ray tracing
 *   algorithm.
 *   Remember that level is set up to point to the currently filled level. So
 *   when this function is called you plan on filling level+1
* At each level the array indices will run from [2^stack.level] to
* [2^(stack.level+ 1) -1]
*
* Arguments:
* stack: BinTree stack to be initialized
*
* Return:
* 1: Success
* 0: Failure
*
* Operation:
* You get the stack with the level set at that of the last filled level. Using
* this, you can update the next level, with this basic idea:
* - have a for loop of index i going from [2^level] to [2^(level+1) -1]
 * - at each entry here, if its value is non-zero then we update its
* reflection(left) and refraction(right) child nodes.
* - When done updating the level, increment the level pointer
*
* Assumptions:
* The root of the stack is already initialized
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int updateNextLevel(Stack4 &stack)
{
int start = powf(2,stack.level);
int stop = 2*start -1;
stack.top = stop+1; // update top of stack appropriately -- next empty slot
int child_nodes = 0; // boolean value indicating existence of child nodes
float kr, kt, n, c1, c2, sinT2;
if(stack.level >= stack.max_level) // check if stack is full
return 0;
for(int i = start; i <= stop; i++) //loop through all nodes on current level
{
    Ray r;                  // don't want to keep on accessing
copyRay(r, stack.r[i]); // an array, so copy ray info
    if(!r.intersected)      // if ray didn't intersect point, move on
continue;
kr = r.pt.mat.kr; // copy these values for easier typing
kt = r.pt.mat.kt;
// save reflected ray and color from the ray if object is reflective
if(kr > 0.)
{
child_nodes = 1; // next level actually got updated
stack.r[2*i].o = r.pt.p;
stack.r[2*i].d = normalize(r.d -2*r.pt.normal*dot(r.pt.normal,r.d));
stack.r[2*i].intersected = 0;
get_first_intersection(stack.r[2*i]);
stack.r[2*i].pt.mat.kr *= kr; // need to recursively multiply kr
stack.body[2*i] = kr*get_point_color(stack.r[2*i]);
}
else
{
stack.r[2*i].pt.mat.kr = 0.0;
}
// save refracted ray and color from the ray if object is non-opaque
if(kt > 0.)
{
child_nodes = 1; // next level actually got updated
// refractive index value n, depends on direction( in or out) of ray
// which is flipped each time a refracted ray is created
n = (r.entering ? 1./r.pt.mat.n : r.pt.mat.n);
stack.r[2*i+1].entering = 1- r.entering; // flip boolean value
c1 = -dot(r.pt.normal, r.d);
c2 = sqrt(1- n*n*(1 - c1*c1));
sinT2 = n*n*(1 - c1*c1);
if (sinT2 > 1.0) { // total internal reflection -- so use reflection
// code for ray direction
stack.r[2*i+1].d = normalize(r.d -2*r.pt.normal*dot(r.pt.normal,r.d));
} else {
stack.r[2*i+1].d = normalize((n*r.d) + (n*c1 -c2)*r.pt.normal);
}
stack.r[2*i+1].o = r.pt.p;
stack.r[2*i+1].intersected = 0;
get_first_intersection(stack.r[2*i+1]);
stack.r[2*i+1].pt.mat.kt *= kt; // recursively multiply kt
stack.body[2*i+1] = kt*get_point_color(stack.r[2*i+1]);
}
else
{
      stack.r[2*i+1].pt.mat.kt = 0.0; // refraction child lives at 2*i+1
}
}
if(child_nodes)
stack.level++; // if there was a child node
return child_nodes;
}
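/*
 * The refraction branch above is Snell's law in vector form: with
 * n = n1/n2 (1/mat.n when entering the object, mat.n when leaving),
 * c1 = -N.I and c2 = sqrt(1 - n*n*(1 - c1*c1)), the transmitted direction is
 *   T = n*I + (n*c1 - c2)*N
 * and when sinT2 = n*n*(1 - c1*c1) > 1 there is total internal reflection,
 * so the mirror reflection direction is used instead.
 */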
/*
* trace_ray
* Description:
* Trace a Ray going through a point, accumulate and return final pixel color
*
* Arguments:
* r: Incident Ray Object
*
* Return:
* final color at a given point.
*
* Operation:
* - As soon as a ray hits a surface, reflected and transmitted rays are
* generated and these are traced through the scene. Each of these rays
* contribute to the color at a given pixel.
* - To keep a check on the level of recursion keep tracing the ray until you
* hit max tree depth or until ray is done bouncing (whichever comes first)
* - When done, pop off all saved colors and accumulate them. This is the final
* pixel color.
*
* References:
* http://www.cs.unc.edu/~rademach/xroads-RT/RTarticle.html
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float4 trace_ray(Ray r)
{
float4 point_color;
float4 final_color = make_float4(0.); // final point color
Stack4 stack;
// keep looping while ray is still intersecting and there is still a color
// to push
get_first_intersection(r);
point_color = get_point_color(r);
initStack(stack, point_color, r);
while(updateNextLevel(stack)) // recursion not available on CUDA
continue; // so keep on updating next level in BinTree
// stack until done or hit max depth
while(popStack(stack, point_color)) // now just get pop off all saved
final_color+= point_color; // colors and accumulate them
return final_color; // then return final pixel color
}
/*
* (float3) mul
* Description:
* Transform vector by matrix (no translation)
*
* Arguments:
* M: 3x4 matrix
 *   v: vector of dimension 3
*
* Return:
* vector of dimension 3
*
* Operation:
* x = v.(row 1 of M)
* y = v.(row 2 of M)
* z = v.(row 3 of M)
* return (x,y,z)
*
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
/*
* (float3) mul
* Description:
* Transform vector by matrix with translation
*
* Arguments:
* M: 3x4 matrix
 *   v: vector of dimension 4
*
* Return:
* vector of dimension 4
*
* Operation:
* x = v.(row 1 of M)
* y = v.(row 2 of M)
* z = v.(row 3 of M)
* return (x,y,z, 1.0f)
*
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
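/*
 * Note: the float3 overload applies only the rotational 3x3 part of the
 * matrix (d_render uses it for the eye ray direction), while the float4
 * overload also picks up the translation column through v.w (d_render uses
 * it for the eye ray origin).
 */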
/*
* rgbaFloatToInt
* Description:
* Convert rgba color to 32-bit unsigned integer
*
* Arguments:
* rgba: rgba color
*
* Return:
* unsigned integer with 8-bit component colors spread out over 32 bits as
* follows:
 *      0       8      16      24      32
 *      +-------+-------+-------+-------+
 *      |  red  | green | blue  | alpha |
 *      +-------+-------+-------+-------+
*
* Operation:
* - Clamp all component colors to the range [0.0, 1.0]
 * - Multiply component colors by 255 and truncate to unsigned 8-bit ints
* - Shift components appropriately and OR in the values to get an accumulated
* 32-bit unsigned int.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
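/*
 * Worked example (illustrative only): rgba = (1.0, 0.5, 0.25, 1.0) maps to
 * red = 255 (0xFF), green = 127 (0x7F), blue = 63 (0x3F), alpha = 255 (0xFF),
 * which packs to 0xFF3F7FFF.
 */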
/*
* rFloatToInt
* Description:
* convert float to 32-bit unsigned integer
*
* Arguments:
 *   r: float value
*
* Return:
* unsigned integer with repeated 8-bit components spread out over 32 bits as
* follows:
* 0 7 15 23 32
* +-------+-------+-------+-------+
* | float | float | float | float |
* +-------+-------+-------+-------+
*
* Operation:
* - Clamp float to the range [0.0, 1.0]
 * - Multiply float by 255 and truncate to unsigned 8-bit int
* - Duplicate and shift int appropriately and OR in the values to get an
* accumulated 32-bit unsigned int.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__ uint rFloatToInt(float r)
{
r = __saturatef(r); // clamp to [0.0, 1.0]
return (uint(r*255)<<24) | (uint(r*255)<<16) | (uint(r*255)<<8) | uint(r*255);
}
/*
* d_render
* Description:
 *   Perform ray-traced rendering of the scene
*
* Arguments:
* d_output: pointer to output pixel grid
* imageW: width of pixel grid
* imageH: height of pixel grid
*
* Return:
* None
*
* Operation:
* For each pixel, identified by x,y coordinates:
* - Calculate eye ray in world space, based off of inverse View Matrix.
* - Raytrace eye ray (by calling trace_ray).
 * - Get the corresponding color and save it in output pixel grid.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__global__ void
d_render(uint *d_output, uint imageW, uint imageH)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix,
make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
  eyeRay.intersected = 0; // obviously hasn't intersected any object yet.
eyeRay.entering = 1; // starts off entering objects
if ((x < imageW) && (y < imageH)) {
// write output color
uint i = __umul24(y, imageW) + x;
// trace ray and determine pixel color
float4 col4 = trace_ray(eyeRay);
d_output[i] = rgbaFloatToInt(col4);
}
}
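/*
 * Illustrative host-side launch sketch only -- the real host code that
 * allocates d_output, uploads c_invViewMatrix and fills d_object/d_light is
 * not part of this header, and the block/grid sizes below are just an
 * example:
 *
 *   dim3 block(16, 16);
 *   dim3 grid((imageW + block.x - 1) / block.x,
 *             (imageH + block.y - 1) / block.y);
 *   d_render<<<grid, block>>>(d_output, imageW, imageH);
 */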
#endif // #ifndef _TRACER_KERNEL_H_
| 0c4792f7a6fb6e879aeaef8bff4239db8efa570e.cu | /*
* -----------------------------------------------------------------------------
* ----- TRACER_KERNEL.CU -----
* ----- REALTIME RAY TRACER -----
* -----------------------------------------------------------------------------
*
* File Description:
* This is the kernel file of the Ray Tracer. It implements the Ray Tracing
* logic and binary tree stack (needed for recursion).
*
* Table of Contents:
* pushStack - push a value unto the stack.
* popStack - pop a value from the stack
* copyMats - copy properties from one Material object to another
* copyRay - copy properties from one Ray object to another
* intersectSphere - intersect a ray with a sphere
* intersectPlane - intersect a ray with a plane
* pointInPlane - check if a point is in a plane
* intersectBox - intersect a ray with a box
* intersectCylinder - intersect a ray with a capless cylinder
 *   intersectCone          - intersect a ray with a capless cone
* get_first_intersection - get first intersection of a ray in the scene
* get_point_color - get color at a point
* initStack - initialize stack
* updateNextLevel - update the next level of the stack's binary tree
* trace_ray - trace a ray
* (float3) mul - transform vector by matrix (no translation)
* (float4) mul - transform vector by matrix with translation
* rgbaFloatToInt - convert rgba color to unsigned integer
* rFloatToInt - convert float to 32-bit unsigned int
* d_render - peform volume rendering
*
* Objects in module:
* float3x4 - 3x4 float matrix representation
* Material - scene object material properties
* Object - generic scene object
* Intersect - a Ray and Object's intersection point's properties
* Light - Light object
* Ray - Light Ray object
* Stack4 - Ray tracer's binary tree (BinTree) stack
*
* Assumptions:
* Using a machine with a CUDA-capable NVIDIA GPU
*
* Limitations:
* - CUDA doesn't officially support C++. Don't want to fight a battle with
* classes so I'll go ahead and define structs in place of objects (hopefully
* efficiently).
* - CUDA also doesn't support recursion. So have to use a binary tree based
* stack to implement recursion here.
 *    + A binary tree stack is necessary as a ray tracer recursively follows
* the path that light rays take as they intersect objects in the scene and
* result in two new rays: a relected and a refracted ray.
*
* References: (Had to learn this from somewhere)
* http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtrace0.htm
* http://www.cs.unc.edu/~rademach/xroads-RT/RTarticle.html
* http://fuzzyphoton.tripod.com/howtowrt.htm
* http://www.visualization.hpc.mil/wiki/Raytracing
*
* Compiler:
* NVIDIA's CUDA Compiler (NVCC)
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
#ifndef _TRACER_KERNEL_H_
#define _TRACER_KERNEL_H_
#include "cutil_math.h"
/*
* Raytracers have to deal with the finite precision of computer calculations.
* Let us consider a situation in which a ray hits an object and is reflected.
* We have to find the nearest object intersected by the reflected ray. Now it
* is perfectly possible that this ray goes and hits another part of the same
* object. Therefore this object must also be tested. Since this origin of the
* reflected ray lies on the surface of the object, there will be an
* intersection point at zero (0) distance. If computers had infinite precision,
* we would indeed get 0 as one of the distances and we could safely ignore it.
* But this is not the case. There will almost always be a small error in the
* distance. Thus the position of the point of reflection will be slightly
* different from the ideal value, and the distance that should be 0 will more
* liekly be some small quantity like 0.000001 (even if the point of reflection
* is ideally positioned). Yet we know that this result must be omitted. To
* handle this case, raytracers use a small value known as an "epsilon". All
* intersection distances less than the epsilon value are ignored. A good
* raytracer tries to make its epsilon setting as small as possible by reducing
* chances of numerical error, so that small details near the surface are not
* missed out
*/
#define EPSILON 0.00001f // higher precision epsilon value
#define INTERSECT_EPSILON 0.01f // lower precision epsilon value
/*
* A binary tree based stack is used to track the recursive path of the light
 * rays that intersect the objects in the scene and consequently reflect and
* refract.
*
* Of course these rays can bounce along the scene infinitely. It's only
* practical to stop following these rays after a certain number of
* intersections. So we define a maximum ray tree depth (MAX_RAY_TREE_DEPTH)
*
* This binary tree stack will be represented as an array, just like we would
* represent any binary tree as an array.
 * - The binary tree will have [2^(MAX_RAY_TREE_DEPTH+1) -1] nodes
* - There will be 1 empty node at index 0
* - Therefore total array size is: 2^(MAX_RAY_TREE_DEPTH+1)
*/
#define MAX_RAY_TREE_DEPTH 3 // max number of ray intersections to follow
#define MAX_STACK_DEPTH 16 // 2^(MAX_RAY_TREE_DEPTH+1) -1 nodes in BinTree
// (+1 empty node at index 0)
/*
* Supported scene object types
*/
#define SPHERE 1
#define PLANE 2
#define BOX 3
#define CYLINDER 4
#define CONE 5
///////////////////////////////////////////////////////////////////////////////
// Structs that represent module objects
///////////////////////////////////////////////////////////////////////////////
typedef struct { // 3x4 matrix representation
float4 m[3];
} float3x4;
/* material properties come up a lot, so best to define a struct for them */
typedef struct{ // material
float4 amb; // material ambient color (RGBA)
float4 diff; // material diffuse color
float4 spec; // material specular color
float shiny; // material shininess
float n; // refractive index
float kr; // reflection coefficients
float kt; // transmission coefficient
} Material;
typedef struct { // struct defining a generic object
int type; // object type -- see defines above
Material mat; // material properties
/* defined only if type == SPHERE */
float3 c; // sphere centre
float r; // sphere radius -- also works for cylinder and cone
/* defined only if type == PLANE */
  float4 pl;          // Plane is defined by the equation: A*x + B*y + C*z + D = 0
// So in this case:
// plane.x*x + plane.y*y + plane.z*z + plane.w = 0
/* defined only if type == BOX */
float3 min; // min and max vertices define a box... think bout it
float3 max;
/* defined only if type == CYLINDER OR CONE */
float ymin; // ymax-ymin gives length of cylinder
float ymax;
} Object;
typedef struct { // struct defining an intersection point's properties
float3 normal; // normal at intersection point
float3 p; // position of intersection
Material mat; // material properties here
} Intersect;
typedef struct { // Light object
float4 amb; // ambient color (RGBA)
float4 diff; // diffuse color
float4 spec; // specular color
float3 pos; // light source position
} Light;
struct Ray { // Ray for RayTracing
float3 o; // origin
float3 d; // direction
float t; // minimum distance travelled so far before intersection
Intersect pt; // intersection point
int intersected; // did ray already intersect object? 1 = yes, 0 =no
int entering; // ray will be entering or leaving an object?
};
typedef struct { // a BinTree "stack" very specific to this ray tracer
float4 body[MAX_STACK_DEPTH]; // color at a point
Ray r[MAX_STACK_DEPTH]; // ray that hit this point
int size; // number of slots in array/ nodes in BinTree
int max_level; // the maximum number of levels
int level; // points to last filled level
int top; // points to next location to push to...so one
// above top data element
} Stack4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
///////////////////////////////////////////////////////////////////////////////
// Variables/CONSTANTS that define the scene
///////////////////////////////////////////////////////////////////////////////
#define NUM_OBJECTS 10
#define NUM_LIGHTS 2
/* objects in scene */
__device__ Object d_object[NUM_OBJECTS];
Object object[NUM_OBJECTS];
/* background color */
__device__ float4 d_bgnd_col;
float4 bgnd_col;
/* lights */
__device__ Light d_light[NUM_LIGHTS];
Light light[NUM_LIGHTS];
///////////////////////////////////////////////////////////////////////////////
// functions for Ray Tracing
///////////////////////////////////////////////////////////////////////////////
/*
* pushStack
* Description:
* Push value unto the stack.
*
* Arguments:
* stack: BinTree stack
* val: Value to push onto stack.
*
* Return:
* - 1 if successful
* 0 if failure
*
* Operation:
* - if stack is full, return failure
* - else, add value to stack and increment top of stack.
*
* Assumption:
* The stack top pointer already points to an empty slot
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int pushStack(Stack4 &stack, float4 val)
{
// if stack is full, return failure
if(stack.top >= stack.size)
return 0;
// stack not full, so add value, increment stack's top, and return success
stack.body[stack.top++] = val;
return 1;
}
/*
* popStack
* Description:
* pop value from the stack.
*
* Arguments:
* stack: BinTree stack
 *   val:   Receives the value popped from the stack.
*
* Return:
* - 1 if successful
* 0 if failure
*
* Operation:
* - if stack is empty, return failure
* - else, decrement top of stack and get value.
*
* Assumption:
* The stack's top pointer needs to be decremented to get actual data.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int popStack(Stack4 &stack, float4 &val)
{
// if stack is empty, return failure
if(stack.top <= 0)
return 0;
// stack not empty, so decrement stack's top, get value, and return success
val = stack.body[--stack.top];
return 1;
}
/*
* copyMats
* Description:
* copy properties from one Material object to another
*
* Arguments:
* m_dest: Material object to copy properties to
* m_source: Material object to copy properties from
*
* Return:
* None
*
* Operation:
* assign all object properties in m_dest to the value of corresponding
* properties in m_source.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void copyMats(Material &m_dest, Material m_source)
{
m_dest.amb = m_source.amb;
m_dest.diff = m_source.diff;
m_dest.spec = m_source.spec;
m_dest.shiny = m_source.shiny;
m_dest.n = m_source.n;
m_dest.kr = m_source.kr;
m_dest.kt = m_source.kt;
return;
}
/*
* copyRay
* Description:
* copy properties from one Ray object to another
*
* Arguments:
* dest: Ray object to copy properties to
* source: Ray object to copy properties from
*
* Return:
* None
*
* Operation:
* assign all object properties in dest to the value of corresponding
* properties in source. Use copyMats() to copy the Ray's Intersection Material
* property.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void copyRay(Ray &dest, Ray source)
{
dest.o = source.o;
dest.d = source.d;
dest.t = source.t;
dest.intersected = source.intersected;
dest.entering = source.entering;
copyMats(dest.pt.mat, source.pt.mat);
dest.pt.normal = source.pt.normal;
dest.pt.p = source.pt.p;
}
/*
* intersectSphere
* Description:
* intersect a ray with a sphere
*
* Arguments:
* r: Incident Ray object
* s: Sphere object under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects sphere
* - If there is an intersection then there will actually be two intersections
* - Take the closest intersection point that is further than the ray origin
* and in front of the eye-point (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.cs.unc.edu/~rademach/xroads-RT/RTarticle.html
* - http://fuzzyphoton.tripod.com/howtowrt.htm
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectSphere(Ray &r, /* incident ray */
const Object &s /* sphere under consideration */)
{
float3 E0 = s.c-r.o;
float v = dot(E0, r.d);
float disc = s.r*s.r - (dot(E0,E0) - v*v);
if (disc < 0) // check if ray actually intersects sphere
return 0; // no intersection
// take the closest intersection point that is further than the ray origin
  // and in front of the eye-point (i.e. positive ray direction)
// that is why we compare t0 and t1 to INTERSECT_EPSILON.
// Why not 0.0f? Simply because there is a high risk that given our limited
// precision (in float), after a reflection from a point we find that our
// ray intersects the same object around the same point when it shouldn't
// with an infinite precision. By taking our starting point at a reasonable
// distance but close enough to not cause "gaps" we can avoid some artifacts
float t0 = v - sqrt(disc);
float t1 = v + sqrt(disc);
float t = 0.0;
int retVal = 0;
if ((t0>INTERSECT_EPSILON) && (t0 < t1))
{
t = t0;
retVal = 1;
}
if ((t1>INTERSECT_EPSILON) && (t1 < t0))
{
t = t1;
retVal = 1;
}
// if no intersection, end this
if (retVal == 0)
return 0;
  // test if t isn't the nearest intersection noted
if (r.intersected && (t > r.t))
return 0;
// this is a legitimate intersection... save the properties
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = r.o + t*r.d; // update point of intersection
r.pt.normal = normalize(r.pt.p-s.c); // update normal at point
copyMats(r.pt.mat, s.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* intersectPlane
* Description:
* intersect a ray with a plane
*
* Arguments:
* r: Incident Ray object
* pln: Plane object under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects plane (i.e. not parallel to plane)
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.siggraph.org/education/materials/HyperGraph/raytrace/rayplane_intersection.htm
* - equation of a plane through 3 points at:
* http://www.math.oregonstate.edu/home/programs/undergrad/CalculusQuestStudyGuides/vcalc/lineplane/lineplane.html
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectPlane(Ray &r, /* incident ray */
const Object &pln /* plane under consideration */)
{
float3 Pn = make_float3(pln.pl); // unit normal
float Vd = dot(r.d,Pn);
if (Vd == 0) // if Vd == 0 then ray is parallel to plane so no intersection
return 0;
float V0 = -(dot(r.o,Pn) + pln.pl.w);
float t = V0/Vd;
// test if ray intersects behind eye point (i.e. not in positive ray
  // direction), and if it isn't the nearest intersection noted
if ((t <= INTERSECT_EPSILON) || (r.intersected && (t > r.t)))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = r.o + t*r.d; // update point of intersection
r.pt.normal = normalize(Pn); // update normal at point of intersection
copyMats(r.pt.mat, pln.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* pointInPlane
* Description:
* Check if a point is in a plane. This is a helper function for intersectBox
*
* Arguments:
* n: plane's normal vector
* pp: point already established to be in a plane
* p: point under consideration
*
* Return:
* 1: point is in a plane
* 0: otherwise
*
* Operation:
* to check if a point (px,py,pz) is on a plane, check the equation
* nx*(ppx-px) + ny*(ppy-py) + nz*(ppz-pz) == 0.0
* - note that equality to 0.0 isn't going to happen so use an EPSILON check,
* i.e. check if the absolute value is less than EPSILON
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int pointInPlane(float3 n, /* normal*/
float3 pp, /* point_in_plane */
float3 p /* point under consideration */)
{
int inPlane = 0;
float3 vecInPlane = p-pp;
float res = dot(n, vecInPlane);
if (res < 0.0f)
res *= -1.0f;
if (res < EPSILON)
inPlane = 1;
return inPlane;
}
/*
* intersectBox
* Description:
* Intersect a ray with a box
*
* Arguments:
* r: Incident Ray object
* box: Box object under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects box
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.visualization.hpc.mil/wiki/Adding_in_a_Box_Object
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectBox(Ray &r, /* incident ray */
const Object &box /* box under consideration */)
{
float tmp, tnear = -1.0e6, tfar=1.0e6;
float3 tmin = (box.min - r.o)/r.d;
float3 tmax = (box.max - r.o)/r.d;
if(tmin.x>tmax.x) { tmp=tmin.x; tmin.x=tmax.x; tmax.x=tmp;}
if(tmin.y>tmax.y) { tmp=tmin.y; tmin.y=tmax.y; tmax.y=tmp;}
if(tmin.z>tmax.z) { tmp=tmin.z; tmin.z=tmax.z; tmax.z=tmp;}
tnear=max(tmin.z,max(tmin.y,max(tmin.x,tnear)));
tfar =min(tmax.z,min(tmax.y,min(tmax.x,tfar )));
if(tnear>tfar) return 0; // The box is missed.
if(tfar<INTERSECT_EPSILON) return 0; // The box is behind us.
if(tnear<INTERSECT_EPSILON) {return 0; } // We are inside the box.
// have an intersection
float t = tnear;
// check this is nearest intersection noted
if (r.intersected && (t>r.t))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = r.o + t*r.d; // update point of intersection
// update normal at point of intersection
float3 distance = box.min - r.pt.p;
float min_dist=abs(distance.x);
int min=0;
if(abs(distance.y) < min_dist) { min_dist=abs(distance.y); min=2; }
if(abs(distance.z) < min_dist) { min_dist=abs(distance.z); min=4; }
distance = box.max - r.pt.p;
if(abs(distance.x) < min_dist) { min_dist=abs(distance.x); min=1; }
if(abs(distance.y) < min_dist) { min_dist=abs(distance.y); min=3; }
if(abs(distance.z) < min_dist) { min_dist=abs(distance.z); min=5; }
r.pt.normal = make_float3(0, 0, 1);
if (min==0) {r.pt.normal = make_float3(-1, 0, 0);}
if (min==1) {r.pt.normal = make_float3( 1, 0, 0);}
if (min==2) {r.pt.normal = make_float3( 0,-1, 0);}
if (min==3) {r.pt.normal = make_float3( 0, 1, 0);}
if (min==4) {r.pt.normal = make_float3( 0, 0,-1);}
if (dot(r.pt.normal, r.d) > 0) // normal and ray must be in opposite dirs
r.pt.normal *= -1;
copyMats(r.pt.mat, box.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* intersectCylinder
* Description:
* Intersect a ray with a capless cylinder
*
* Arguments:
 *   r:   Incident Ray object
* cyl: Cylinder under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects cylinder
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.visualization.hpc.mil/wiki/Adding_in_a_Cylinder_Object
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectCylinder(Ray &r, /* incident ray */
const Object &cyl /* cylinder under consideration */)
{
float3 pvt_center = make_float3(0.0, (cyl.ymin+cyl.ymax)/2, 0.0);
float3 Rd=r.d;
float3 Ro=pvt_center - r.o;
float3 pnt_intrsct;
float a = Rd.x*Rd.x + Rd.z*Rd.z;
float b = Ro.x*Rd.x + Ro.z*Rd.z;
float c = Ro.x*Ro.x + Ro.z*Ro.z - (cyl.r*cyl.r);
float disc = b*b - a*c;
float t, d, root1, root2;
int return_value = 0;
// If the discriminant is less than 0, then we totally miss the cylinder.
if (disc > 0.0)
{
d = sqrt(disc);
root2 = (b + d)/a;
root1 = (b - d)/a;
// If root2 < 0, then root1 is also < 0, so they are both misses.
if (root2 > INTERSECT_EPSILON)
{
// If root2 > 0, and root1 < 0, we are inside the cylinder.
if(root1 < INTERSECT_EPSILON)
{
return_value=0;
        // If root2 > 0, and root1 > 0, we hit the cylinder.
} else {
t=root1; return_value= 1;
}
}
pnt_intrsct = r.o + Rd*t ;
// Limit the y values
if((pnt_intrsct.y>cyl.ymax)||(pnt_intrsct.y<cyl.ymin)) {
pnt_intrsct = r.o + Rd*root2 ;
t = root2;
// Are we too high in our first hit, but hit the back wall later
if((pnt_intrsct.y>cyl.ymax)||(pnt_intrsct.y<cyl.ymin)) {
return_value = 0;
}
}
}
if (!return_value)
return 0;
// have intersection
// check this is nearest intersection noted
if (r.intersected && (t>r.t))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = pnt_intrsct; // update point of intersection
//update normal
r.pt.normal = normalize(r.pt.p - make_float3(0.0,r.pt.p.y,0.0));
copyMats(r.pt.mat, cyl.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* intersectCone
* Description:
* Intersect a ray with a capless cone
*
* Arguments:
 *   r:   Incident Ray object
* con: Cone under consideration
*
* Return:
* 1: an intersection
* 0: no intersection
*
* Operation:
* - Check if ray actually intersects cone
* - Take the closest intersection point that is in front of the eye-point
* (i.e. positive ray direction).
* - set ray's intersection point properties
*
* References:
* - http://www.visualization.hpc.mil/wiki/Adding_in_a_Cone_Object
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int intersectCone(Ray &r, /* incident ray */
const Object &con /* cone under consideration */)
{
float3 pvt_center = make_float3(0.0, con.ymin, 0.0);
float pvt_height = con.ymax-con.ymin;
float pvt_e = -(con.r*con.r)/(pvt_height*pvt_height);
float3 Rd=r.d;
float3 Ro=r.o;
float3 omc=pvt_center - Ro;
omc.y = pvt_center.y - Ro.y + pvt_height;
float3 pnt_intrsct;
float a = Rd.x*Rd.x + pvt_e*Rd.y*Rd.y + Rd.z*Rd.z;
float b = omc.x*Rd.x + pvt_e*omc.y*Rd.y + omc.z*Rd.z;
float c = omc.x*omc.x + pvt_e*omc.y*omc.y + omc.z*omc.z;
float disc = b*b - a*c;
float t, d, root1, root2;
int return_value = 0;
// If the discriminant is less than 0, then we totally miss the cone.
if (disc > 0.0) {
d = sqrt(disc);
root2 = (b + d)/a;
root1 = (b - d)/a;
// If root2 < 0, then root1 is also < 0, so they are both misses.
if (root2 > INTERSECT_EPSILON) {
// If root2 > 0, and root1 < 0, we are inside the cone.
if(root1 < INTERSECT_EPSILON) {
return_value=0;
        // If root2 > 0, and root1 > 0, we hit the cone.
} else {
t=root1; return_value= 1;
}
}
pnt_intrsct = Ro + Rd*t ;
// Limit the y values: ymin <= y <= ymax
// If the point of intersection is too low or too high, record it as a
// miss.
if((pnt_intrsct.y>(pvt_center.y+pvt_height))||
(pnt_intrsct.y<pvt_center.y)) {
pnt_intrsct = Ro + Rd*root2 ;
t = root2;
// Are we too high in our first hit, but hit the back wall later
if((pnt_intrsct.y>(pvt_center.y+pvt_height))||
(pnt_intrsct.y<pvt_center.y)) {
return_value = 0;
}
}
}
if (!return_value)
return 0;
// have intersection
// check this is nearest intersection noted
if (r.intersected && (t>r.t))
return 0;
// this is a legitimate intersection
r.intersected = 1;
r.t = t; // record distance of nearest intersection
r.pt.p = pnt_intrsct; // update point of intersection
//update normal
a = r.pt.p.x - pvt_center.x;
b = r.pt.p.y - pvt_center.y - pvt_height;
c = r.pt.p.z - pvt_center.z;
r.pt.normal=normalize(make_float3(a,pvt_e*b,c));
copyMats(r.pt.mat, con.mat); // COPY material properties
return 1; // there was an intersection!
}
/*
* get_first_intersection
* Description:
* get first intersection of a ray in the scene.
*
* Arguments:
 *   r:   Incident Ray object
*
* Return:
* None
*
* Operation:
* Loop through all objects in the scene, calling the appropriate
* intersect{Object} function for each object type. At the end of the loop
* the Ray object is updated with the details of the nearest/first intersection
* point.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void get_first_intersection(Ray &r)
{
// this loop goes through all objects in our space and at the end
// the Ray is updated with the information of the nearest/first
// intersection
for(int i=0; i < NUM_OBJECTS; i++)
{
if(d_object[i].type == SPHERE) // call the right intersect
intersectSphere(r, d_object[i]); // function for each object type
else if(d_object[i].type == PLANE)
intersectPlane(r, d_object[i]);
else if(d_object[i].type == BOX)
intersectBox(r, d_object[i]);
else if(d_object[i].type == CYLINDER)
intersectCylinder(r, d_object[i]);
else if(d_object[i].type == CONE)
intersectCone(r, d_object[i]);
}
return; /* intersected object info is contained in ray */
}
/*
* get_point_color
* Description:
* get color at a point, accounting for all light sources and shadows.
*
* Arguments:
 *   r:   Incident Ray object
*
* Return:
* rgba color
*
* Operation:
* - Initialize the point color to an rgba of 0,0,0,0.
* - There will be a shadow variable that determines if a point is in the
* shadow of a light source. This variable will have a [0.0, 1.0] scale
* with 0 meaning shadow and 1 meaning no shadow.
* - Start by assuming the point is not in the shadows.
 * - If the ray didn't intersect an object, return the background color
* - To add in the contributions from all light sources, loop through them all
* and for each light source:
* + check if a point is in the shadow of this light source. If so, get the
* shadow factor
* + scale the diffuse+specular colors by this shadow factor before
* accumulating these colors into the point color.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float4 get_point_color(Ray &r)
{
float4 col4 = make_float4(0.0f); // will accumulate the colors
// the amount of diffuse+specular color to be accumulated is scaled by
// the shadow variable. Start by assuming this point is not in a shadow
float shadow = 1.;
  if (!r.intersected) // ray didn't intersect an object, so use background
return d_bgnd_col;
// need to add in the contributions from all the light sources
for(int i = 0; i< NUM_LIGHTS; i++)
  {
    shadow = 1.;                          // reset shadow factor for each light
    float3 lightDir = normalize(d_light[i].pos -r.pt.p); // from p to light
col4 += r.pt.mat.amb * d_light[i].amb; // add in ambient color
/* check if point is in shadow for this light source */
Ray shadowCheck; // create a shadow ray from intersection
shadowCheck.o = r.pt.p; // pt to light, then check if it
shadowCheck.d = lightDir; // intersects any objects in the scene.
shadowCheck.intersected = 0; // if it does, then shadow factor is set
get_first_intersection(shadowCheck); // to transmission coefficient of
if(shadowCheck.intersected) // that object. If object is opaque,
shadow = shadowCheck.pt.mat.kt; // shadow factor becomes 0
/* compute the dot product between normal and normalized lightdir */
float NdotL = max(dot(r.pt.normal,lightDir), 0.0);
float3 R = 2*r.pt.normal*NdotL - lightDir; // R = light ray's reflection
R = normalize(R);
float3 Eye = normalize(-r.d);
/* compute the dot product between reflection and eye-view */
float RdotE = max(dot(R,Eye),0.0);
if (NdotL > 0.0)
{
/* add diffuse component */
col4 += shadow*(r.pt.mat.diff * d_light[i].diff * NdotL);
}
/* add specular component */
col4 += shadow*(r.pt.mat.spec * pow(RdotE, r.pt.mat.shiny));
}
return col4;
}
/*
* initStack
* Description:
* Initialize BinTree stack.
*
* Arguments:
* stack: BinTree stack to be initialized
* val: rgba point color value to be put at root node
 *  r:     Ray that hit the point whose color is in the root node.
*
* Return:
* None
*
* Operation:
* - Initialize_stack by setting up the root node and other properties of the
* stack.
* - Also setup all rays to a non-intersected state and set all colors
* to blank. This way we can just loop through and add up colors easily.
* - Note that the root is initialized at index 1. This is just how array
* implementations of binary trees work. Index 0 is purposely left blank.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
void initStack(Stack4 &stack, float4 val, Ray &r)
{
stack.size = MAX_STACK_DEPTH;
stack.max_level = MAX_RAY_TREE_DEPTH;
stack.top = 2;
stack.level = 0;
for(int i=0; i < stack.size ;i++)
{
stack.body[i] = make_float4(0.);
stack.r[i].intersected = 0;
}
stack.body[1] = val;
copyRay(stack.r[1], r);
}
/*
* updateNextLevel
* Description:
* Update the next level of the stack's binary tree
 *  This function is very specific to the ray tracing algorithm.
* Remember that level is setup to point to the currently filled one. So
* when this function is called you plan on filling level+1
* At each level the array indices will run from [2^stack.level] to
* [2^(stack.level+ 1) -1]
*
* Arguments:
* stack: BinTree stack to be initialized
*
* Return:
* 1: Success
* 0: Failure
*
* Operation:
* You get the stack with the level set at that of the last filled level. Using
* this, you can update the next level, with this basic idea:
* - have a for loop of index i going from [2^level] to [2^(level+1) -1]
 *  - at each entry here, if its ray intersected an object then we update its
* reflection(left) and refraction(right) child nodes.
* - When done updating the level, increment the level pointer
*
* Assumptions:
* The root of the stack is already initialized
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
int updateNextLevel(Stack4 &stack)
{
int start = powf(2,stack.level);
int stop = 2*start -1;
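  // e.g. level 0 covers node 1 (the root), level 1 covers nodes 2..3,
  // level 2 covers nodes 4..7; the children of node i live at 2*i (reflection)
  // and 2*i+1 (refraction)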
stack.top = stop+1; // update top of stack appropriately -- next empty slot
int child_nodes = 0; // boolean value indicating existence of child nodes
float kr, kt, n, c1, c2, sinT2;
if(stack.level >= stack.max_level) // check if stack is full
return 0;
for(int i = start; i <= stop; i++) //loop through all nodes on current level
{
Ray r; // dont want to keep on accessing
copyRay(r, stack.r[i]); // an array, so copy ray info
if(!r.intersected) // if ray didnt intersect point, move on
continue;
kr = r.pt.mat.kr; // copy these values for easier typing
kt = r.pt.mat.kt;
// save reflected ray and color from the ray if object is reflective
if(kr > 0.)
{
child_nodes = 1; // next level actually got updated
stack.r[2*i].o = r.pt.p;
stack.r[2*i].d = normalize(r.d -2*r.pt.normal*dot(r.pt.normal,r.d));
stack.r[2*i].intersected = 0;
get_first_intersection(stack.r[2*i]);
stack.r[2*i].pt.mat.kr *= kr; // need to recursively multiply kr
stack.body[2*i] = kr*get_point_color(stack.r[2*i]);
}
else
{
stack.r[2*i].pt.mat.kr = 0.0;
}
// save refracted ray and color from the ray if object is non-opaque
if(kt > 0.)
{
child_nodes = 1; // next level actually got updated
// refractive index value n, depends on direction( in or out) of ray
// which is flipped each time a refracted ray is created
n = (r.entering ? 1./r.pt.mat.n : r.pt.mat.n);
stack.r[2*i+1].entering = 1- r.entering; // flip boolean value
      c1 = -dot(r.pt.normal, r.d);
      sinT2 = n*n*(1 - c1*c1);   // Snell's law: sin^2(theta_t) = n^2 * sin^2(theta_i)
      if (sinT2 > 1.0) { // total internal reflection -- so use reflection
                         // code for ray direction
        stack.r[2*i+1].d = normalize(r.d -2*r.pt.normal*dot(r.pt.normal,r.d));
      } else {
        c2 = sqrt(1 - sinT2);    // only take the square root when it is real
        stack.r[2*i+1].d = normalize((n*r.d) + (n*c1 -c2)*r.pt.normal);
      }
stack.r[2*i+1].o = r.pt.p;
stack.r[2*i+1].intersected = 0;
get_first_intersection(stack.r[2*i+1]);
stack.r[2*i+1].pt.mat.kt *= kt; // recursively multiply kt
stack.body[2*i+1] = kt*get_point_color(stack.r[2*i+1]);
}
    else
    {
      stack.r[2*i+1].pt.mat.kt = 0.0;   // the refraction child lives at 2*i+1
    }
}
if(child_nodes)
stack.level++; // if there was a child node
return child_nodes;
}
/*
* trace_ray
* Description:
* Trace a Ray going through a point, accumulate and return final pixel color
*
* Arguments:
* r: Incident Ray Object
*
* Return:
* final color at a given point.
*
* Operation:
* - As soon as a ray hits a surface, reflected and transmitted rays are
* generated and these are traced through the scene. Each of these rays
* contribute to the color at a given pixel.
* - To keep a check on the level of recursion keep tracing the ray until you
* hit max tree depth or until ray is done bouncing (whichever comes first)
* - When done, pop off all saved colors and accumulate them. This is the final
* pixel color.
*
* References:
* http://www.cs.unc.edu/~rademach/xroads-RT/RTarticle.html
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float4 trace_ray(Ray r)
{
float4 point_color;
float4 final_color = make_float4(0.); // final point color
Stack4 stack;
// keep looping while ray is still intersecting and there is still a color
// to push
get_first_intersection(r);
point_color = get_point_color(r);
initStack(stack, point_color, r);
while(updateNextLevel(stack)) // recursion not available on CUDA
continue; // so keep on updating next level in BinTree
// stack until done or hit max depth
while(popStack(stack, point_color)) // now just get pop off all saved
final_color+= point_color; // colors and accumulate them
return final_color; // then return final pixel color
}
/*
* (float3) mul
* Description:
* Transform vector by matrix (no translation)
*
* Arguments:
* M: 3x4 matrix
 *  v:    vector of dimension 3
*
* Return:
* vector of dimension 3
*
* Operation:
* x = v.(row 1 of M)
* y = v.(row 2 of M)
* z = v.(row 3 of M)
* return (x,y,z)
*
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
/*
* (float3) mul
* Description:
* Transform vector by matrix with translation
*
* Arguments:
* M: 3x4 matrix
 *  v:    vector of dimension 4
*
* Return:
* vector of dimension 4
*
* Operation:
* x = v.(row 1 of M)
* y = v.(row 2 of M)
* z = v.(row 3 of M)
* return (x,y,z, 1.0f)
*
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
/*
* rgbaFloatToInt
* Description:
* Convert rgba color to 32-bit unsigned integer
*
* Arguments:
* rgba: rgba color
*
* Return:
* unsigned integer with 8-bit component colors spread out over 32 bits as
* follows:
* 0 7 15 23 32
* +-------+-------+-------+-------+
 *  |  red  | green | blue  | alpha |
* +-------+-------+-------+-------+
*
* Operation:
* - Clamp all component colors to the range [0.0, 1.0]
 *  - Multiply component colors by 255 and truncate to unsigned 8-bit ints
* - Shift components appropriately and OR in the values to get an accumulated
* 32-bit unsigned int.
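 *  Example: rgbaFloatToInt(make_float4(1.0f, 0.0f, 0.0f, 1.0f)) returns
 *           0xFF0000FF (alpha 0xFF in bits 24-31, red 0xFF in bits 0-7).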
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
/*
* rFloatToInt
* Description:
* convert float to 32-bit unsigned integer
*
* Arguments:
* rgba: rgba color
*
* Return:
* unsigned integer with repeated 8-bit components spread out over 32 bits as
* follows:
* 0 7 15 23 32
* +-------+-------+-------+-------+
* | float | float | float | float |
* +-------+-------+-------+-------+
*
* Operation:
* - Clamp float to the range [0.0, 1.0]
 *  - Multiply float by 255 and truncate to unsigned 8-bit int
* - Duplicate and shift int appropriately and OR in the values to get an
* accumulated 32-bit unsigned int.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__device__ uint rFloatToInt(float r)
{
r = __saturatef(r); // clamp to [0.0, 1.0]
return (uint(r*255)<<24) | (uint(r*255)<<16) | (uint(r*255)<<8) | uint(r*255);
}
/*
* d_render
* Description:
 *  Perform ray tracing of the scene (one thread per output pixel)
*
* Arguments:
* d_output: pointer to output pixel grid
* imageW: width of pixel grid
* imageH: height of pixel grid
*
* Return:
* None
*
* Operation:
* For each pixel, identified by x,y coordinates:
* - Calculate eye ray in world space, based off of inverse View Matrix.
* - Raytrace eye ray (by calling trace_ray).
 *  - Get the corresponding color and save it in the output pixel grid.
*
* Revision History:
* Mar. 07, 2012 Nnoduka Eruchalu Initial Revision
* Mar. 15, 2014 Nnoduka Eruchalu Cleaned up comments
*/
__global__ void
d_render(uint *d_output, uint imageW, uint imageH)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
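  // u,v are the pixel's normalized device coordinates in [-1.0, 1.0]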
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix,
make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
  eyeRay.intersected = 0;     // obviously hasn't intersected any object yet.
eyeRay.entering = 1; // starts off entering objects
if ((x < imageW) && (y < imageH)) {
// write output color
uint i = __umul24(y, imageW) + x;
// trace ray and determine pixel color
float4 col4 = trace_ray(eyeRay);
d_output[i] = rgbaFloatToInt(col4);
}
}
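/*
 * Hypothetical host-side launch sketch (illustration only; not part of the
 * original kernel header). It assumes d_output was allocated with cudaMalloc
 * and c_invViewMatrix was already filled via cudaMemcpyToSymbol.
 */
inline void render_frame(uint *d_output, uint imageW, uint imageH)
{
  dim3 blockSize(16, 16);                                  // 256 threads per block
  dim3 gridSize((imageW + blockSize.x - 1) / blockSize.x,  // enough blocks to
                (imageH + blockSize.y - 1) / blockSize.y); // cover every pixel
  d_render<<<gridSize, blockSize>>>(d_output, imageW, imageH);
  cudaDeviceSynchronize();                                 // wait for the frame
}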
#endif // #ifndef _TRACER_KERNEL_H_
|
649a74f388b86235a0bd1142a91a835b2411e876.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_CUDA
#include <xxhash.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <unordered_map>
#include "glog/logging.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_desc.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
#include "paddle/utils/optional.h"
namespace phi {
namespace fusion {
namespace {
// TODO(wilber): Add a LRU strategy.
class CudnnConvDescManager {
public:
static CudnnConvDescManager* Instance() {
static CudnnConvDescManager global;
return &global;
}
struct CudnnCacheInfo {
phi::backends::gpu::TensorDescriptor* x_desc{nullptr};
phi::backends::gpu::FilterDescriptor* w_desc{nullptr};
phi::backends::gpu::TensorDescriptor* b_desc{nullptr};
phi::backends::gpu::TensorDescriptor* o_desc{nullptr};
phi::backends::gpu::ConvolutionDescriptor* conv_desc{nullptr};
phi::backends::gpu::ActivationDescriptor* act_desc{nullptr};
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
// TODO(wilber): The destruction of cudnn descriptor depends on the
// phi::dynload::cudnn singleton, but when the process exits, the singleton
// destruction order cannot be determined.
// After testing, it is found that the phi::dynload::cudnn related singleton
// on Windows is destructed first, causing the descriptor to be destructed
// and failed, while the descriptor on Linux is destructed first, and the
// phi::dynload::cudnn singleton is destructed later, so that it is correct.
// To circumvent this problem, we rely entirely on freeing resources when
// the process exits.
// ~CudnnCacheInfo() {
// if (x_desc) delete x_desc;
// if (w_desc) delete w_desc;
// if (b_desc) delete b_desc;
// if (o_desc) delete o_desc;
// if (conv_desc) delete conv_desc;
// if (act_desc) delete act_desc;
// }
};
CudnnCacheInfo* GetCudnnCacheInfo(
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& bias_dims,
const std::vector<int>& output_dims,
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
phi::DataType input_dtype,
int groups,
cudnnDataType_t dtype,
cudnnTensorFormat_t format,
const std::function<void(cudnnConvolutionFwdAlgo_t*,
size_t*,
cudnnTensorDescriptor_t,
cudnnFilterDescriptor_t,
cudnnTensorDescriptor_t,
cudnnConvolutionDescriptor_t)>& search_func,
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
// std::hash takes about 5us, xxhash can optimize to 2.5us.
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash reset state failed, maybe a environment error."));
}
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, bias_dims.data(), bias_dims.size() * sizeof(int));
// XXH64_update(state, output_dims.data(), output_dims.size() *
// sizeof(int));
XXH64_update(state, paddings.data(), paddings.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, dilations.data(), dilations.size() * sizeof(int));
XXH64_update(state, &input_dtype, sizeof(int));
XXH64_update(state, &groups, sizeof(int));
XXH64_update(state, &dtype, sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state, act.data(), act.length() * sizeof(char));
// XXH64_update(state, &value_max, sizeof(double));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
if (!cudnn_conv_cache_.count(hash_key)) {
std::lock_guard<std::mutex> lock(cache_mutex_);
if (!cudnn_conv_cache_.count(hash_key)) {
cudnn_conv_cache_[hash_key] = CudnnCacheInfo();
cudnn_conv_cache_[hash_key].x_desc =
GetTensorDescInfo(input_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].w_desc =
GetFilterDescInfo(filter_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].o_desc =
GetTensorDescInfo(output_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].b_desc =
GetTensorDescInfo(bias_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].conv_desc =
GetConvDescInfo(paddings, strides, dilations, groups, dtype);
cudnn_conv_cache_[hash_key].act_desc =
GetActivationDescInfo(act, value_max);
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
search_func(&algo,
&workspace_size,
cudnn_conv_cache_[hash_key].x_desc->desc(),
cudnn_conv_cache_[hash_key].w_desc->desc(),
cudnn_conv_cache_[hash_key].o_desc->desc(),
cudnn_conv_cache_[hash_key].conv_desc->desc());
cudnn_conv_cache_[hash_key].workspace_size = workspace_size;
cudnn_conv_cache_[hash_key].algo = algo;
}
}
return &cudnn_conv_cache_.at(hash_key);
}
struct ConvAttrCacheInfo {
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
};
ConvAttrCacheInfo* GetConvAttr(const std::vector<int>& paddings_t,
const std::vector<int>& dilations_t,
const std::string& padding_algorithm,
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& strides,
cudnnTensorFormat_t format) {
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_update(state, paddings_t.data(), paddings_t.size() * sizeof(int));
XXH64_update(state, dilations_t.data(), dilations_t.size() * sizeof(int));
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state,
padding_algorithm.data(),
padding_algorithm.length() * sizeof(char));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
if (!conv_attr_cache_.count(hash_key)) {
std::lock_guard<std::mutex> lock(attr_mutex_);
if (!conv_attr_cache_.count(hash_key)) {
ConvAttrCacheInfo cache;
auto paddings = paddings_t;
auto dilations = dilations_t;
std::vector<int> in_data_dims(input_dims.size() - 2);
std::vector<int> ksize(filter_dims.size() - 2);
if (format == CUDNN_TENSOR_NHWC) {
for (size_t i = 1; i < input_dims.size() - 1; ++i) {
in_data_dims[i - 1] = input_dims[i];
}
for (size_t i = 1; i < filter_dims.size() - 1; ++i) {
ksize[i - 1] = filter_dims[i];
}
} else {
for (size_t i = 2; i < input_dims.size(); ++i) {
in_data_dims[i - 2] = input_dims[i];
}
for (size_t i = 2; i < filter_dims.size(); ++i) {
ksize[i - 2] = filter_dims[i];
}
}
phi::UpdatePaddingAndDilation(&paddings,
&dilations,
padding_algorithm,
make_ddim(in_data_dims),
strides,
ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
std::vector<int> padding_common(data_dim, 0);
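        // asymmetric padding: cuDNN accepts a single pad value per spatial dim,
        // so pad the input tensor explicitly by the per-side difference and pass
        // only the common (minimum) padding on to cuDNN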
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_dims[0];
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[1] = input_dims[1];
} else {
new_input_shape_vec[data_dim + 1] = input_dims[data_dim + 1];
}
std::vector<int> input_pad(input_dims.size() * 2, 0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[i + 2] = input_dims[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] = input_dims[i + 1] + padding_diff[i];
}
if (format == CUDNN_TENSOR_NCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] =
paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] =
paddings[2 * i + 1] - padding_common[i];
}
}
cache.is_sys_pad = false;
cache.input_pad = input_pad;
cache.new_input_shape_vec = new_input_shape_vec;
} else {
cache.is_sys_pad = true;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
cache.dilations = dilations;
cache.paddings = padding_common;
conv_attr_cache_[hash_key] = cache;
}
}
return &conv_attr_cache_.at(hash_key);
}
private:
phi::backends::gpu::TensorDescriptor* GetTensorDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::TensorDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::FilterDescriptor* GetFilterDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::FilterDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::ConvolutionDescriptor* GetConvDescInfo(
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
int groups,
cudnnDataType_t dtype) {
auto* desc = new phi::backends::gpu::ConvolutionDescriptor();
desc->set(
dtype, paddings, strides, dilations, phi::AllowTF32Cudnn(), groups);
return desc;
}
phi::backends::gpu::ActivationDescriptor* GetActivationDescInfo(
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
auto* desc = new phi::backends::gpu::ActivationDescriptor();
cudnnActivationMode_t mode;
double relu_ceiling = 0.0;
if (act == "identity") {
mode = CUDNN_ACTIVATION_IDENTITY;
} else if (act == "relu") {
mode = CUDNN_ACTIVATION_RELU;
} else if (act == "relu6") {
relu_ceiling = 6.0;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "sigmoid") {
mode = CUDNN_ACTIVATION_SIGMOID;
} else if (act == "relux") {
relu_ceiling = value_max;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "tanh") {
mode = CUDNN_ACTIVATION_TANH;
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unknown CUDNN activation string: %s.", act));
}
desc->set(mode, relu_ceiling);
return desc;
}
std::mutex cache_mutex_;
std::unordered_map<size_t, CudnnCacheInfo> cudnn_conv_cache_;
std::mutex attr_mutex_;
std::unordered_map<size_t, ConvAttrCacheInfo> conv_attr_cache_;
};
} // namespace
template <typename T, typename Context>
void ConvFusionKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& bias,
const paddle::optional<DenseTensor>& residual,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
const std::vector<int>& dilations_t,
int groups,
const std::string& data_format,
const std::string& activation,
bool exhaustive_search,
const std::vector<int>& channels,
int user_workspace_size,
DenseTensor* output,
std::vector<DenseTensor*> outs) {
auto handle = ctx.cudnn_handle();
ctx.template Alloc<T>(output);
auto workspace_handle = ctx.cudnn_workspace_handle();
exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search;
bool deterministic = FLAGS_cudnn_deterministic;
PADDLE_ENFORCE_EQ(exhaustive_search && deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
static_cast<int64_t>(user_workspace_size));
workspace_size_limit = max_user_size * 1024 * 1024;
}
auto dtype = phi::backends::gpu::CudnnDataType<T>::type;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// Choose NHWC or NCHW by data_format attr.
auto compute_format = channel_last ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW;
VLOG(3) << "Compute ConvFusionOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == CUDNN_TENSOR_NHWC ? "NHWC" : "NCHW");
auto* conv_attr_cache = CudnnConvDescManager::Instance()->GetConvAttr(
paddings_t,
dilations_t,
padding_algorithm,
phi::vectorize<int>(input.dims()),
phi::vectorize<int>(filter.dims()),
strides,
compute_format);
DenseTensor transformed_input;
const int input_rank = input.dims().size();
auto unsys_pad_process = [&](const std::vector<int>& new_input_shape_vec,
const std::vector<int>& input_pad) {
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_input);
T pad_value(0.0);
switch (input_rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
};
if (conv_attr_cache->is_sys_pad) {
transformed_input.ShareDataWith(input);
} else {
unsys_pad_process(conv_attr_cache->new_input_shape_vec,
conv_attr_cache->input_pad);
}
std::vector<int> b_dims(input_rank, 1);
if (compute_format == CUDNN_TENSOR_NCHW) {
auto bias_rank = bias.dims().size();
if (input_rank == bias_rank) {
b_dims[1] = static_cast<int>(bias.dims()[1]);
} else {
b_dims[1] = static_cast<int>(bias.dims()[0]);
}
} else {
b_dims[input_rank - 1] = static_cast<int>(bias.dims()[0]);
}
auto search_func = [&](cudnnConvolutionFwdAlgo_t* cudnn_algo,
size_t* wks_bytes,
cudnnTensorDescriptor_t x_desc,
cudnnFilterDescriptor_t w_desc,
cudnnTensorDescriptor_t o_desc,
cudnnConvolutionDescriptor_t cudnn_conv_desc) {
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[phi::kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
phi::kNUM_CUDNN_FWD_ALGS,
&perf_count,
perf_results.get()));
*cudnn_algo = (perf_results.get())[best_algo_idx].algo;
#else
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit,
cudnn_algo));
#endif
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
*cudnn_algo,
wks_bytes));
} else {
std::array<cudnnConvolutionFwdAlgoPerf_t, phi::kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
int returned_algo_count;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
o_desc,
output->data(),
phi::kNUM_CUDNN_FWD_ALGS,
&returned_algo_count,
fwd_perf_stat.data(),
cudnn_workspace,
workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
*cudnn_algo = fwd_perf_stat[0].algo;
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
fwd_perf_stat[0].algo,
wks_bytes));
}
};
auto cudnn_cache_info = CudnnConvDescManager::Instance()->GetCudnnCacheInfo(
phi::vectorize<int>(transformed_input.dims()),
phi::vectorize<int>(filter.dims()),
b_dims,
phi::vectorize<int>(output->dims()),
conv_attr_cache->paddings,
strides,
conv_attr_cache->dilations,
transformed_input.dtype(),
groups,
phi::backends::gpu::CudnnDataType<T>::type,
compute_format,
search_func,
activation);
auto x_desc = cudnn_cache_info->x_desc->desc();
auto w_desc = cudnn_cache_info->w_desc->desc();
auto b_desc = cudnn_cache_info->b_desc->desc();
auto o_desc = cudnn_cache_info->o_desc->desc();
auto cudnn_conv_desc = cudnn_cache_info->conv_desc->desc();
auto act_desc = cudnn_cache_info->act_desc->desc();
auto algo = cudnn_cache_info->algo;
auto workspace_size = cudnn_cache_info->workspace_size;
if ((activation == "identity") && (!residual.get_ptr())) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
// But test in some case, the speed is slower, change to use
// cudnnConvolutionForward and cudnnAddTensor
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionForward(handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnAddTensor(
handle, &alpha, b_desc, bias.data(), &alpha, o_desc, output->data()));
} else {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY.
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = residual.get_ptr() ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionBiasActivationForward(
handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
residual.get_ptr() ? residual->data() : output->data(),
b_desc,
bias.data(),
act_desc,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (!channels.empty()) {
if (transformed_input.dims()[0] == 1 &&
compute_format == CUDNN_TENSOR_NCHW) {
// share data with Output
phi::DenseTensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize(
{transformed_input.dims()[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
      // TODO(qingiqng): do copy when batch size larger than 1
PADDLE_THROW(phi::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The received "
"batch size is %d, Input's shape is [%s].",
transformed_input.dims()[0],
transformed_input.dims()));
}
}
}
} // namespace fusion
} // namespace phi
PD_REGISTER_KERNEL(conv2d_fusion, // cuda_only
GPUDNN,
ALL_LAYOUT,
phi::fusion::ConvFusionKernel,
float,
double,
phi::dtype::float16) {}
#endif
| 649a74f388b86235a0bd1142a91a835b2411e876.cu | // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_CUDA
#include <xxhash.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <unordered_map>
#include "glog/logging.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_desc.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
#include "paddle/utils/optional.h"
namespace phi {
namespace fusion {
namespace {
// TODO(wilber): Add a LRU strategy.
class CudnnConvDescManager {
public:
static CudnnConvDescManager* Instance() {
static CudnnConvDescManager global;
return &global;
}
struct CudnnCacheInfo {
phi::backends::gpu::TensorDescriptor* x_desc{nullptr};
phi::backends::gpu::FilterDescriptor* w_desc{nullptr};
phi::backends::gpu::TensorDescriptor* b_desc{nullptr};
phi::backends::gpu::TensorDescriptor* o_desc{nullptr};
phi::backends::gpu::ConvolutionDescriptor* conv_desc{nullptr};
phi::backends::gpu::ActivationDescriptor* act_desc{nullptr};
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
// TODO(wilber): The destruction of cudnn descriptor depends on the
// phi::dynload::cudnn singleton, but when the process exits, the singleton
// destruction order cannot be determined.
// After testing, it is found that the phi::dynload::cudnn related singleton
// on Windows is destructed first, causing the descriptor to be destructed
// and failed, while the descriptor on Linux is destructed first, and the
// phi::dynload::cudnn singleton is destructed later, so that it is correct.
// To circumvent this problem, we rely entirely on freeing resources when
// the process exits.
// ~CudnnCacheInfo() {
// if (x_desc) delete x_desc;
// if (w_desc) delete w_desc;
// if (b_desc) delete b_desc;
// if (o_desc) delete o_desc;
// if (conv_desc) delete conv_desc;
// if (act_desc) delete act_desc;
// }
};
CudnnCacheInfo* GetCudnnCacheInfo(
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& bias_dims,
const std::vector<int>& output_dims,
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
phi::DataType input_dtype,
int groups,
cudnnDataType_t dtype,
cudnnTensorFormat_t format,
const std::function<void(cudnnConvolutionFwdAlgo_t*,
size_t*,
cudnnTensorDescriptor_t,
cudnnFilterDescriptor_t,
cudnnTensorDescriptor_t,
cudnnConvolutionDescriptor_t)>& search_func,
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
// std::hash takes about 5us, xxhash can optimize to 2.5us.
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash reset state failed, maybe a environment error."));
}
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, bias_dims.data(), bias_dims.size() * sizeof(int));
// XXH64_update(state, output_dims.data(), output_dims.size() *
// sizeof(int));
XXH64_update(state, paddings.data(), paddings.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, dilations.data(), dilations.size() * sizeof(int));
XXH64_update(state, &input_dtype, sizeof(int));
XXH64_update(state, &groups, sizeof(int));
XXH64_update(state, &dtype, sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state, act.data(), act.length() * sizeof(char));
// XXH64_update(state, &value_max, sizeof(double));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
if (!cudnn_conv_cache_.count(hash_key)) {
std::lock_guard<std::mutex> lock(cache_mutex_);
if (!cudnn_conv_cache_.count(hash_key)) {
cudnn_conv_cache_[hash_key] = CudnnCacheInfo();
cudnn_conv_cache_[hash_key].x_desc =
GetTensorDescInfo(input_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].w_desc =
GetFilterDescInfo(filter_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].o_desc =
GetTensorDescInfo(output_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].b_desc =
GetTensorDescInfo(bias_dims, input_dtype, format);
cudnn_conv_cache_[hash_key].conv_desc =
GetConvDescInfo(paddings, strides, dilations, groups, dtype);
cudnn_conv_cache_[hash_key].act_desc =
GetActivationDescInfo(act, value_max);
size_t workspace_size;
cudnnConvolutionFwdAlgo_t algo;
search_func(&algo,
&workspace_size,
cudnn_conv_cache_[hash_key].x_desc->desc(),
cudnn_conv_cache_[hash_key].w_desc->desc(),
cudnn_conv_cache_[hash_key].o_desc->desc(),
cudnn_conv_cache_[hash_key].conv_desc->desc());
cudnn_conv_cache_[hash_key].workspace_size = workspace_size;
cudnn_conv_cache_[hash_key].algo = algo;
}
}
return &cudnn_conv_cache_.at(hash_key);
}
struct ConvAttrCacheInfo {
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int> input_pad;
std::vector<int> new_input_shape_vec;
bool is_sys_pad;
};
ConvAttrCacheInfo* GetConvAttr(const std::vector<int>& paddings_t,
const std::vector<int>& dilations_t,
const std::string& padding_algorithm,
const std::vector<int>& input_dims,
const std::vector<int>& filter_dims,
const std::vector<int>& strides,
cudnnTensorFormat_t format) {
XXH64_state_t* const state = XXH64_createState();
if (state == nullptr) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_hash_t const seed = 0;
if (XXH64_reset(state, seed) == XXH_ERROR) {
PADDLE_THROW(phi::errors::PreconditionNotMet(
"xxhash create state failed, maybe a environment error."));
}
XXH64_update(state, paddings_t.data(), paddings_t.size() * sizeof(int));
XXH64_update(state, dilations_t.data(), dilations_t.size() * sizeof(int));
XXH64_update(state, input_dims.data(), input_dims.size() * sizeof(int));
XXH64_update(state, filter_dims.data(), filter_dims.size() * sizeof(int));
XXH64_update(state, strides.data(), strides.size() * sizeof(int));
XXH64_update(state, &format, sizeof(int));
XXH64_update(state,
padding_algorithm.data(),
padding_algorithm.length() * sizeof(char));
XXH64_hash_t hash_key = XXH64_digest(state);
XXH64_freeState(state);
if (!conv_attr_cache_.count(hash_key)) {
std::lock_guard<std::mutex> lock(attr_mutex_);
if (!conv_attr_cache_.count(hash_key)) {
ConvAttrCacheInfo cache;
auto paddings = paddings_t;
auto dilations = dilations_t;
std::vector<int> in_data_dims(input_dims.size() - 2);
std::vector<int> ksize(filter_dims.size() - 2);
if (format == CUDNN_TENSOR_NHWC) {
for (size_t i = 1; i < input_dims.size() - 1; ++i) {
in_data_dims[i - 1] = input_dims[i];
}
for (size_t i = 1; i < filter_dims.size() - 1; ++i) {
ksize[i - 1] = filter_dims[i];
}
} else {
for (size_t i = 2; i < input_dims.size(); ++i) {
in_data_dims[i - 2] = input_dims[i];
}
for (size_t i = 2; i < filter_dims.size(); ++i) {
ksize[i - 2] = filter_dims[i];
}
}
phi::UpdatePaddingAndDilation(&paddings,
&dilations,
padding_algorithm,
make_ddim(in_data_dims),
strides,
ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
std::vector<int> padding_common(data_dim, 0);
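        // asymmetric padding: cuDNN accepts a single pad value per spatial dim,
        // so pad the input tensor explicitly by the per-side difference and pass
        // only the common (minimum) padding on to cuDNN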
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_dims[0];
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[1] = input_dims[1];
} else {
new_input_shape_vec[data_dim + 1] = input_dims[data_dim + 1];
}
std::vector<int> input_pad(input_dims.size() * 2, 0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (format == CUDNN_TENSOR_NCHW) {
new_input_shape_vec[i + 2] = input_dims[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] = input_dims[i + 1] + padding_diff[i];
}
if (format == CUDNN_TENSOR_NCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] =
paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] =
paddings[2 * i + 1] - padding_common[i];
}
}
cache.is_sys_pad = false;
cache.input_pad = input_pad;
cache.new_input_shape_vec = new_input_shape_vec;
} else {
cache.is_sys_pad = true;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
cache.dilations = dilations;
cache.paddings = padding_common;
conv_attr_cache_[hash_key] = cache;
}
}
return &conv_attr_cache_.at(hash_key);
}
private:
phi::backends::gpu::TensorDescriptor* GetTensorDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::TensorDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::FilterDescriptor* GetFilterDescInfo(
const std::vector<int>& input_dims,
phi::DataType input_dtype,
cudnnTensorFormat_t input_format) {
auto* desc = new phi::backends::gpu::FilterDescriptor();
desc->set(
input_dims, input_format, backends::gpu::ToCudnnDataType(input_dtype));
return desc;
}
phi::backends::gpu::ConvolutionDescriptor* GetConvDescInfo(
const std::vector<int>& paddings,
const std::vector<int>& strides,
const std::vector<int>& dilations,
int groups,
cudnnDataType_t dtype) {
auto* desc = new phi::backends::gpu::ConvolutionDescriptor();
desc->set(
dtype, paddings, strides, dilations, phi::AllowTF32Cudnn(), groups);
return desc;
}
phi::backends::gpu::ActivationDescriptor* GetActivationDescInfo(
const std::string& act,
double value_max = std::numeric_limits<double>::max()) {
auto* desc = new phi::backends::gpu::ActivationDescriptor();
cudnnActivationMode_t mode;
double relu_ceiling = 0.0;
if (act == "identity") {
mode = CUDNN_ACTIVATION_IDENTITY;
} else if (act == "relu") {
mode = CUDNN_ACTIVATION_RELU;
} else if (act == "relu6") {
relu_ceiling = 6.0;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "sigmoid") {
mode = CUDNN_ACTIVATION_SIGMOID;
} else if (act == "relux") {
relu_ceiling = value_max;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
} else if (act == "tanh") {
mode = CUDNN_ACTIVATION_TANH;
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Unknown CUDNN activation string: %s.", act));
}
desc->set(mode, relu_ceiling);
return desc;
}
std::mutex cache_mutex_;
std::unordered_map<size_t, CudnnCacheInfo> cudnn_conv_cache_;
std::mutex attr_mutex_;
std::unordered_map<size_t, ConvAttrCacheInfo> conv_attr_cache_;
};
} // namespace
template <typename T, typename Context>
void ConvFusionKernel(const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& bias,
const paddle::optional<DenseTensor>& residual,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
const std::vector<int>& dilations_t,
int groups,
const std::string& data_format,
const std::string& activation,
bool exhaustive_search,
const std::vector<int>& channels,
int user_workspace_size,
DenseTensor* output,
std::vector<DenseTensor*> outs) {
auto handle = ctx.cudnn_handle();
ctx.template Alloc<T>(output);
auto workspace_handle = ctx.cudnn_workspace_handle();
exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search;
bool deterministic = FLAGS_cudnn_deterministic;
PADDLE_ENFORCE_EQ(exhaustive_search && deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
static_cast<int64_t>(user_workspace_size));
workspace_size_limit = max_user_size * 1024 * 1024;
}
auto dtype = phi::backends::gpu::CudnnDataType<T>::type;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// Choose NHWC or NCHW by data_format attr.
auto compute_format = channel_last ? CUDNN_TENSOR_NHWC : CUDNN_TENSOR_NCHW;
VLOG(3) << "Compute ConvFusionOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == CUDNN_TENSOR_NHWC ? "NHWC" : "NCHW");
auto* conv_attr_cache = CudnnConvDescManager::Instance()->GetConvAttr(
paddings_t,
dilations_t,
padding_algorithm,
phi::vectorize<int>(input.dims()),
phi::vectorize<int>(filter.dims()),
strides,
compute_format);
DenseTensor transformed_input;
const int input_rank = input.dims().size();
auto unsys_pad_process = [&](const std::vector<int>& new_input_shape_vec,
const std::vector<int>& input_pad) {
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_input);
T pad_value(0.0);
switch (input_rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, input, pad_value, &transformed_input);
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
};
if (conv_attr_cache->is_sys_pad) {
transformed_input.ShareDataWith(input);
} else {
unsys_pad_process(conv_attr_cache->new_input_shape_vec,
conv_attr_cache->input_pad);
}
std::vector<int> b_dims(input_rank, 1);
if (compute_format == CUDNN_TENSOR_NCHW) {
auto bias_rank = bias.dims().size();
if (input_rank == bias_rank) {
b_dims[1] = static_cast<int>(bias.dims()[1]);
} else {
b_dims[1] = static_cast<int>(bias.dims()[0]);
}
} else {
b_dims[input_rank - 1] = static_cast<int>(bias.dims()[0]);
}
auto search_func = [&](cudnnConvolutionFwdAlgo_t* cudnn_algo,
size_t* wks_bytes,
cudnnTensorDescriptor_t x_desc,
cudnnFilterDescriptor_t w_desc,
cudnnTensorDescriptor_t o_desc,
cudnnConvolutionDescriptor_t cudnn_conv_desc) {
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[phi::kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
phi::kNUM_CUDNN_FWD_ALGS,
&perf_count,
perf_results.get()));
*cudnn_algo = (perf_results.get())[best_algo_idx].algo;
#else
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardAlgorithm(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit,
cudnn_algo));
#endif
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
*cudnn_algo,
wks_bytes));
} else {
std::array<cudnnConvolutionFwdAlgoPerf_t, phi::kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
int returned_algo_count;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
o_desc,
output->data(),
phi::kNUM_CUDNN_FWD_ALGS,
&returned_algo_count,
fwd_perf_stat.data(),
cudnn_workspace,
workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
*cudnn_algo = fwd_perf_stat[0].algo;
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
x_desc,
w_desc,
cudnn_conv_desc,
o_desc,
fwd_perf_stat[0].algo,
wks_bytes));
}
};
auto cudnn_cache_info = CudnnConvDescManager::Instance()->GetCudnnCacheInfo(
phi::vectorize<int>(transformed_input.dims()),
phi::vectorize<int>(filter.dims()),
b_dims,
phi::vectorize<int>(output->dims()),
conv_attr_cache->paddings,
strides,
conv_attr_cache->dilations,
transformed_input.dtype(),
groups,
phi::backends::gpu::CudnnDataType<T>::type,
compute_format,
search_func,
activation);
auto x_desc = cudnn_cache_info->x_desc->desc();
auto w_desc = cudnn_cache_info->w_desc->desc();
auto b_desc = cudnn_cache_info->b_desc->desc();
auto o_desc = cudnn_cache_info->o_desc->desc();
auto cudnn_conv_desc = cudnn_cache_info->conv_desc->desc();
auto act_desc = cudnn_cache_info->act_desc->desc();
auto algo = cudnn_cache_info->algo;
auto workspace_size = cudnn_cache_info->workspace_size;
if ((activation == "identity") && (!residual.get_ptr())) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
// But test in some case, the speed is slower, change to use
// cudnnConvolutionForward and cudnnAddTensor
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionForward(handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnAddTensor(
handle, &alpha, b_desc, bias.data(), &alpha, o_desc, output->data()));
} else {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY.
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = residual.get_ptr() ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnConvolutionBiasActivationForward(
handle,
&alpha,
x_desc,
transformed_input.data(),
w_desc,
filter.data(),
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size,
&beta,
o_desc,
residual.get_ptr() ? residual->data() : output->data(),
b_desc,
bias.data(),
act_desc,
o_desc,
output->data()));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (!channels.empty()) {
if (transformed_input.dims()[0] == 1 &&
compute_format == CUDNN_TENSOR_NCHW) {
// share data with Output
phi::DenseTensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize(
{transformed_input.dims()[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
      // TODO(qingiqng): do copy when batch size larger than 1
PADDLE_THROW(phi::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The received "
"batch size is %d, Input's shape is [%s].",
transformed_input.dims()[0],
transformed_input.dims()));
}
}
}
} // namespace fusion
} // namespace phi
PD_REGISTER_KERNEL(conv2d_fusion, // cuda_only
GPUDNN,
ALL_LAYOUT,
phi::fusion::ConvFusionKernel,
float,
double,
phi::dtype::float16) {}
#endif
|
c5db4fcc607e02adb19816cabbab7ff423288c8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/reduce.h>
#include <loops/indexreduce.h>
#include <loops/pairwise_transform.h>
#include <loops/transform.h>
#include <loops/scalar.h>
#include <loops/broadcasting.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
//#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/grid_shaped.h>
#include <loops/grid_strided.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
//#include <sys/time.h>
// b40c only available for gcc :(
#ifdef __clang__
// do nothing
#elif __GNUC__
#include <b40c/util/error_utils.cuh>
#include <b40c/util/multiple_buffering.cuh>
#include <b40c/radix_sort/enactor.cuh>
#endif
#include <hiprand/hiprand.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
// this method isn't used, left here for legacy and caution purposes
// TLDR: don't use this way, it sucks
void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){
SyncInfo *sync = (SyncInfo *) data;
    printf("Finished stream: [%ld], kernel call: [%ld]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jIndex)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jIndex n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
    // no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
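    // very wide blocks (>= 768 threads) are split in half to trade threads for blocks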
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
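    // add one more block for the tail elements when n is not a multiple of num_threads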
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
    memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
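// Worked example (illustrative; assumes maxThreads = 512 and a blockLimit large enough not to clip):
// problemLength = 1,048,576 on a 16-SM cc 6.1 device gives blockThreshold = 32, so
// num_threads = 1048576 / (16 * 32) = 2048 -> clamped to 512, and
// num_blocks = 1048576 / 512 = 2048, with shmem = 512 * sharedMemoryPerThread + the base size.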
/*
* This method returns the shared memory threshold value. The default overflow ratio is 0.3.
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
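// Note the division by the 0.3 overflow ratio: the budget handed to callers is deliberately
// inflated, e.g. the default 49152-byte case becomes 49152 / 0.3 = 163840 bytes, which the
// per-block limits downstream then carve up.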
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads down to the nearest multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory for some cases - we need to count that in
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this point we've gathered all the required information; time to factor in the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this point we know the total memory used per block, and we also know the per-MP limit.
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the gpu can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks w.r.t. shared memory, so we should also take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
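// In short, the heuristic above balances three caps: the inflated per-SM shared-memory budget
// (shmemThreshold), the 2048 resident threads per SM, and the global blockLimit; num_threads is
// shaved in warp-sized (32) steps, but never below minThreads, until the reduction buffer fits.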
/*
* This method returns kernel launch params for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, hipFuncAttributes funcAttr) {
int xRank = shape::rank(xShapeInfo);
int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo);
int zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
int xLength = shape::length(xShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TAD here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks w.r.t. shared memory, so we should also take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
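// Worked example (illustrative; assumes blockLimit is large enough that no doubling applies):
// xLength = 262,144 on a 16-SM cc 5.x device gives effective_block_limit = 16 * 32 = 512, so
// num_threads starts at 262144 / 512 = 512 and num_blocks = 512; the 2048-threads-per-SM cap
// then shaves num_threads down to 64 while num_blocks stays at 512.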
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param xShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
int tadLength = 0;
int numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have a special case - reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768);
numTads = shape::length(xShapeInfo) / tadLength;
}
int xRank = shape::rank(xShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.x == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<int> * createScalarBuffer(hipStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
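// Minimal usage sketch (illustrative; someReduceKernel is a placeholder, not a real kernel):
//
//     ScalarInfo<float> scalar(*stream);
//     someReduceKernel<<<1, 256, 0, *stream>>>(x, xShapeInfo,
//             scalar.getDevicePointer(), scalar.getDeviceShapeInfo(),
//             scalar.getDimensionDevicePointer());
//     float value = scalar.getFinalResultFromDevice();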
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
int *xShapeInfo,
double *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D1 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 3);
hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
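// This scalar-returning pattern repeats throughout the file: launch into the preallocated
// result buffer at extraPointers[5], block on hipStreamSynchronize so the value is visible to
// the host, then read element [0]. The shaped variants below only synchronize in debug mode.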
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,
int *dimension, int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D2 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 3);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
int *dimension, int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(broadcastSimple, double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int xStride,
double *y,
int yStride,
double *result,
int resultStride,
double *extraParams, Nd4jIndex n) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams,
int *xIndexes,
int *yIndexes,
int *resultIndexes) {
/*
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D5 opNum:[%i]\n", opNum);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
dx,
y,
extraParams,
result,
xShapeInfo, shape::rank(hostXShapeInfo),
yShapeInfo, shape::rank(hostYShapeInfo),
resultShapeInfo, shape::rank(hostZShapeInfo),
xIndexes,
yIndexes,
resultIndexes, allocationPointer, deviceTADShapeInfo);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
*/
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D7 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D8 opNum:[%i]\n", opNum);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
//checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* We have separate kernels, optimized for different numbers of dimensions, for reductions
*/
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D9 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[22]);
// for the LogExpSum op we need to know the max value and store it
if (opNum == 19) {
double tmp = execReduceScalarDouble(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *y,
int *yShapeInfo){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D11 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3ScalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// since this method should return a scalar value, we block on this call
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D12 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int xStride,
double *result,
int resultStride,
double scalar,
double *extraParams,
Nd4jIndex n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]);
functions::scalar::ScalarTransform<double>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double scalar,
double *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<double>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double scalar,
double *extraParams,
Nd4jIndex n,
int *xIndexes,
int *resultIndexes){
printf("Unsupported operation: scalarIndices\n");
/*
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D15 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]);
scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
x,
extraParams,
result,
resultIndexes, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
*/
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,bool biasCorrected) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D17 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
// we have to limit grid size here, due to limited nature of reduction/allocation pointers
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,
int *dimension, int dimensionLength,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(double), 8);
// we're limiting maximum grid size for summaryStats ops
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int xStride,
double *z,
int zStride,
double *extraParams,
Nd4jIndex n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D20 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
// pointer to the special (pre-allocated) buffer used by the special ops below
double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4);
int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
/**
* ops between 38 and 41 are special ops:
* SoftMax, LogSoftMax, SoftMaxDerivative, IsMax
* On cuda we execute them as a composition of the existing reduce, broadcast and transform
* kernels (or as a single one-block call for the plain vector case) rather than as one dedicated kernel.
*/
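// The blockwise path below follows the numerically stable formulation
//     softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x))
// composed from the existing primitives: reduce (max, sum), broadcast (subtract, divide) and
// transform (exp, plus log for LogSoftMax or the derivative transform for SoftMaxDerivative).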
// simple trick to work around reductions into a scalar
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's a vector, we just go directly to the op in 1 block
/*
* For vector cases of everything, but IsMax (41) we go for single-kernel calls
*/
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(256, length);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
} else {
// going for blockwise specials
// we'll do some pointer mangling here, and execute kernels one by one
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// TODO: we could get rid of this one eventually
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// exp 3
execTransformDouble(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceDouble(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastDouble(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// finish with log (LogSoftMax, opNum 40) or the derivative transform (SoftMaxDerivative, opNum 39)
if (opNum == 40)
execTransformDouble(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformDouble(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
checkCudaErrors(hipStreamSynchronize(*stream));
delete hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
/**
* In the case of vector input for IsMax, it just turns into an IndexReduce call plus a filler call
*/
int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxDouble), dim3(1), dim3(128), 0, *stream , result, shape::length(hostXShapeInfo), targetIdx);
} else {
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<double *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call IMax on the specified dimension
execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute filler
hipLaunchKernelGGL(( fillDimensionalIsMaxDouble), dim3(blockLimit), dim3(64), funcAttributes[37].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
checkCudaErrors(hipStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformDouble\n");
break;
}
}
}
} else {
// for Im2Col & Col2Im we enforce higher dimensionality
// TODO: investigate this on high-end gpus
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(double);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(double);
}
// Histogram op requires additional memory chunk
// FIXME: make this one use the cache
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
hipMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(double));
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
// we need guaranteed sync here, due to temp memory release
if (nd4j::Environment::getInstance()->isDebug() || opNum == 48)
checkCudaErrors(hipStreamSynchronize(*stream));
// release Histogram memory
if (opNum == 48) {
hipFree((void *)maskedAllocPointer);
}
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams,
int *xIndexes,
int *resultIndexes) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D21 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]);
hipLaunchKernelGGL(( transformDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultIndexes, allocPointer, reductionPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// once again - since we return a scalar value from this method, we block on this kernel launch
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execIndexReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H1 opNum:[%i]\n", opNum);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AH1 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// blocking for scalar output
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF2 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x), dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execIndexReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH2 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
int *dimension, int dimensionLength){
/*
hipEvent_t start;
hipEventCreateWithFlags(&start, hipEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS))
/*
SyncInfo *info = new SyncInfo();
info->streamId = 32;
info->callId = 1234567890;
timespec ts1;
timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts1);
*/
/*
broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
y,
yShapeInfo, shape::rank(hostYShapeInfo),
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
*/
/*
clock_gettime(CLOCK_REALTIME, &ts2);
// hipEventRecord(start, 0);
// hipStreamAddCallback(*stream, syncCallback, (void*)info, 0);
*/
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
/*
clock_gettime(CLOCK_REALTIME, &tsY);
printf("Execution time: %i\n", (ts2.tv_nsec - ts1.tv_nsec));
printf("Overall time: %i\n", (tsY.tv_nsec - tsX.tv_nsec));
printf("Callback setup time: %i\n", (tsY.tv_nsec - ts2.tv_nsec));
printf("-------------------------------------\n");
*/
}
void NativeOps::execBroadcastHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
int *dimension, int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int xStride,
float *y,
int yStride,
float *result,
int resultStride,
float *extraParams, Nd4jIndex n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
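// Pairwise transforms combine two arrays element by element. Illustrative sketch
// of the strided case (sequential form of what PairWiseTransform<T> parallelizes):
//
//   for (Nd4jIndex i = 0; i < n; i++)
//       result[i * resultStride] = OpType::op(dx[i * xStride], y[i * yStride], extraParams);
//
// OpType::op is a placeholder; the concrete functor is selected by opNum. The
// fixed launchDims(512, 512, 2048) is a generic grid/block/shared-memory choice
// rather than something derived from the input shape.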
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int xStride,
float16 *y,
int yStride,
float16 *result,
int resultStride,
float16 *extraParams, Nd4jIndex n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int *xShapeInfo,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams,
int *xIndexes,
int *yIndexes,
int *resultIndexes){
// NOTE: the indexed pairwise transform path is currently disabled; the original implementation is kept below for reference.
/*
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F5 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF5 opNum:[%i]\n", opNum);
pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
dx,
y,
extraParams,
result,
xShapeInfo, shape::rank(hostXShapeInfo),
yShapeInfo, shape::rank(hostYShapeInfo),
resultShapeInfo, shape::rank(hostZShapeInfo),
xIndexes,
yIndexes,
resultIndexes, allocationPointer, deviceTADShapeInfo);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
*/
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int *xShapeInfo,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams,
int *xIndexes,
int *yIndexes,
int *resultIndexes){
// NOTE: the indexed pairwise transform path is currently disabled; the original implementation is kept below for reference.
/*
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H5 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float16), 0);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH5 opNum:[%i]\n", opNum);
pairWiseTransformHalfIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
dx,
y,
extraParams,
result,
xShapeInfo, shape::rank(hostXShapeInfo),
yShapeInfo, shape::rank(hostYShapeInfo),
resultShapeInfo, shape::rank(hostZShapeInfo),
xIndexes,
yIndexes,
resultIndexes, allocationPointer, deviceTADShapeInfo);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
*/
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int *xShapeInfo,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int *xShapeInfo,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F7 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H7 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,
int *dimension,int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
// we call different kernels optimized for different number of dimensions in TAD
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2);
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2);
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2);
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
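// The three branches above select a kernel specialized for the rank of the
// tensor-along-dimension (TAD). Illustrative example, not tied to a specific
// call site: reducing a [4, 3, 2] array over dimensions {1, 2} yields 4 TADs of
// shape [3, 2]; each TAD collapses to one value, so the result has length 4.
// dimensionLength == 1 and rank(TAD) <= 3 are the cheap, common cases and get
// dedicated 1D/3D kernels; anything else falls through to the generic XD kernel.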
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,
int *dimension,int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H8 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH8 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
// calling different kernels, depending on number of dimensions in TAD
if (dimensionLength == 1) {
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else if (shape::rank(hostTADShapeInfo) <= 3) {
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else {
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F9 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF9 opNum:[%i]\n", opNum);
// for the LogSumExp op (19) we need the max value first: run the Max reduction (op 3),
// which leaves its result in resultPointer; that buffer is then handed on via extraParams
if (opNum == 19) {
execReduceScalarFloat(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
// blocking this one
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
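// Background for the opNum == 19 (LogSumExp) special case above: the numerically
// stable formulation is
//
//   logsumexp(x) = max(x) + log( sum_i exp(x_i - max(x)) )
//
// which is why a Max reduction (op 3) runs first and its result is forwarded to
// the main reduction through resultPointer/extraParams.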
float NativeOps::execReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H9 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH9 opNum:[%i]\n", opNum);
// for the LogSumExp op (19) we need the max value first: run the Max reduction (op 3),
// which leaves its result in resultPointer; that buffer is then handed on via extraParams
if (opNum == 19) {
execReduceScalarHalf(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
// blocking call
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF10 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH10 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
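// reduce3 ops accumulate a single value from two input arrays (x and y);
// typical members of this family are dot product, Euclidean and Manhattan
// distance, and cosine similarity. The overloads above reduce over the whole
// arrays, while the dimension-aware overloads below produce one value per TAD.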
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *y,
int *yShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F11 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF11 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execReduce3ScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *y,
int *yShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H11 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH11 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z + 2048, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
checkCudaErrors(hipStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduce3Float), dim3(1), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
hipLaunchKernelGGL(( reduce3Half), dim3(1), dim3(launchDims.y), launchDims.z, *stream,
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int xStride,
float *result,
int resultStride,
float scalar,
float *extraParams,
Nd4jIndex n){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
// op selection and kernel launch are handled inside ScalarTransform<float>::executeCudaStrided
functions::scalar::ScalarTransform<float>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int xStride,
float16 *result,
int resultStride,
float scalar,
float16 *extraParams,
Nd4jIndex n){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
// op selection and kernel launch are handled inside ScalarTransform<float16>::executeCudaStrided; the old DISPATCH_SIMPLE path is kept below for reference
//DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS))
float16 sc = (float16) scalar;
functions::scalar::ScalarTransform<float16>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, sc, extraParams, n);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
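// Scalar transforms apply one op between every element and a single scalar.
// Illustrative sketch of the strided case handled above (sequential form of
// what ScalarTransform<T>::executeCudaStrided parallelizes across the grid):
//
//   for (Nd4jIndex i = 0; i < n; i++)
//       result[i * resultStride] = OpType::op(x[i * xStride], scalar, extraParams);
//
// OpType::op is a placeholder here; the concrete functor is chosen by opNum.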
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float scalar,
float *extraParams){
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
Nd4jIndex n = shape::length(hostXShapeInfo);
// if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("F14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
// op selection and kernel launch are handled inside ScalarTransform<float>::executeCudaShaped; the old DISPATCH_SIMPLE path is kept below for reference
//DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *result,
int *resultShapeInfo,
float scalarF,
float16 *extraParams){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
//if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("H14 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
float16 scalar = (float16) scalarF;
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
// op selection and kernel launch are handled inside ScalarTransform<float16>::executeCudaShaped; the old DISPATCH_SIMPLE path is kept below for reference
//DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float16>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float scalar,
float *extraParams,
int *xIndexes,
int *resultIndexes){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F15 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF15 opNum:[%i]\n", opNum);
// NOTE: the indexed scalar transform path is currently disabled; the original kernel launch is kept below for reference.
/*
scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
x,
extraParams,
result,
resultIndexes, allocPointer);
*/
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// we limit grid size for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
float NativeOps::execSummaryStatsScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return (float) functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// limiting number of blocks in grid, to match buffer memory size
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
// as everywhere else, we limit maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float), 8);
// as everywhere else, we limit maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,bool biasCorrected){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float16), 8);
// as everywhere else, we limit maximal number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int xStride,
float *z,
int zStride,
float *extraParams,
Nd4jIndex n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int xStride,
float16 *z,
int zStride,
float16 *extraParams,
Nd4jIndex n) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
float *dx,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// pointer to the dedicated device buffer used by the special ops handled below (SoftMax and friends)
float *specialPointer = reinterpret_cast<float *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4);
int *maskedAllocPointer = allocPointer;
int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF20 opNum:[%i]\n", opNum);
// workaround for the special ops that internally reduce into a scalar:
// SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float) * 4);
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
} else {
// going for blockwise specials
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
for (int e = 0; e < 16; e++)
tempPointers[e] = extraPointers[e];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// exp 3
execTransformFloat(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceFloat(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastFloat(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformFloat(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformFloat(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
checkCudaErrors(hipStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
// if that's 1D input - we'll just go for single dim IMax op call + filler
int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxFloat), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
// going for dimension-based IsMax
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
hipLaunchKernelGGL(( fillDimensionalIsMaxFloat), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
checkCudaErrors(hipStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
// we're enforcing larger grids for Col2Im & Im2Col
// TODO: for high-end gpus we might use higher values here
if (opNum == 37 || opNum == 36) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float);
}
// the histogram op requires an additional temporary memory chunk
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
hipMalloc((void **) &maskedAllocPointer, length * launchDims.x * sizeof(float));
}
DISPATCH_SIMPLE(transformShaped, float,
PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo,
shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
// we need guaranteed sync here, due to temp memory release
if (nd4j::Environment::getInstance()->isDebug() || opNum == 48)
checkCudaErrors(hipStreamSynchronize(*stream));
// release memory chunk
if (opNum == 48) {
hipFree((void *) maskedAllocPointer);
}
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
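// The SoftMax family above is assembled from primitive reduce/broadcast/transform
// calls rather than one fused kernel. For reference, the math being reproduced
// is the numerically stable form (matching the "max 3", "sub 1", "exp 3",
// "sum 1", "divide 3" comments):
//
//   m             = max_j(x_j)
//   s_i           = exp(x_i - m)
//   softmax_i     = s_i / sum_j(s_j)
//   logsoftmax_i  = log(softmax_i)          // extra log pass when opNum == 40
//
// SoftMaxDerivative (op 39) applies its derivative transform (op 42) instead of
// the final log.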
void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum,
float16 *dx,
int *xShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
float16 * special = (float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH20 opNum:[%i]\n", opNum);
int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
// workaround for the special ops that internally reduce into a scalar:
// SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's vector, we just go directly to op in 1 block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float16) * 4);
// this macro builds a chain of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
} else {
// going for blockwise specials
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
for (int e = 0; e < 16; e++)
tempPointers[e] = extraPointers[e];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// FIXME: fix this
hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// exp 3
execTransformHalf(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceHalf(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastHalf(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (opNum == 40) {
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
execTransformHalf(tempPointers, 47, result, resultShapeInfo, result, resultShapeInfo, extraParams);
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformHalf(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformHalf(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
checkCudaErrors(hipStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
// 1D input, aka vector
int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
hipLaunchKernelGGL(( fillIsMaxHalf), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float16 *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
hipLaunchKernelGGL(( fillDimensionalIsMaxHalf), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
checkCudaErrors(hipStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformHalf\n");
break;
}
}
}
} else {
// Im2Col & Col2Im enforced grids
if (opNum == 37 || opNum == 36) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float16);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float);
}
// Histogram op requires additional memory chunk
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
hipMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(float16));
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
// we need guaranteed sync here, due to temp memory release
if (nd4j::Environment::getInstance()->isDebug() || opNum == 48)
checkCudaErrors(hipStreamSynchronize(*stream));
// release that histogram memory chunk
if (opNum == 48) {
hipFree((void *)maskedAllocPointer);
}
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams,
int *xIndexes,
int *resultIndexes) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F21 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF21 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( transformFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultIndexes, allocPointer, reductionPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int *xShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams,
int *xIndexes,
int *resultIndexes) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H21 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH21 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( transformHalfIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultIndexes, allocPointer, reductionPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
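// Generic device-side flatten: threads walk the input with a grid-stride loop. When
// the requested order matches the input order and both buffers expose a valid
// element-wise stride, a direct strided copy is used; otherwise each linear index is
// decomposed with ind2sub/ind2subC and resolved through shape::getOffset.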
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
int *resultShapeInfo,
T *input,
int *inputShapeInfo, int *allocationPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int *zShape = shape::shapeOf(resultShapeInfo);
int *zStride = shape::stride(resultShapeInfo);
int *yShape = shape::shapeOf(inputShapeInfo);
int *yStride = shape::stride(inputShapeInfo);
char yOrder = shape::order(inputShapeInfo);
int len = shape::length(inputShapeInfo);
int resultEWS = shape::elementWiseStride(resultShapeInfo);
int inputEWS = shape::elementWiseStride(inputShapeInfo);
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
int rank = shape::rank(inputShapeInfo);
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
}
} else {
int rank = shape::rank(inputShapeInfo);
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<double>(
offset,
order, result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelHalf(int offset,
char order,
float16 *result,
int *resultShapeInfo,
float16 *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float16>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
hipLaunchKernelGGL(( flattenKernelFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::flattenHalf(
Nd4jPointer *extraPointers,
int offset,
char order,
float16 *result,
int *resultShapeInfo,
float16 *input,
int *inputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH222 opNum:[7]\n");
hipLaunchKernelGGL(( flattenKernelHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D30 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);
hipLaunchKernelGGL(( flattenKernelDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
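// Probes peer-to-peer capability across every device pair via hipDeviceCanAccessPeer;
// supportedP2P is set only if all pairs can reach each other, and a single-device
// system is treated as P2P-capable by definition.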
void NativeOps::checkP2P() {
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
hipSetDevice(x);
hipDeviceCanAccessPeer(&canAccess, x , y);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
hipSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
hipSetDevice(x);
hipDeviceCanAccessPeer(&canAccess, x , y);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(y, 0);
} else {
hipDeviceDisablePeerAccess(y);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
}
}
}
hipSetDevice(curDevice);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
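// One-time device setup: caches hipDeviceProp_t per device, raises the device stack
// limit, enables P2P access where supported, and records hipFuncAttributes for the
// kernels so the launch-parameter helpers can account for static shared memory and
// register usage.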
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
//void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
hipFuncGetAttributes(&funcAttributes[1], transformFloatIndexes);
//void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
hipFuncGetAttributes(&funcAttributes[2], transformFloatIndexes);
//hipFuncGetAttributes(&funcAttributes[3], (void *)functions::summarystats::summaryStatsReduceFloat);
//hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
// void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
// hipFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes);
// void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
// hipFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes);
hipFuncGetAttributes(&funcAttributes[7], reduce3Float);
hipFuncGetAttributes(&funcAttributes[8], reduceSimpleGenericXD_0_float);
// printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[28], reduceSimpleGeneric1D_0_float); // 1D
// printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[29], reduceSimpleGeneric3D_0_float); // 6D
// printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes);
hipFuncGetAttributes(&funcAttributes[30], flattenKernelFloat);
hipFuncGetAttributes(&funcAttributes[31], concatKernelFloat);
// hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
// hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
// hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
hipFuncGetAttributes(&funcAttributes[12], broadcastSimple_0_float);
hipFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
// void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
hipFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes);
//void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
hipFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes);
//hipFuncGetAttributes(&funcAttributes[17], functions::summarystats::summaryStatsReduceDouble);
// hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
//void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble;
// hipFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes);
//void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble;
// hipFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes);
hipFuncGetAttributes(&funcAttributes[21], reduce3Double);
hipFuncGetAttributes(&funcAttributes[22], reduceSimpleGenericXD_0_double);
// hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
// hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
// hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
hipFuncGetAttributes(&funcAttributes[26], broadcastSimple_0_double);
hipFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
hipFuncGetAttributes(&funcAttributes[32], reduceSimpleGeneric1D_0_double); // 1D
hipFuncGetAttributes(&funcAttributes[33], reduceSimpleGeneric3D_0_double); // 6D
hipFuncGetAttributes(&funcAttributes[34], flattenKernelDouble);
hipFuncGetAttributes(&funcAttributes[35], concatKernelDouble);
hipFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat);
hipFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble);
hipFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat);
hipFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble);
hipFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat);
hipFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);
hipFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);
hipFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);
/////////////////////////
hipFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
hipFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
hipFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);
//
//hipFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float);
//hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16);
//hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->hipblasSgemv = (CublasSgemv)functions[0];
this->hipblasDgemv = (CublasDgemv)functions[1];
this->hipblasHgemm = (CublasHgemm)functions[2];
this->hipblasSgemm = (CublasSgemm)functions[3];
this->hipblasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
hipError_t res = hipHostMalloc((void **)&pointer, memorySize, hipHostMallocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
hipError_t res = hipMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree((void *) pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
hipError_t res = hipFree((void *)pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t));
hipError_t result = hipStreamCreate((hipStream_t *) &nativeStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
hipError_t result = hipEventCreateWithFlags((hipEvent_t *) &nativeEvent, hipEventDisableTiming);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t result = hipEventRecord(*pEvent, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
hipError_t result = hipSetDevice(deviceId);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jIndex) memFree;
}
Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jIndex) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
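// flags encodes the copy direction: 0 = host->host, 1 = host->device,
// 2 = device->host, 3 = device->device.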
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
break;
}
}
hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
if (result != 0) {
checkCudaErrors(result);
printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], result: [%i]\n", src, dst, (long long) size, flags, (int) result );
fflush(stdout);
fflush(stderr);
return 0L;
}
else return 1;
}
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipError_t result = hipMemset((void *) dst, value, (size_t) size);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipError_t result = hipMemsetAsync((void *) dst, value, (size_t) size, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t result = hipEventDestroy(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
hipError_t result = hipStreamSynchronize(*pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
hipError_t result = hipEventSynchronize(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].name;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
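// Concat dispatch: host-side shape infos are inspected to pick a specialized kernel
// (all-scalar, VStack, HStack) before falling back to the generic TAD-based concat.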
void NativeOps::concatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 ||
shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jIndex length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackFloat), dim3(128), dim3(512), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
hipLaunchKernelGGL(( concatKernelFloat), dim3(2048), dim3(128), funcAttributes[31].sharedSizeBytes , *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::concatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jIndex length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
hipLaunchKernelGGL(( concatKernelHalf), dim3(2048), dim3(128), funcAttributes[31].sharedSizeBytes, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::specialConcatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
void NativeOps::specialConcatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float16>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::specialConcatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<double>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jIndex length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[39].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelScalarDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[41].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelVStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[43].sharedSizeBytes;
hipLaunchKernelGGL(( concatKernelHStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
hipLaunchKernelGGL(( concatKernelDouble), dim3(2048), dim3(128), funcAttributes[35].sharedSizeBytes, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[35].numRegs);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* This method builds the TAD-only shape info and offsets for the given dimension(s)
* and copies them into the caller-provided host buffers (target and offsets).
*/
void NativeOps::tadOnlyShapeInfo(int *xShapeInfo, int *dimension, int dimensionLength, int *target, Nd4jIndex *offsets) {
shape::TAD *tad = new shape::TAD();
tad->init(xShapeInfo, dimension, dimensionLength);
//tad->setOutputBuffer(target);
tad->createTadOnlyShapeInfo();
tad->createOffsets();
std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int));
std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(Nd4jIndex));
/*
shape::printShapeInfoLinear(hostXShapeInfo);
shape::printShapeInfoLinear(tad->tadOnlyShapeInfo);
shape::printShapeInfoLinear(target);
*/
delete tad;
}
int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
hipMemcpyKind kind;
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
}
//hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
hipError_t result = hipMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream);
checkCudaErrors(result);
if (result != 0) {
printf("Symbol failed on [%p] -> [%lld], size: [%lld], direction: [%i]\n", src, (long long) dst, (long long) size, flags );
return 0L;
}
else return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t result = hipGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory);
return dConstAddr;
}
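// pullRows* gathers n TADs from x (selected by indexes) into z, using the precomputed
// TAD shape info and offsets of both source and destination.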
void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( pullRowsKernelHalf), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( pullRowsKernelFloat), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( pullRowsKernelDouble), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
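// average*: element-wise mean of n device buffers into dz. extras[3] selects the mode:
// 0 launches the averaging kernel on the stream, anything else falls back to the
// host-side SpecialMethods implementation.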
void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
hipLaunchKernelGGL(( averagingKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<float16>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length, bool propagate) {
hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
hipLaunchKernelGGL(( averagingKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
// launching on host memory
nd4j::SpecialMethods<float>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length, bool propagate) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
hipLaunchKernelGGL(( averagingKernelDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<double>::averageGeneric(x, dz, n, length, propagate);
}
}
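// accumulate*: element-wise sum of n device buffers into dz, with the same mode switch
// between the device kernel and the host fallback as average*.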
void NativeOps::accumulateHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
hipLaunchKernelGGL(( accumulateKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<float16>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length) {
hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
hipLaunchKernelGGL(( accumulateKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
// launching on host memory
nd4j::SpecialMethods<float>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
hipLaunchKernelGGL(( accumulateKernelDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length);
checkCudaErrors(hipStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<double>::accumulateGeneric(x, dz, n, length);
}
}
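// shuffle*: permutes TADs across the N array pairs according to shuffleMap, using the
// per-array TAD shape info and offsets; launched with a fixed 32x128 configuration.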
void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
double **x = reinterpret_cast<double **>(dx);
double **z = reinterpret_cast<double **>(dz);
int **xShape = reinterpret_cast<int **>(xShapeInfo);
int **zShape = reinterpret_cast<int **>(zShapeInfo);
int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo);
Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets);
hipLaunchKernelGGL(( shuffleKernelDouble), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
float **x = reinterpret_cast<float **>(dx);
float **z = reinterpret_cast<float **>(dz);
int **xShape = reinterpret_cast<int **>(xShapeInfo);
int **zShape = reinterpret_cast<int **>(zShapeInfo);
int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo);
Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets);
hipLaunchKernelGGL(( shuffleKernelFloat), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
float16 **x = reinterpret_cast<float16 **>(dx);
float16 **z = reinterpret_cast<float16 **>(dz);
int **xShape = reinterpret_cast<int **>(xShapeInfo);
int **zShape = reinterpret_cast<int **>(zShapeInfo);
int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo);
Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets);
hipLaunchKernelGGL(( shuffleKernelHalf), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
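// Meta predicates fuse two ops (opTypeA/opNumA followed by opTypeB/opNumB) into a
// single GRID kernel pass over strided or shaped buffers; the DISPATCH_METAOP variants
// are kept below only as commented-out reference.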
void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int xStride, float *dy, int yStride, float *dz, int zStride, float *extraA, float *extraB, float scalarA, float scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
// metaPredicateStridedFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDStrided<float>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int xStride, double *dy, int yStride, double *dz, int zStride, double *extraA, double *extraB, double scalarA, double scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
// metaPredicateStridedDouble<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDStrided<double>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int xStride, float16 *dy, int yStride, float16 *dz, int zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
// metaPredicateStridedHalf<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDStrided<float16>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) {
// no-op
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
/*
metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB,
float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) {
*/
// metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
// no-op;
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDShaped<double>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
// no-op;
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
// we have to convert float -> fp16 prior to the kernel call
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDShaped<float16>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
// no-op;
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
bool NativeOps::isExperimentalEnabled() {
return experimentalSupport;
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
hipGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum,
float *x,
int *xShapeInfo,
float *z,
int *zShapeInfo,
float *scalars,
float *extraParams,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTadShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0);
dim3 launchDims = dim3(256, 256, 1024);
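        // note: as elsewhere in this file, dim3 is presumably used here as a (gridSize, blockSize, sharedMemBytes)
        // triple consumed by the launch helper below, not as 3D thread-block dimensions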
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarAlongDimension, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
int *xShapeInfo,
double *z,
int *zShapeInfo,
double *scalars,
double *extraParams,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarAlongDimension, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<double>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum,
float16 *x,
int *xShapeInfo,
float16 *z,
int *zShapeInfo,
float16 *scalars,
float16 *extraParams,
int *dimension,
int dimensionLength) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
/*
int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *tadOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *tadOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
*/
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarAlongDimension, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float16>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum,
float **arguments,
int numArguments,
int **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float *realArguments,
int numRealArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum,
double **arguments,
int numArguments,
int **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
double *realArguments,
int numRealArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum,
float16 **arguments,
int numArguments,
int **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float16 *realArguments,
int numRealArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
// not implemented yet
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
// not implemented yet
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
// not implemented yet
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
        // this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, int *zShapeBuffer, float *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *y, int *yShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) {
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, int *zShapeBuffer, double *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *y, int *yShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *y, int *yShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// hipStreamSynchronize(*stream);
unsigned long long *ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
nd4j::random::RandomBuffer *buffer = new nd4j::random::RandomBuffer(seed, bufferSize, (uint64_t *) ptrHost, (uint64_t *) ptrDev);
buffer->propagateToDevice(buffer, *stream);
checkCudaErrors(hipStreamSynchronize(*stream));
        // we generate the sequence in host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
checkCudaErrors(hipStreamSynchronize(*stream));
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
        // FIXME: it's a bad thing, but we can't know in advance which stream(s) were using this generator in practice
hipDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
        // refresh the buffer on the host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::shapeBufferForNumpy(Nd4jPointer npyArray) {
/*
cnpy::NpyArray *arrPointer = reinterpret_cast<cnpy::NpyArray *>(npyArray);
int *shapeBuffer = shape::shapeBufferOfNpy(*arrPointer);
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
*/
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int *shape = new unsigned int[arr.shape.size()];
for(int i = 0; i < arr.shape.size(); i++) {
shape[i] = arr.shape[i];
}
int *shapeBuffer = shape::shapeBufferOfNpy(arr.shape.size(),
shape,
arr.fortranOrder);
delete[] shape;
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::dataPointForNumpy(Nd4jPointer npyArray) {
char *buff = reinterpret_cast<char *>(npyArray);
//printf("Pointer contents %s\n",buff);
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
char *data = arrPointer->data;
if(arrPointer->wordSize == sizeof(float)) {
float *floatData = reinterpret_cast<float *>(data);
return reinterpret_cast<Nd4jPointer>(floatData);
}
else if(arrPointer->wordSize == sizeof(double)) {
double *doubleData = reinterpret_cast<double *>(data);
return reinterpret_cast<Nd4jPointer >(doubleData);
}
return reinterpret_cast<Nd4jPointer >(0);
}
/**
* Load a numpy array from a file
* and return it as an Nd4jPointer
* @param path
* @return
*/
Nd4jPointer NativeOps::numpyFromFile(std::string path) {
/*cnpy::NpyArray arr = cnpy::npyLoad(path);
return reinterpret_cast<Nd4jPointer >(&arr);
*/
char *numpyBuffer = cnpy::loadFile(path.data());
return reinterpret_cast<Nd4jPointer >(numpyBuffer);
}
void NativeOps::releaseNumpy(Nd4jPointer npyArray) {
free((void *) npyArray);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
int *shapeBuffer = reinterpret_cast<int *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
/**
* Get the element size for a numpy array
* @param npyArray the numpy array's address
* to get the length for
* @return
*/
int NativeOps::elementSizeForNpyArray(Nd4jPointer npyArray) {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
int size = arrPointer->wordSize;
return size;
/*
cnpy::NpyArray *arr = reinterpret_cast<cnpy::NpyArray *>(npyArray);
return arr->wordSize;
*/
}
/**
* The pointer to get the address for
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jIndex address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tearDouble(Nd4jPointer *extras, double *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( tearKernelDouble), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::tearFloat(Nd4jPointer *extras, float *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( tearKernelFloat), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::tearHalf(Nd4jPointer *extras, float16 *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
hipLaunchKernelGGL(( tearKernelHalf), dim3(512), dim3(512), 512, *stream, x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP1Float(Nd4jPointer *extras, float *dx, Nd4jIndex N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP1Float), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP1Double(Nd4jPointer *extras, double *dx, Nd4jIndex N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP1Double), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP1Half(Nd4jPointer *extras, float16 *dx, Nd4jIndex N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP1Half), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz, threshold);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jIndex N, int *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
        // prefix scan turns the per-block counters into the write offsets consumed by the P3 stage
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP3Float(Nd4jPointer *extraPointers, float *dx, int *offsets, Nd4jIndex N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP3Float), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP3Double(Nd4jPointer *extraPointers, double *dx, int *offsets, Nd4jIndex N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP3Double), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP3Half(Nd4jPointer *extraPointers, float16 *dx, int *offsets, Nd4jIndex N, int *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( encoderKernelP3Half), dim3(numBlocks), dim3(blockSize) , 4096, *stream, dx, offsets, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::decodeThresholdFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( decoderKernelFloat), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::decodeThresholdDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( decoderKernelDouble), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::decodeThresholdHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz){
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
hipLaunchKernelGGL(( decoderKernelHalf), dim3(numBlocks), dim3(blockSize) , 1024, *stream, dx, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3AllDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xInfo,
double *extraParamsVals,
double *y,
int *yInfo,
double *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *xTadShapeInfo,
Nd4jIndex *xOffsets,
int *yTadShapeInfo,
Nd4jIndex *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(double), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3AllDouble), dim3(launchDims.x), dim3(512), (512 * 8 * 2 + 512), *stream,
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3AllFloat(Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xInfo,
float *extraParamsVals,
float *y,
int *yInfo,
float *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *xTadShapeInfo,
Nd4jIndex *xOffsets,
int *yTadShapeInfo,
Nd4jIndex *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF119 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3AllFloat), dim3(launchDims.x), dim3(512), (512 * 4 * 2 + 512), *stream,
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::execReduce3AllHalf(Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xInfo,
float16 *extraParamsVals,
float16 *y,
int *yInfo,
float16 *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *xTadShapeInfo,
Nd4jIndex *xOffsets,
int *yTadShapeInfo,
Nd4jIndex *yOffsets) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float16), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH119 opNum:[%i]\n", opNum);
hipLaunchKernelGGL(( reduce3AllHalf), dim3(launchDims.x), dim3(512), (512 * 2 * 2 + 512), *stream,
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
int xEWS = shape::elementWiseStride(hostXShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
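            // classic bitonic sort driver: k is the size of the bitonic subsequences being merged,
            // j is the comparator distance within each stage; one kernel launch per (k, j) pass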
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( cudaBitonicSortFloat), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
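            // non-power-of-2 path: with GCC and a large contiguous (xEWS == 1) array we can hand off to b40c radix sort;
            // clang builds (and smaller or strided arrays) fall back to the generic window-based merge sort below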
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<float> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformFloat(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if (1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
hipLaunchKernelGGL(( cudaSortFloat), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(float), *stream, x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
int xEWS = shape::elementWiseStride(hostXShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( cudaBitonicSortDouble), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<double> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformDouble(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if ( 1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
hipLaunchKernelGGL(( cudaSortDouble), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(double), *stream, x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, bool descending) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( cudaBitonicSortHalf), dim3(numBlocks), dim3(numThreads), 512, *stream, x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
// half is incompatible with radix, so only bitonic here
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
hipLaunchKernelGGL(( cudaSortHalf), dim3(numBlocks), dim3(numThreads), numThreads * 2 * sizeof(float16), *stream, x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortTadFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) {
// to be implemented
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaSortTadFloat), dim3(512), dim3(512), 1088 * sizeof(float), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortTadHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) {
// to be implemented
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaSortTadHalf), dim3(512), dim3(512), 1088 * sizeof(float16), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortTadDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) {
// to be implemented
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaSortTadDouble), dim3(512), dim3(512), 1088 * sizeof(double), *stream, x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::sortCooIndicesFloat(Nd4jPointer *extraPointers, int *indices, float *values, Nd4jIndex length, int rank) {
}
void NativeOps::sortCooIndicesDouble(Nd4jPointer *extraPointers, int *indices, double *values, Nd4jIndex length, int rank) {
}
void NativeOps::sortCooIndicesHalf(Nd4jPointer *extraPointers, int *indices, float16 *values, Nd4jIndex length, int rank) {
}
Nd4jIndex NativeOps::encodeBitmapFloat(Nd4jPointer *extraPointers, float *dx, Nd4jIndex N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( cudaEncodeBitmapFloat), dim3(512), dim3(512), 512 * 2 * sizeof(float) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold);
checkCudaErrors(hipStreamSynchronize(*stream));
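        // the encoder kernel reports the number of encoded elements through resultPointer[0];
        // read it back and reset the counter for the next call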
Nd4jIndex result = (Nd4jIndex) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jIndex NativeOps::encodeBitmapDouble(Nd4jPointer *extraPointers, double *dx, Nd4jIndex N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( cudaEncodeBitmapDouble), dim3(512), dim3(512), 512 * 2 * sizeof(double) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold);
checkCudaErrors(hipStreamSynchronize(*stream));
Nd4jIndex result = (Nd4jIndex) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jIndex NativeOps::encodeBitmapHalf(Nd4jPointer *extraPointers, float16 *dx, Nd4jIndex N, int *dz, float threshold) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
hipLaunchKernelGGL(( cudaEncodeBitmapHalf), dim3(512), dim3(512), (512 * sizeof(float16)) + (512 * sizeof(int)) + 384, *stream, dx, N, dz, resultPointer, reductionPointer, threshold);
checkCudaErrors(hipStreamSynchronize(*stream));
Nd4jIndex result = (Nd4jIndex) resultPointer[0];
resultPointer[0] = 0;
return result;
}
void NativeOps::decodeBitmapFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaDecodeBitmapFloat), dim3(512), dim3(512), 512 * sizeof(float) + 384, *stream, dx, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::decodeBitmapDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaDecodeBitmapDouble), dim3(512), dim3(512), 512 * sizeof(double) + 384, *stream, dx, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
void NativeOps::decodeBitmapHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz) {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
hipLaunchKernelGGL(( cudaDecodeBitmapHalf), dim3(512), dim3(512), 512 * sizeof(float16) + 384, *stream, dx, N, dz);
checkCudaErrors(hipStreamSynchronize(*stream));
}
Nd4jIndex* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jIndex length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jIndex* ptrMap, Nd4jIndex length) {
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer protoBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, const char *fileName) {
return nullptr;
}
Nd4jPointer NativeOps::executeFlatGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeFlatGraphHalf(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeFlatGraphDouble(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
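// helper: wraps the raw input buffers/shapes into temporary NDArrays inside a throw-away
// VariableSpace/Context, forwards the integer and scalar arguments, and asks the op to
// compute its output shapes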
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) {
nd4j::graph::VariableSpace<T> varSpace;
Context<T> block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = (int *) inputShapes[e];
auto buffer_ = (T *) inputBuffers[e];
auto array = new nd4j::NDArray<T>(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) {
nd4j::graph::Context<T> block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back((int *) inputShapes[e]);
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
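// shared execution path for the typed execCustomOp* entry points: wraps the inputs into NDArrays,
// copies the T/int arguments, runs the op, and (for non-inplace calls) copies each result into the
// caller-provided output buffers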
template<typename T>
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, T* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray<T>*> inputs(numInputs);
std::vector<nd4j::NDArray<T>*> outputs;
std::vector<T> ttArgs(numTArgs);
std::vector<int> iiArgs(numIArgs);
// filling block now
for (int e = 0; e < numInputs; e++) {
auto buffer = (T *) inputBuffers[e];
auto shape = (int *) inputShapes[e];
// auto var = new Variable<T>(new NDArray<T>(buffer, shape));
// block.getVariables()->emplace_back(var);
auto array = new nd4j::NDArray<T>(buffer, shape);
//array->setSpecialBuffers( (T *) inputBuffers[e + numInputs], (int *) inputShapes[e + numInputs]);
inputs[e] = array;
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
// hypothetically at this point we have everything filled
auto result = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (result->status() != ND4J_STATUS_OK)
return result->status();
if (!isInplace) {
if (result->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray <T> tmp(buffer, shape);
tmp.assign(result->at(e));
}
}
delete result;
for (auto ptr: inputs)
delete ptr;
return ND4J_STATUS_OK;
}
int NativeOps::execCustomOpFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return realExec<float>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return realExec<double>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float16* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return realExec<float16>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::registerGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<double>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float16>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
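// runs a previously registered graph: clone its VariableSpace, inject the caller's input arrays at the
// requested indices, execute the graph, and package the fetched outputs into a VariablesSet owned by the caller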
template <typename T>
static VariablesSet<T>* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph<T>(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray<T> *> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray<T>((T *) inputBuffers[e], (int *) inputShapes[e]);
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto result = nd4j::graph::GraphExecutioner<T>::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet<T>(result);
if (result == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
                // we're only getting the variable ID/index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet<float>* NativeOps::executeStoredGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<float16>* NativeOps::executeStoredGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float16>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<double>* NativeOps::executeStoredGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<double>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jIndex graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
int *ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet<T>* ptr = reinterpret_cast<nd4j::graph::VariablesSet<T>*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSetFloat(Nd4jPointer pointer) {
deleteVariablesSetT<float>(pointer);
}
void NativeOps::deleteVariablesSetHalf(Nd4jPointer pointer) {
deleteVariablesSetT<float16>(pointer);
}
void NativeOps::deleteVariablesSetDouble(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
c5db4fcc607e02adb19816cabbab7ff423288c8a.cu
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <helpers/shape.h>
#include "../Environment.h"
#include <helpers/TAD.h>
#include <ops/specials.h>
#include <loops/reduce3.h>
#include <loops/reduce.h>
#include <loops/indexreduce.h>
#include <loops/pairwise_transform.h>
#include <loops/transform.h>
#include <loops/scalar.h>
#include <loops/broadcasting.h>
#include <loops/summarystatsreduce.h>
#include <loops/random.h>
//#include <thread>
#include <map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
#include <stdlib.h>
#include <loops/type_conversions.h>
#include <op_boilerplate.h>
#include <loops/grid_shaped.h>
#include <loops/grid_strided.h>
#include <loops/aggregates.h>
#include <helpers/threshold.h>
#include <ShapeList.h>
#include <Context.h>
#include <ops/specials_cuda.h>
// FIXME: we need cuda-specific implementations
#include <helpers/logger.h>
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <GraphExecutioner.h>
#include <graph/GraphHolder.h>
#include <graph/VariablesSet.h>
#include <ops/declarable/OpRegistrator.h>
#include <ops/declarable/CustomOperations.h>
//#include <sys/time.h>
// b40c only available for gcc :(
#ifdef __clang__
// do nothing
#elif __GNUC__
#include <b40c/util/error_utils.cuh>
#include <b40c/util/multiple_buffering.cuh>
#include <b40c/radix_sort/enactor.cuh>
#endif
#include <curand.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
typedef struct {
long streamId;
long callId;
} __syncInfo;
typedef __syncInfo SyncInfo;
// this method isn't used, left here for legacy and caution purposes
// TLDR: don't use this way, it sucks
void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){
SyncInfo *sync = (SyncInfo *) data;
printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId);
}
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jIndex)ptrToDeviceId;
}
template <typename T>
dim3 getOptimalDimensions(Nd4jIndex n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
    // no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if (num_blocks > blockLimit) num_blocks = blockLimit;
if (num_blocks < 4 && n > 128) {
num_blocks = 4;
num_threads = n / num_blocks;
}
if (num_threads >= 768) {
num_blocks = num_blocks * 2;
num_threads = num_threads / 2;
}
if(n % num_threads && num_blocks < blockLimit) ++num_blocks;
//(num_threads * sizeof(T)) + attributes.sharedSizeBytes);
return dim3(num_blocks,num_threads, 3000);
}
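// estimates the fixed per-block shared-memory overhead (base allowance plus UnifiedSharedMemory bookkeeping)
// that the launch helpers add on top of the per-thread requirements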
int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) {
int memory_limit = 256; //funcAttr.sharedSizeBytes;
// TODO: remove this later
memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4)
/*
if (xRank == 0) xRank = 2;
memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes
memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4;
memory_limit += zRank == 0 ? 0 : (zRank * 2 + 4) * 4;
memory_limit += (xRank * 4) * 6;
    memory_limit += MAX_RANK * 4; // special case, needed roughly in one pass
*/
return memory_limit;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
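// derives a simple 1D launch configuration: picks a thread count by dividing the problem across
// countMP * blockThreshold blocks, clamps it to [max(64, minThreads), maxThreads], caps the block count
// at blockLimit, and budgets shared memory as sharedMemoryPerThread * num_threads plus the base overhead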
dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) {
int countMP = deviceProperties[deviceId].multiProcessorCount;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int num_threads = problemLength / (countMP * blockThreshold);
num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads);
num_threads = nd4j::math::nd4j_max<int>(num_threads, 64);
num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads);
int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr);
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit);
return launchDims;
}
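// Illustrative example with assumed numbers: problemLength = 1000000 on a 20-MP cc 6.x device
// (blockThreshold = 32), with sharedMemoryPerThread = 16 (the value most callers below pass in):
//   num_threads = 1000000 / (20 * 32) = 1562 -> clamped to maxThreads = 512
//   num_blocks  = 1000000 / 512 = 1953       -> clamped to blockLimit = 128
//   memory_limit = 16 * 512 + getBaseMemorySize(1, funcAttr)
// so the returned launch config is dim3(128, 512, 8192 + base shmem).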
/*
* This method returns the shared memory threshold value. The default overflow ratio is 0.3.
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
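// Worked example with an assumed cc 6.1 device: shmemThreshold = 49152, so this returns
// 49152 / 0.3 = 163840. The callers below divide that figure among the blocks expected to be
// resident per MP, so the 0.3 "overflow ratio" effectively over-commits shared memory by ~3.3x.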
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {
int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int warpSize = deviceProperties[deviceId].warpSize;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
// round num_threads down to a multiple of warpSize
num_threads -= num_threads % warpSize;
num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
if (num_threads < warpSize && tadLength < warpSize)
num_threads = tadLength;
// since we use shared memory as fast memory in some cases, we need to account for that
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int memory_floor = memory_limit;
int effective_block_limit = countMP * blockThreshold;
int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);
int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);
// at this point we've gathered all the required information; time to account for the reduction multipliers
int reduction_per_block = 0;
bool found = false;
if (reduction > 0)
while (!found) {
reduction_per_block = (num_threads * elementSize * reduction);
if (memory_limit + reduction_per_block < desiredShared) {
memory_limit += reduction_per_block;
found = true;
} else {
if (num_threads > minThreads) {
num_threads -= 32;
} else {
memory_limit += reduction_per_block;
found = true;
}
}
}
// at this point we know the total memory used per block, and we also know the per-MP limit
int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);
// we don't want to spawn more blocks than the GPU can actually handle without queueing
//num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// if (num_blocks > countMP)
// num_blocks = num_blocks - (num_blocks % countMP);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
reduction_per_block = (num_threads * elementSize * reduction);
memory_limit = memory_floor + reduction_per_block;
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);
return dim3(num_blocks,num_threads, memory_limit);
}
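// Illustrative trace of the reduction shrink-loop above, with assumed numbers: say
// desiredShared = 8192 bytes, base memory_limit = 300 bytes, elementSize = 8 and reduction = 3.
// Starting from num_threads = 512, each block would need 512 * 8 * 3 = 12288 bytes of reduction
// space, which does not fit; the loop trims threads in warp-sized steps (480, 448, ...) until
// 320 * 8 * 3 = 7680 bytes fits under 8192 - 300, and the block ends up at 320 threads.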
/*
* This method returns kernel launch params for linear memory access
*/
dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, cudaFuncAttributes funcAttr) {
int xRank = shape::rank(xShapeInfo);
int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo);
int zRank = 0;
int memory_limit = getBaseMemorySize(xRank, funcAttr);
int countMP = deviceProperties[deviceId].multiProcessorCount;
int regPerBlock = deviceProperties[deviceId].regsPerBlock;
int blockThreshold = getDeviceBlockThreshold(deviceId);
int shmemThreshold = getDeviceSharedThreshold(deviceId);
int xLength = shape::length(xShapeInfo);
int effective_block_limit = countMP * blockThreshold;
// for flat calls we just want as many concurrent blocks as possible, and we're not tied to TADs here
int num_threads = xLength / effective_block_limit;
if (num_threads < minThreads)
num_threads = minThreads;
num_threads = num_threads - (num_threads % 32);
int memory_floor = memory_limit;
int num_blocks = xLength / num_threads;
num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
// num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);
int targetBlocksPerMP = num_blocks / countMP;
// now we know the desired number of blocks wrt shared memory, so we should take into account the number of threads per SM
if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
while (targetBlocksPerMP * num_threads > 2048) {
if (num_threads <= minThreads)
break;
num_threads -= 32;
}
}
if (xLength / num_threads > blockLimit)
num_blocks *= 2;
dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);
return launchDims;
}
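// Illustrative example with assumed numbers: xLength = 100000 on a 20-MP cc 6.x device
// (effective_block_limit = 20 * 32 = 640):
//   num_threads = 100000 / 640 = 156 -> rounded down to a multiple of 32 -> 128
//   num_blocks  = 100000 / 128 = 781 -> clamped to blockLimit = 128
//   targetBlocksPerMP = 128 / 20 = 6, and 6 * 128 <= 2048, so no thread trimming
//   100000 / 128 > blockLimit, so num_blocks is doubled to 256
// giving dim3(256, 128, memory_limit).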
/**
* This method returns kernel launch params with TAD-based memory access
*
* @param deviceId
* @param xShapeInfo
* @param tadShapeInfo
* @param funcAttr
* @param dimensionLength
* @param elementSize
* @param reductionSize
* @return
*/
dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {
int tadLength = 0;
int numTads = 0;
if (tadShapeInfo != nullptr) {
tadLength = shape::length(tadShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
if (tadLength == 1) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo));
}
} else{
// we have a special case: reduction along all dimensions
tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768);
numTads = shape::length(xShapeInfo) / tadLength;
}
int xRank = shape::rank(xShapeInfo);
int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);
dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);
if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.x == 1
printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
}
return launchDims;
}
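// Example of the TAD bookkeeping above, with assumed shapes: if x holds 10000 elements and the
// TAD shape describes slices of length 100, then numTads = 10000 / 100 = 100. With a null
// tadShapeInfo (reduction along all dimensions) tadLength is capped at 768, so the same x would
// give tadLength = 768 and numTads = 10000 / 768 = 13.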
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<int> * createScalarBuffer(cudaStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
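// Minimal usage sketch for the scalar helpers above (hypothetical kernel name, shown only to
// illustrate the intended call pattern):
//
//   ScalarInfo<float> scalar(*stream);
//   someScalarKernel<<<1, 1, 0, *stream>>>(..., scalar.getDevicePointer(),
//                                          scalar.getDeviceShapeInfo(),
//                                          scalar.getDimensionDevicePointer());
//   float hostValue = scalar.getFinalResultFromDevice(); // copies the device scalar back to host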
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
int *xShapeInfo,
double *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D1 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 3);
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,
int *dimension, int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D2 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 3);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
int *dimension, int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(broadcastSimple, double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int xStride,
double *y,
int yStride,
double *result,
int resultStride,
double *extraParams, Nd4jIndex n) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams,
int *xIndexes,
int *yIndexes,
int *resultIndexes) {
/*
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D5 opNum:[%i]\n", opNum);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
dx,
y,
extraParams,
result,
xShapeInfo, shape::rank(hostXShapeInfo),
yShapeInfo, shape::rank(hostYShapeInfo),
resultShapeInfo, shape::rank(hostZShapeInfo),
xIndexes,
yIndexes,
resultIndexes, allocationPointer, deviceTADShapeInfo);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
*/
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams) {
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<double>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D7 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D8 opNum:[%i]\n", opNum);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
if (opNum == 19) {
execReduceDouble(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
//checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* We have separate kernels, optimized for different number of dimensions for reductions
*/
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D9 opNum:[%i]\n", opNum);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[22]);
// for the LogExpSum op we need to know the max value, and store it
if (opNum == 19) {
double tmp = execReduceScalarDouble(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *y,
int *yShapeInfo){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D11 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3ScalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// since this method should return a scalar value, we should block on this call
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *y,
int *yShapeInfo,
double *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D12 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int xStride,
double *result,
int resultStride,
double scalar,
double *extraParams,
Nd4jIndex n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]);
functions::scalar::ScalarTransform<double>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double scalar,
double *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<double>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double scalar,
double *extraParams,
Nd4jIndex n,
int *xIndexes,
int *resultIndexes){
printf("Unsupported operation: scalarIndices\n");
/*
}
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D15 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]);
scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
x,
extraParams,
result,
resultIndexes, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
*/
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,bool biasCorrected) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D17 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(double), 8);
// we have to limit the grid size here, due to the limited nature of the reduction/allocation pointers
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xShapeInfo,
double *extraParams,
double *result,
int *resultShapeInfo,
int *dimension, int dimensionLength,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(double), 8);
// we're limiting maximum grid size for summaryStats ops
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<double>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int xStride,
double *z,
int zStride,
double *extraParams,
Nd4jIndex n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D20 opNum:[%i]\n", opNum);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
// special pointer for special buffer for special ops
double *specialPointer = reinterpret_cast<double *>(extraPointers[6]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4);
int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
/**
 * ops between 38 and 41 are special ops:
 * SoftMax, LogSoftMax, SoftMaxDerivative, IsMax
 * On CUDA we execute them as compositions of simpler reduce/broadcast/transform kernels below
 */
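/**
 * For reference, the decomposition used below is the usual numerically-stable form:
 *   softmax(x)_i    = exp(x_i - max(x)) / sum_j exp(x_j - max(x))
 *   logsoftmax(x)_i = log(softmax(x)_i)
 * realized as: reduce(max) -> broadcast(subtract) -> transform(exp) -> reduce(sum)
 * -> broadcast(divide), followed by transform(log) for LogSoftMax or the derivative
 * transform for SoftMaxDerivative.
 */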
// simple trick to work around reductions into a scalar
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if that's a vector, we just go directly to the op in 1 block
/*
 * For vector cases of everything but IsMax (41) we go for single-kernel calls
 */
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(256, length);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(double) * 4);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
} else {
// going for blockwise specials
// we'll do some pointer mangling here and execute kernels one by one
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// TODO: we could get rid of this one eventually
prepareShapeBuffer <<<1, 1, 128, *stream>>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// exp 3
execTransformDouble(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceDouble(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastDouble(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
// log 3
if (opNum == 40)
execTransformDouble(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformDouble(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
/**
 * In the case of vector input for IsMax, it just turns into an IndexReduce call plus a filler call
 */
int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxDouble<<< 1, 128, 0, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
} else {
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<double *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call IMax on the specified dimension
execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute filler
fillDimensionalIsMaxDouble<<<blockLimit, 64, funcAttributes[37].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
checkCudaErrors(cudaStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformDouble\n");
break;
}
}
}
} else {
// for Im2Col & Col2Im we enforce higher dimensionality
// TODO: investigate this on high-end gpus
if (opNum == 37 || opNum == 36 || opNum == 71) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(double);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(double);
}
// Histogram op requires additional memory chunk
// FIXME: make this one use the cache
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
cudaMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(double));
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
// we need guaranteed sync here, due to temp memory release
if (nd4j::Environment::getInstance()->isDebug() || opNum == 48)
checkCudaErrors(cudaStreamSynchronize(*stream));
// release Histogram memory
if (opNum == 48) {
cudaFree((void *)maskedAllocPointer);
}
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
double *dx,
int *xShapeInfo,
double *result,
int *resultShapeInfo,
double *extraParams,
int *xIndexes,
int *resultIndexes) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D21 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]);
transformDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultIndexes, allocPointer, reductionPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams){
if (nd4j::Environment::getInstance()->isDebug())
printf("F1 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AF1 opNum:[%i]\n", opNum);
indexReduceFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// once again, since we return a scalar value from this method, we should block on this kernel launch
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execIndexReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams){
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H1 opNum:[%i]\n", opNum);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
printf("AH1 opNum:[%i]\n", opNum);
indexReduceHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
resultPointer,
nullptr, 0,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
// blocking for scalar output
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 4);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF2 opNum:[%i]\n", opNum);
indexReduceFloat<<<launchDims.x, launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execIndexReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H2 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 8);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH2 opNum:[%i]\n", opNum);
indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
int *dimension, int dimensionLength){
/*
cudaEvent_t start;
cudaEventCreateWithFlags(&start, cudaEventDisableTiming);
timespec tsX;
timespec tsY;
clock_gettime(CLOCK_REALTIME, &tsX);
*/
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS))
/*
SyncInfo *info = new SyncInfo();
info->streamId = 32;
info->callId = 1234567890;
timespec ts1;
timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts1);
*/
/*
broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo, shape::rank(hostXShapeInfo),
y,
yShapeInfo, shape::rank(hostYShapeInfo),
result,
resultShapeInfo, shape::rank(hostZShapeInfo),
dimension,
dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ);
*/
/*
clock_gettime(CLOCK_REALTIME, &ts2);
// cudaEventRecord(start, 0);
// cudaStreamAddCallback(*stream, syncCallback, (void*)info, 0);
*/
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
/*
clock_gettime(CLOCK_REALTIME, &tsY);
printf("Execution time: %i\n", (ts2.tv_nsec - ts1.tv_nsec));
printf("Overall time: %i\n", (tsY.tv_nsec - tsX.tv_nsec));
printf("Callback setup time: %i\n", (tsY.tv_nsec - ts2.tv_nsec));
printf("-------------------------------------\n");
*/
}
void NativeOps::execBroadcastHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
int *dimension, int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *deviceTADOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H3 opNum:[%i]\n", opNum);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0);
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int xStride,
float *y,
int yStride,
float *result,
int resultStride,
float *extraParams, Nd4jIndex n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
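/*
 * Convention note (a sketch of an observation, not a documented contract): throughout
 * this file a dim3 is repurposed as a launch-parameter triple, x = grid size,
 * y = block size, z = dynamic shared memory in bytes, which is how the explicit
 * launches elsewhere use it: kernel<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(...).
 * The fixed dim3(512, 512, 2048) above is therefore read as 512 blocks of 512 threads
 * with 2048 bytes of shared memory. A hypothetical helper making that packing explicit:
 *
 *   static dim3 makeLaunchDims(unsigned int gridSize, unsigned int blockSize, unsigned int sharedBytes) {
 *       return dim3(gridSize, blockSize, sharedBytes); // x = grid, y = block, z = shared bytes
 *   }
 */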
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int xStride,
float16 *y,
int yStride,
float16 *result,
int resultStride,
float16 *extraParams, Nd4jIndex n){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaStrided(launchDims, extraPointers, opNum, dx, xStride, y, yStride, result, resultStride, extraParams, n);
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int *xShapeInfo,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams,
int *xIndexes,
int *yIndexes,
int *resultIndexes){
/*
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F5 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF5 opNum:[%i]\n", opNum);
pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
dx,
y,
extraParams,
result,
xShapeInfo, shape::rank(hostXShapeInfo),
yShapeInfo, shape::rank(hostYShapeInfo),
resultShapeInfo, shape::rank(hostZShapeInfo),
xIndexes,
yIndexes,
resultIndexes, allocationPointer, deviceTADShapeInfo);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
*/
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int *xShapeInfo,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams,
int *xIndexes,
int *yIndexes,
int *resultIndexes){
/*
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H5 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float16), 0);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH5 opNum:[%i]\n", opNum);
pairWiseTransformHalfIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
dx,
y,
extraParams,
result,
xShapeInfo, shape::rank(hostXShapeInfo),
yShapeInfo, shape::rank(hostYShapeInfo),
resultShapeInfo, shape::rank(hostZShapeInfo),
xIndexes,
yIndexes,
resultIndexes, allocationPointer, deviceTADShapeInfo);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
*/
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int *xShapeInfo,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
void NativeOps::execPairwiseTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int *xShapeInfo,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams){
dim3 launchDims(512, 512, 2048);
functions::pairwise_transforms::PairWiseTransform<float16>::execudaCudaShaped(launchDims, extraPointers, opNum, dx, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, extraParams);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F7 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H7 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH7 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo);
}
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,
int *dimension,int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F8 opNum:[%i]\n", opNum);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1);
if (opNum == 19) {
execReduceFloat(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
// we call different kernels, each optimized for a different number of dimensions in the TAD
if (dimensionLength == 1) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2);
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else if (shape::rank(hostTADShapeInfo) <= 3) {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2);
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else {
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(float), 2);
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
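/*
 * The three branches above pick a kernel specialization by TAD rank: dimensionLength == 1
 * goes to the 1D kernel, a TAD rank of 3 or less goes to the 3D kernel, and everything
 * else falls back to the generic XD kernel. A standalone sketch of the same decision,
 * with hypothetical names, just to make the dispatch rule explicit:
 *
 *   enum class ReduceKernelKind { Generic1D, Generic3D, GenericXD };
 *
 *   static ReduceKernelKind pickReduceKernel(int dimensionLength, int *hostTADShapeInfo) {
 *       if (dimensionLength == 1)
 *           return ReduceKernelKind::Generic1D;
 *       if (shape::rank(hostTADShapeInfo) <= 3)
 *           return ReduceKernelKind::Generic3D;
 *       return ReduceKernelKind::GenericXD;
 *   }
 */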
void NativeOps::execReduceHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,
int *dimension,int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H8 opNum:[%i]\n", opNum);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH8 opNum:[%i]\n", opNum);
if (opNum == 19) {
execReduceHalf(extraPointers, 3, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength);
}
// calling different kernels, depending on the number of dimensions in the TAD
if (dimensionLength == 1) {
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else if (shape::rank(hostTADShapeInfo) <= 3) {
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
} else {
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS))
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F9 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF9 opNum:[%i]\n", opNum);
// for the LogExpSum op we need to know the max value first, so compute and store it
if (opNum == 19) {
execReduceScalarFloat(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
// blocking call: the scalar result is read back on the host immediately
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
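/*
 * Background for the opNum == 19 pre-pass above (the op name "LogExpSum" is taken from
 * the comment; the op-number mapping is assumed, not verified here): a numerically
 * stable log-sum-exp first reduces to the maximum m = max(x) and then evaluates
 * m + log(sum_i exp(x_i - m)), which is why the scalar reduce with opNum 3 is issued
 * first to obtain that maximum and its result is handed over through extraParams.
 * A plain host-side reference of the same identity, for orientation only:
 *
 *   #include <cmath>
 *   static float logSumExpReference(const float *x, int n) {
 *       float m = x[0];
 *       for (int i = 1; i < n; i++) if (x[i] > m) m = x[i];        // first pass: max
 *       double acc = 0.0;
 *       for (int i = 0; i < n; i++) acc += std::exp((double) (x[i] - m));
 *       return (float) (m + std::log(acc));                        // stable log-sum-exp
 *   }
 */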
float NativeOps::execReduceScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H9 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH9 opNum:[%i]\n", opNum);
// for the LogExpSum op we need to know the max value first, so compute and store it
if (opNum == 19) {
execReduceScalarHalf(extraPointers, 3, x, xShapeInfo, extraParams);
extraParams = resultPointer;
}
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS))
// blocking call
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF10 opNum:[%i]\n", opNum);
reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
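/*
 * Judging by the signatures, the reduce3 family served by the entry points above
 * accumulates over two input arrays at once (x and y), which is what distinguishes it
 * from the single-input reduce path earlier in this file. The concrete operations are
 * defined elsewhere in the library and are not restated here; purely as a hedged
 * illustration, a host-side reference of one such pairwise reduction (a plain dot
 * product) looks like this:
 *
 *   static float dotReference(const float *x, const float *y, int n) {
 *       double acc = 0.0;
 *       for (int i = 0; i < n; i++) acc += (double) x[i] * (double) y[i]; // pairwise accumulate
 *       return (float) acc;
 *   }
 */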
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H10 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH10 opNum:[%i]\n", opNum);
reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *y,
int *yShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F11 opNum:[%i]\n", opNum);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF11 opNum:[%i]\n", opNum);
reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
float NativeOps::execReduce3ScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *y,
int *yShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H11 opNum:[%i]\n", opNum);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH11 opNum:[%i]\n", opNum);
reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z + 2048, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
resultPointer,
nullptr,
nullptr,
1,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
// blocking call
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = (float) resultPointer[0];
return result;
}
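/*
 * Pattern note (sketch, not library API): the scalar entry points above all follow the
 * same sequence: launch the kernel, let it write its answer into the buffer exposed at
 * extraPointers[5], block on cudaStreamSynchronize, and return element [0]. A caller-side
 * helper reusing that convention could look like this (the helper name is hypothetical):
 *
 *   static float readScalarResult(Nd4jPointer *extraPointers, cudaStream_t *stream) {
 *       float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
 *       checkCudaErrors(cudaStreamSynchronize(*stream));  // the kernel must have finished
 *       return resultPointer[0];
 *   }
 */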
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *y,
int *yShapeInfo,
float *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
reduce3ScalarFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
reduce3Float<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduce3Half(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *y,
int *yShapeInfo,
float16 *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *yDeviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H12 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH12 opNum:[%i]\n", opNum);
if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
reduce3ScalarHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
} else {
reduce3Half<<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
x,
xShapeInfo,
y,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int xStride,
float *result,
int resultStride,
float scalar,
float *extraParams,
Nd4jIndex n){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
// this macro builds a set of IF/ELSE selectors for the kernel launch
functions::scalar::ScalarTransform<float>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, scalar, extraParams, n);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int xStride,
float16 *result,
int resultStride,
float scalar,
float16 *extraParams,
Nd4jIndex n){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);
// this macro builds a set of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS))
float16 sc = (float16) scalar;
functions::scalar::ScalarTransform<float16>::executeCudaStrided(launchDims, extraPointers, opNum, x, xStride, result, resultStride, sc, extraParams, n);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float scalar,
float *extraParams){
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
Nd4jIndex n = shape::length(hostXShapeInfo);
// if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("F14 opNum:[%i]\n", opNum);
//dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a set of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *result,
int *resultShapeInfo,
float scalarF,
float16 *extraParams){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
Nd4jIndex n = shape::length(hostXShapeInfo);
//if (nd4j::Environment::getInstance()->isDebugAndVerbose())
// printf("H14 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);
float16 scalar = (float16) scalarF;
//if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
// printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a set of IF/ELSE selectors for the kernel launch
//DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float16>::executeCudaShaped(launchDims, extraPointers, opNum, x, xShapeInfo, result, resultShapeInfo, scalar, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
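/*
 * Usage sketch with hypothetical buffers and op number (not taken from the library):
 * the half-precision scalar entry point above still receives the scalar as a float and
 * narrows it to float16 on the host right before dispatch, so a caller simply passes a
 * regular float value:
 *
 *   float16 *dX, *dZ;        // device buffers, allocated and filled elsewhere
 *   int *dXShape, *dZShape;  // device-side shape buffers matching the host copies in extras
 *   nativeOps.execScalarHalf(extras, someScalarOpNum, dX, dXShape, dZ, dZShape,
 *                            2.5f, nullptr);              // scalar narrowed to float16 internally
 */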
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float scalar,
float *extraParams,
int *xIndexes,
int *resultIndexes){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F15 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF15 opNum:[%i]\n", opNum);
/*
scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
x,
extraParams,
result,
resultIndexes, allocPointer);
*/
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// we limit grid size for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
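/*
 * The clamp above caps the grid at 512 blocks; as the related comments in this file put
 * it, the block count is limited "to match buffer memory size", presumably because the
 * scratch buffers passed through extraPointers only hold a bounded number of per-block
 * partial results. The same rule as a tiny standalone helper (hypothetical name, shown
 * only for clarity):
 *
 *   static int clampSummaryStatsGrid(int requestedBlocks) {
 *       return nd4j::math::nd4j_min<int>(512, requestedBlocks);
 *   }
 */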
float NativeOps::execSummaryStatsScalarHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
return (float) functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduceScalar(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float), 8);
// limiting the number of blocks in the grid to match the buffer memory size
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], 1, sizeof(float16), 8);
// as everywhere else, we limit the maximum number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, biasCorrected);
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xShapeInfo,
float *extraParams,
float *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float), 8);
// as everywhere else, we limit the maximum number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
void NativeOps::execSummaryStatsHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xShapeInfo,
float16 *extraParams,
float16 *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,bool biasCorrected){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *deviceTADOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[1], dimensionLength, sizeof(float16), 8);
// as everywhere else, we limit the maximum number of blocks for SummaryStats calls
launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x);
functions::summarystats::SummaryStatsReduce<float16>::execSummaryStatsReduce(launchDims, extraPointers, opNum, x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, biasCorrected);
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param z
* @param zStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int xStride,
float *z,
int zStride,
float *extraParams,
Nd4jIndex n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int xStride,
float16 *z,
int zStride,
float16 *extraParams,
Nd4jIndex n) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H19 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo));
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
float *dx,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
// pointer to the special scratch buffer used by the special ops handled below
float *specialPointer = reinterpret_cast<float *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4);
int *maskedAllocPointer = allocPointer;
int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF20 opNum:[%i]\n", opNum);
// special handling for ops that internally reduce into a scalar:
// SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if the input is a vector, we run the op directly in a single block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float) * 4);
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
} else {
// going for blockwise specials
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// exp 3
execTransformFloat(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceFloat(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastFloat(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformFloat(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformFloat(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
// for 1D input we just do a single scalar IMax call plus a filler kernel
int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxFloat<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
// going for dimension-based IsMax
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxFloat<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
checkCudaErrors(cudaStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformFloat\n");
break;
}
}
}
} else {
// we're enforcing larger grids for Col2Im & Im2Col
// TODO: for high-end gpus we might use higher values here
if (opNum == 37 || opNum == 36) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float);
}
// histogram op requires an additional memory chunk :(
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
cudaMalloc((void **) &maskedAllocPointer, length * launchDims.x * sizeof(float));
}
DISPATCH_SIMPLE(transformShaped, float,
PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo,
shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
// we need guaranteed sync here, due to temp memory release
if (nd4j::Environment::getInstance()->isDebug() || opNum == 48)
checkCudaErrors(cudaStreamSynchronize(*stream));
// release memory chunk
if (opNum == 48) {
cudaFree((void *) maskedAllocPointer);
}
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
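/*
 * The blockwise SoftMax path above (opNum 38..40) is stitched together from primitive
 * launches: a max reduce along the row, a broadcast subtract, an element-wise exp, a sum
 * reduce, a broadcast divide, and, depending on the op, a final log (LogSoftMax) or
 * derivative transform (SoftMaxDerivative). A single-row host reference of the
 * SoftMax/LogSoftMax part of that pipeline, for orientation only (plain C++, no library calls):
 *
 *   #include <cmath>
 *   static void softmaxRowReference(const float *in, float *out, int n, bool logSoftmax) {
 *       float m = in[0];
 *       for (int i = 1; i < n; i++) if (in[i] > m) m = in[i];                        // max reduce
 *       double sum = 0.0;
 *       for (int i = 0; i < n; i++) { out[i] = std::exp(in[i] - m); sum += out[i]; } // subtract, exp, sum
 *       for (int i = 0; i < n; i++) {
 *           out[i] = (float) (out[i] / sum);                                         // broadcast divide
 *           if (logSoftmax) out[i] = std::log(out[i]);                               // LogSoftMax tail
 *       }
 *   }
 */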
void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum,
float16 *dx,
int *xShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H20 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
int *maskedAllocPointer = allocPointer;
float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]);
int *dimension = (int *) specialPointer;
int *maxDimension = dimension + 1;
int *maxShapeBuffer = (int *) maxDimension + 1;
float16 * special = (float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH20 opNum:[%i]\n", opNum);
int *devTadShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *devTadOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
// special handling for ops that internally reduce into a scalar:
// SoftMax, SoftMaxDerivative, LogSoftMax, IsMax
if (opNum >= 38 && opNum <= 41) {
if (shape::isVector(hostXShapeInfo) && opNum != 41) {
// if the input is a vector, we run the op directly in a single block
int length = shape::length(hostXShapeInfo);
int block = nd4j::math::nd4j_min<int>(length, 256);
launchDims.x = 1;
launchDims.y = block;
launchDims.z += (block * sizeof(float16) * 4);
// this macro builds a set of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
} else {
// going for blockwise specials
int *shape = shape::shapeOf(hostXShapeInfo);
switch (opNum) {
case 40: // LogSoftMax
case 39: // SoftMax Derivative
case 38: {// softmax
Nd4jPointer tempPointers[16];
tempPointers[0] = extraPointers[0];
tempPointers[1] = extraPointers[1];
tempPointers[2] = extraPointers[2];
tempPointers[3] = extraPointers[3];
tempPointers[4] = extraPointers[4];
tempPointers[5] = extraPointers[5];
tempPointers[6] = extraPointers[6];
tempPointers[7] = extraPointers[7];
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[12];
tempPointers[13] = extraPointers[13];
tempPointers[14] = extraPointers[14];
tempPointers[15] = extraPointers[15];
int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1};
int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape);
tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;
// FIXME: fix this
prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
//shape::printShapeInfo(maxShapeBuffer);
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
// max 3
execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// sub 1
execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// exp 3
execTransformHalf(extraPointers, 3, result, resultShapeInfo, result, resultShapeInfo, extraParams);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = tempPointers[7];
tempPointers[9] = extraPointers[12];
tempPointers[10] = extraPointers[13];
tempPointers[11] = extraPointers[14];
//sum 1
execReduceHalf(tempPointers, 1, result, resultShapeInfo, extraParams, special,
maxShapeBuffer, maxDimension, 1);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
tempPointers[8] = extraPointers[8];
tempPointers[9] = extraPointers[9];
tempPointers[10] = extraPointers[10];
tempPointers[11] = extraPointers[11];
tempPointers[12] = extraPointers[10];
tempPointers[13] = extraPointers[11];
// divide 3
execBroadcastHalf(tempPointers, 3, result, resultShapeInfo, special,
maxShapeBuffer, result, resultShapeInfo, dimension, 1);
if (opNum == 40) {
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
execTransformHalf(tempPointers, 47, result, resultShapeInfo, result, resultShapeInfo, extraParams);
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// log 3
if (opNum == 40)
execTransformHalf(extraPointers, 5, result, resultShapeInfo, result, resultShapeInfo, extraParams);
else if (opNum == 39)
execTransformHalf(extraPointers, 42, result, resultShapeInfo, result, resultShapeInfo, extraParams);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete[] hostMaxShapeBuffer;
break;
}
case 41: {
// IsMax along all dimensions
bool scalarCheat = false;
if (extraParams == nullptr) {
scalarCheat = true;
}
if (scalarCheat) {
// 1D input, aka vector
int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams);
int targetIdx = 0;
if (shape::order(hostXShapeInfo) == 'c' || (shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)))
targetIdx = maxIdx;
else
targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1];
fillIsMaxHalf<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx);
} else {
// going for dimension-based IsMax
int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]);
Nd4jIndex *tadMaxOffsets = reinterpret_cast<Nd4jIndex *> (extraPointers[11]);
int *dimension = reinterpret_cast<int *> (extraPointers[15]);
special = reinterpret_cast<float16 *>(extraPointers[17]);
int dimensionLength = getDeviceId(extraPointers[18]);
// we call for IMax on specified dimension
execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
// at this point, all IMax indexes are gathered, and we execute
fillDimensionalIsMaxHalf<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets );
checkCudaErrors(cudaStreamSynchronize(*stream));
}
break;
}
default: {
printf("Bad case for transformHalf\n");
break;
}
}
}
} else {
// Im2Col & Col2Im enforced grids
if (opNum == 37 || opNum == 36) {
launchDims.x = 512;
launchDims.y = 512;
launchDims.z += 512 * sizeof(float16);
} else if (opNum == 70) {
// we'll be using shared memory to speed up reverse
launchDims.z += launchDims.y * sizeof(float);
}
// Histogram op requires additional memory chunk
if (opNum == 48) {
int length = shape::length(hostZShapeInfo);
cudaMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(float16));
}
// this macro builds a bunch of IF/ELSE selectors for the kernel launch
DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer, devTadShapeInfo, devTadOffsets), OPS_A(TRANSFORM_OPS))
// we need guaranteed sync here, due to temp memory release
if (nd4j::Environment::getInstance()->isDebug() || opNum == 48)
checkCudaErrors(cudaStreamSynchronize(*stream));
// release that histogram memory chunk
if (opNum == 48) {
cudaFree((void *)maskedAllocPointer);
}
}
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
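/*
 * Note on the opNum 38/39/40 branch above: it composes a numerically stable
 * (log)softmax out of the primitive reduce/broadcast/transform entry points:
 * row max -> subtract -> exp -> row sum -> divide, plus the extra transform
 * calls for the log / derivative variants selected by opNum.
 *
 * Illustrative host-side sketch of the same per-row math (an assumption for
 * documentation only, not part of this file's API):
 *
 *   static void softmaxRow(std::vector<float> &row, bool takeLog) {
 *       float mx = *std::max_element(row.begin(), row.end());    // "max 3"
 *       float sum = 0.f;
 *       for (auto &v : row) { v = std::exp(v - mx); sum += v; }  // "sub 1", "exp 3", "sum 1"
 *       for (auto &v : row) {                                    // "divide 3"
 *           v /= sum;
 *           if (takeLog) v = std::log(v);                        // "log 3"
 *       }
 *   }
 */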
/**
*
* @param extraPointers pointers to the CUDA context (stream, host shapes, scratch buffers)
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
float *dx,
int *xShapeInfo,
float *result,
int *resultShapeInfo,
float *extraParams,
int *xIndexes,
int *resultIndexes) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F21 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF21 opNum:[%i]\n", opNum);
transformFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultIndexes, allocPointer, reductionPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execTransformHalf(
Nd4jPointer *extraPointers,
int opNum,
float16 *dx,
int *xShapeInfo,
float16 *result,
int *resultShapeInfo,
float16 *extraParams,
int *xIndexes,
int *resultIndexes) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H21 opNum:[%i]\n", opNum);
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH21 opNum:[%i]\n", opNum);
transformHalfIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, shape::rank(hostXShapeInfo),
extraParams,
result,
resultIndexes, allocPointer, reductionPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
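/*
 * Convention note (inferred from the indexing in this file, not an authoritative
 * spec): the NativeOps entry points above receive their CUDA context through the
 * extraPointers array rather than through named arguments. Slots observed here:
 *   [0]  host X shapeInfo           [1]  cudaStream_t handle
 *   [2]  device id                  [3]  allocation / scratch pointer (or mode flag)
 *   [4]  reduction buffer           [7]  host Y shapeInfo
 *   [9]  host shape / TAD pointers  [10] device TAD shapeInfo
 *   [11] device TAD offsets         [12]/[13] device Z TAD shapeInfo / offsets
 *
 * Hypothetical caller-side sketch (names are illustrative):
 *   Nd4jPointer extras[16] = {};
 *   extras[0] = (Nd4jPointer) hostXShapeInfo;
 *   extras[1] = (Nd4jPointer) stream;        // cudaStream_t stored by value; read back via &extras[1]
 *   extras[2] = deviceIdPtr;                 // whatever getDeviceId() expects
 *   extras[3] = (Nd4jPointer) devScratch;
 *   extras[4] = (Nd4jPointer) devReduction;
 */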
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
int *resultShapeInfo,
T *input,
int *inputShapeInfo, int *allocationPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int *zShape = shape::shapeOf(resultShapeInfo);
int *zStride = shape::stride(resultShapeInfo);
int *yShape = shape::shapeOf(inputShapeInfo);
int *yStride = shape::stride(inputShapeInfo);
char yOrder = shape::order(inputShapeInfo);
int len = shape::length(inputShapeInfo);
int resultEWS = shape::elementWiseStride(resultShapeInfo);
int inputEWS = shape::elementWiseStride(inputShapeInfo);
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
int rank = shape::rank(inputShapeInfo);
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
}
} else {
int rank = shape::rank(inputShapeInfo);
int coord[MAX_RANK];
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<double>(
offset,
order, result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
extern "C" __global__ void flattenKernelHalf(int offset,
char order,
float16 *result,
int *resultShapeInfo,
float16 *input,
int *inputShapeInfo, int *allocationPointer) {
flattenKernelGeneric<float16>(
offset,
order,
result,
resultShapeInfo,
input,
inputShapeInfo,
allocationPointer);
}
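/*
 * flattenKernelGeneric above degenerates to a strided copy when the orders match
 * and both element-wise strides are positive; otherwise every linear index is
 * decomposed into coordinates (ind2sub / ind2subC) and pushed through the input
 * strides. Illustrative host-side sketch of the 'f'-order fallback, restricted to
 * rank 2 (an assumption for documentation only):
 *
 *   // input shape {r, c}, strides {sr, sc}; column-major linear index i
 *   static int fOrderOffset2D(int i, int r, int sr, int sc) {
 *       int row = i % r;
 *       int col = i / r;
 *       return row * sr + col * sc;   // what shape::getOffset() yields for rank 2
 *   }
 *   // result[i + dOffset] = input[fOrderOffset2D(i, r, sr, sc)];
 */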
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the result array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF222 opNum:[7]\n");
flattenKernelFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::flattenHalf(
Nd4jPointer *extraPointers,
int offset,
char order,
float16 *result,
int *resultShapeInfo,
float16 *input,
int *inputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H22 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH222 opNum:[7]\n");
flattenKernelHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the result array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D30 opNum:[7]\n");
int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);
flattenKernelDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::checkP2P() {
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
cudaSetDevice(x);
cudaDeviceCanAccessPeer(&canAccess, x , y);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
cudaSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void NativeOps::enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int x = 0; x < devCnt; x++) {
for (int y = 0; y < devCnt; y++) {
if (x == y)
continue;
int canAccess = 0;
cudaSetDevice(x);
cudaDeviceCanAccessPeer(&canAccess, x , y);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(y, 0);
} else {
cudaDeviceDisablePeerAccess(y);
}
} else {
if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
}
}
}
cudaSetDevice(curDevice);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
bool NativeOps::isP2PAvailable() {
return supportedP2P;
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
//void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
cudaFuncGetAttributes(&funcAttributes[1], transformFloatIndexes);
//void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat;
// FIXME
cudaFuncGetAttributes(&funcAttributes[2], transformFloatIndexes);
//cudaFuncGetAttributes(&funcAttributes[3], (void *)functions::summarystats::summaryStatsReduceFloat);
//cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
// void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
// cudaFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes);
// void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
// cudaFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes);
cudaFuncGetAttributes(&funcAttributes[7], reduce3Float);
cudaFuncGetAttributes(&funcAttributes[8], reduceSimpleGenericXD_0_float);
// printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[28], reduceSimpleGeneric1D_0_float); // 1D
// printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[29], reduceSimpleGeneric3D_0_float); // 6D
// printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes);
cudaFuncGetAttributes(&funcAttributes[30], flattenKernelFloat);
cudaFuncGetAttributes(&funcAttributes[31], concatKernelFloat);
// cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
// cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
// cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
cudaFuncGetAttributes(&funcAttributes[12], broadcastSimple_0_float);
cudaFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
// void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
cudaFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes);
//void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble;
// FIXME
cudaFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes);
//cudaFuncGetAttributes(&funcAttributes[17], functions::summarystats::summaryStatsReduceDouble);
// cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
//void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble;
// cudaFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes);
//void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble;
// cudaFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes);
cudaFuncGetAttributes(&funcAttributes[21], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[22], reduceSimpleGenericXD_0_double);
// cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
// cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
// cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
cudaFuncGetAttributes(&funcAttributes[26], broadcastSimple_0_double);
cudaFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
cudaFuncGetAttributes(&funcAttributes[32], reduceSimpleGeneric1D_0_double); // 1D
cudaFuncGetAttributes(&funcAttributes[33], reduceSimpleGeneric3D_0_double); // 6D
cudaFuncGetAttributes(&funcAttributes[34], flattenKernelDouble);
cudaFuncGetAttributes(&funcAttributes[35], concatKernelDouble);
cudaFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat);
cudaFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble);
cudaFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat);
cudaFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble);
cudaFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat);
cudaFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);
cudaFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);
cudaFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);
/////////////////////////
cudaFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
cudaFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
cudaFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);
//
//cudaFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float);
//cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16);
//cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double);
}
void NativeOps::initializeFunctions(Nd4jPointer *functions) {
nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
this->cublasSgemv = (CublasSgemv)functions[0];
this->cublasDgemv = (CublasDgemv)functions[1];
this->cublasHgemm = (CublasHgemm)functions[2];
this->cublasSgemm = (CublasSgemm)functions[3];
this->cublasDgemm = (CublasDgemm)functions[4];
this->cublasSgemmEx = (CublasSgemmEx)functions[5];
this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
cudaError_t res = cudaHostAlloc((void **)&pointer, memorySize, cudaHostAllocDefault);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to device_id, etc.
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost((void *) pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
cudaError_t res = cudaFree((void *)pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
Nd4jPointer NativeOps::createContext() {
return 0L;
}
Nd4jPointer NativeOps::createStream() {
Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t));
cudaError_t result = cudaStreamCreate((cudaStream_t *) &nativeStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeStream;
}
Nd4jPointer NativeOps::createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));
cudaError_t result = cudaEventCreateWithFlags((cudaEvent_t *) &nativeEvent, cudaEventDisableTiming);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return nativeEvent;
}
int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t result = cudaEventRecord(*pEvent, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
int deviceId = getDeviceId(ptrToDeviceId);
cudaError_t result = cudaSetDevice(deviceId);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jIndex) memFree;
}
Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jIndex) memTotal;
}
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
return memcpyAsync(dst, src, size, flags, reserved);
}
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
printf("UNDEFINED MEMCPY!\n");
// bail out instead of issuing a copy with an uninitialized cudaMemcpyKind
return 0L;
}
}
cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
if (result != 0) {
checkCudaErrors(result);
printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result );
fflush(stdout);
fflush(stderr);
return 0L;
}
else return 1;
}
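/*
 * memcpyAsync above maps the integer flags argument onto cudaMemcpyKind:
 *   0 -> host-to-host, 1 -> host-to-device, 2 -> device-to-host, 3 -> device-to-device
 * and returns 0L on failure, 1 on success. Hypothetical caller-side sketch tying it to
 * the allocation entry points defined below in this file (names and the way the stream
 * handle is packed into an Nd4jPointer are assumptions, not an authoritative recipe):
 *
 *   NativeOps ops;
 *   Nd4jIndex bytes = 1024 * sizeof(float);
 *   Nd4jPointer host = ops.mallocHost(bytes, 0);
 *   Nd4jPointer dev  = ops.mallocDevice(bytes, deviceIdPtr, 0);
 *   Nd4jPointer str  = (Nd4jPointer) stream;       // cudaStream_t passed by value
 *   ops.memcpyAsync(dev, host, bytes, 1, str);     // H2D
 *   ops.streamSynchronize(str);
 *   ops.memcpyAsync(host, dev, bytes, 2, str);     // D2H
 *   ops.streamSynchronize(str);
 *   ops.freeDevice(dev, deviceIdPtr);
 *   ops.freeHost(host);
 */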
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaError_t result = cudaMemset((void *) dst, value, (size_t) size);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaError_t result = cudaMemsetAsync((void *) dst, value, (size_t) size, *pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::destroyEvent(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t result = cudaEventDestroy(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1;
}
int NativeOps::streamSynchronize(Nd4jPointer stream) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
cudaError_t result = cudaStreamSynchronize(*pStream);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::eventSynchronize(Nd4jPointer event) {
cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
cudaError_t result = cudaEventSynchronize(*pEvent);
checkCudaErrors(result);
if (result != 0)
return 0L;
else return 1L;
}
int NativeOps::getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void NativeOps::enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable);
}
void NativeOps::setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int NativeOps::ompGetMaxThreads() {
return maxThreads;
}
int NativeOps::ompGetNumThreads() {
return maxThreads;
}
void NativeOps::setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void NativeOps::enableVerboseMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}
int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].major;
}
int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].minor;
}
const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
int device = getDeviceId(ptrToDeviceId);
return deviceProperties[device].name;
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 ||
shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jIndex length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
concatKernelScalarFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
concatKernelVStackFloat<<< 128, 512, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
concatKernelHStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
concatKernelFloat<<< 2048, 128, funcAttributes[31].sharedSizeBytes , *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
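/*
 * Path selection in concatFloat above (the Half and Double variants below follow
 * the same branches):
 *   - all inputs scalar                                    -> concatKernelScalarFloat
 *   - dim 0, 'c'-ordered rank-2 result, inputs are 'c'     -> concatKernelVStackFloat
 *     vectors (or equal-length 'c' arrays) with ews > 0
 *   - dim 1, result is a vector, inputs are vectors        -> concatKernelHStackFloat
 *     with ews > 0
 *   - anything else                                        -> generic TAD-based concatKernelFloat
 *
 * Worked example: four [1, 5] 'c' row vectors concatenated along dimension 0
 * (result [4, 5], 'c') take the VStack kernel; the same inputs concatenated along
 * dimension 1 (result [1, 20]) take the HStack kernel.
 */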
void NativeOps::concatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jIndex length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[38].sharedSizeBytes;
concatKernelScalarHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[40].sharedSizeBytes;
concatKernelVStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[42].sharedSizeBytes;
concatKernelHStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
//smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280);
int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
concatKernelHalf<<< 2048, 128, funcAttributes[31].sharedSizeBytes, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::specialConcatFloat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
void NativeOps::specialConcatHalf(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<float16>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::specialConcatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
nd4j::SpecialMethods<double>::concatCpuGeneric(
dimension,
numArrays,
data,
inputShapeInfo,
result,
resultShapeInfo);
}
/**
* Concatenate multiple arrays of the same shape together
* along a particular dimension
*/
void NativeOps::concatDouble(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo,
Nd4jPointer *tadPointers,
Nd4jPointer *offsetPointers) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);
// numArrays will be used as the number of TADs, so each block processes 1 input
int smem = 0;
bool isVstack = false;
bool isScalar = true;
bool isHstack = false;
for (int i = 0; i < numArrays; i++) {
if (!shape::isScalar(hostShapePointers[i])) {
isScalar = false;
break;
}
}
if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
isVstack = false;
break;
}
}
}
// let's try to fit N-dimensional vstack
if (!isVstack && !isScalar && dimension == 0 && shape::order(hostXShapeInfo) == 'c') {
Nd4jIndex length0 = shape::length(hostShapePointers[0]);
isVstack = true;
for (int i = 0; i < numArrays; i++) {
if (shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c' || length0 != shape::length(hostShapePointers[i])) {
isVstack = false;
break;
}
}
}
if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
isHstack = true;
for (int i = 0; i < numArrays; i++) {
if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
isHstack = false;
break;
}
}
}
if (isScalar) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going scalar concat\n");
smem = funcAttributes[39].sharedSizeBytes;
concatKernelScalarDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isVstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going VStack concat\n");
smem = funcAttributes[41].sharedSizeBytes;
concatKernelVStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else if (isHstack) {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going HStack concat\n");
smem = funcAttributes[43].sharedSizeBytes;
concatKernelHStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]);
} else {
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("Going generic concat\n");
int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *devZOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
concatKernelDouble<<< 2048, 128, funcAttributes[35].sharedSizeBytes, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
}
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* This method builds TAD-only shape info and offsets for the given dimensions
* and copies them into the provided host buffers.
*/
void NativeOps::tadOnlyShapeInfo(int *xShapeInfo, int *dimension, int dimensionLength, int *target, Nd4jIndex *offsets) {
shape::TAD *tad = new shape::TAD();
tad->init(xShapeInfo, dimension, dimensionLength);
//tad->setOutputBuffer(target);
tad->createTadOnlyShapeInfo();
tad->createOffsets();
std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int));
std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(Nd4jIndex));
/*
shape::printShapeInfoLinear(hostXShapeInfo);
shape::printShapeInfoLinear(tad->tadOnlyShapeInfo);
shape::printShapeInfoLinear(target);
*/
delete tad;
}
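/*
 * The (rank * 2 + 4) sizing used in the memcpy above matches the int shapeInfo
 * layout assumed throughout this file (inferred from the arithmetic here, not
 * restated from a spec):
 *   [ rank | shape[rank] | stride[rank] | offset | elementWiseStride | order ]
 * Worked example for a 2x3 'c'-ordered array (2*2 + 4 = 8 ints):
 *   int shapeInfo[8] = { 2,       // rank
 *                        2, 3,    // shape
 *                        3, 1,    // stride
 *                        0,       // offset
 *                        1,       // element-wise stride
 *                        99 };    // order, 'c' stored as its char code
 */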
int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);
cudaMemcpyKind kind;
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*pStream));
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
}
//cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
cudaError_t result = cudaMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream);
checkCudaErrors(result);
if (result != 0) {
printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags );
return 0L;
}
else return 1;
}
Nd4jPointer NativeOps::getConstantSpace() {
Nd4jPointer dConstAddr;
cudaError_t result = cudaGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory);
return dConstAddr;
}
void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
pullRowsKernelHalf<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
pullRowsKernelFloat<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
pullRowsKernelDouble<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
averagingKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<float16>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length, bool propagate) {
cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
// launching on host memory
nd4j::SpecialMethods<float>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length, bool propagate) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("averageDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
averagingKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length, propagate);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<double>::averageGeneric(x, dz, n, length, propagate);
}
}
void NativeOps::accumulateHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float16 **x = reinterpret_cast<float16 **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateHalf called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]);
accumulateKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<float16>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length) {
cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
float **x = reinterpret_cast<float **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]);
accumulateKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
// launching on host memory
nd4j::SpecialMethods<float>::accumulateGeneric(x, dz, n, length);
}
}
void NativeOps::accumulateDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int mode = getDeviceId(extras[3]);
double **x = reinterpret_cast<double **>(dx);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateDouble called\n");
// launching on gpu
if (mode == 0) {
dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]);
accumulateKernelDouble << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (x, dz, n, length);
checkCudaErrors(cudaStreamSynchronize(*stream));
} else {
nd4j::SpecialMethods<double>::accumulateGeneric(x, dz, n, length);
}
}
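/*
 * The average* / accumulate* entry points above read a mode flag out of extras[3]
 * (via getDeviceId): mode 0 launches the device kernel on the stream in extras[1],
 * any other value falls back to the host-side nd4j::SpecialMethods path.
 * Element-wise over the n source buffers x[0..n-1] (presumably; inferred from the
 * kernel and method names, not shown in this file):
 *   average:    dz[i] = (x[0][i] + ... + x[n-1][i]) / n
 *   accumulate: dz[i] =  x[0][i] + ... + x[n-1][i]
 */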
void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
double **x = reinterpret_cast<double **>(dx);
double **z = reinterpret_cast<double **>(dz);
int **xShape = reinterpret_cast<int **>(xShapeInfo);
int **zShape = reinterpret_cast<int **>(zShapeInfo);
int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo);
Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets);
shuffleKernelDouble<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
float **x = reinterpret_cast<float **>(dx);
float **z = reinterpret_cast<float **>(dz);
int **xShape = reinterpret_cast<int **>(xShapeInfo);
int **zShape = reinterpret_cast<int **>(zShapeInfo);
int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo);
Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets);
shuffleKernelFloat<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
float16 **x = reinterpret_cast<float16 **>(dx);
float16 **z = reinterpret_cast<float16 **>(dz);
int **xShape = reinterpret_cast<int **>(xShapeInfo);
int **zShape = reinterpret_cast<int **>(zShapeInfo);
int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo);
Nd4jIndex **tadOffset = reinterpret_cast<Nd4jIndex **>(tadOffsets);
shuffleKernelHalf<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int xStride, float *dy, int yStride, float *dz, int zStride, float *extraA, float *extraB, float scalarA, float scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
// metaPredicateStridedFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDStrided<float>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int xStride, double *dy, int yStride, double *dz, int zStride, double *extraA, double *extraB, double scalarA, double scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
// metaPredicateStridedDouble<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDStrided<double>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int xStride, float16 *dy, int yStride, float16 *dz, int zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
// metaPredicateStridedHalf<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDStrided<float16>::execMetaPredicateStrided(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) {
// no-op
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
/*
metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB,
float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) {
*/
// metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
// no-op;
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDShaped<double>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) {
// no-op;
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
// we have to convert float -> fp16 prior to the kernel call
float16 scalA = (float16) scalarA;
float16 scalB = (float16) scalarB;
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDShaped<float16>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
// no-op;
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
/*
if (opTypeA == 2) {
if (opTypeB == 0) {
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
}
}
*/
functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
bool NativeOps::isExperimentalEnabled() {
return experimentalSupport;
}
void NativeOps::setOmpMinThreads(int threads) {
minThreads = nd4j::math::nd4j_max<int>(32, threads);
minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}
int NativeOps::getDevice() {
int curDevice = -1;
cudaGetDevice(&curDevice);
return curDevice;
}
void NativeOps::setElementThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::setTADThreshold(int num) {
// this is no-op for CUDA
}
void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum,
float *x,
int *xShapeInfo,
float *z,
int *zShapeInfo,
float *scalars,
float *extraParams,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostTadShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
//dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0);
dim3 launchDims = dim3(256, 256, 1024);
// this macro builds bunch of IF/ELSE selectors for kernel launch
//DISPATCH_SIMPLE(scalarAlongDimension, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum,
double *x,
int *xShapeInfo,
double *z,
int *zShapeInfo,
double *scalars,
double *extraParams,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
// this macro builds bunch of IF/ELSE selectors for kernel launch
//DISPATCH_SIMPLE(scalarAlongDimension, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<double>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum,
float16 *x,
int *xShapeInfo,
float16 *z,
int *zShapeInfo,
float16 *scalars,
float16 *extraParams,
int *dimension,
int dimensionLength) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(256, 256, 1024);
/*
int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
Nd4jIndex *tadOffsets = reinterpret_cast<Nd4jIndex *>(extraPointers[11]);
int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
Nd4jIndex *tadOffsetsZ = reinterpret_cast<Nd4jIndex *>(extraPointers[13]);
*/
// this macro builds bunch of IF/ELSE selectors for kernel launch
//DISPATCH_SIMPLE(scalarAlongDimension, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))
functions::scalar::ScalarTransform<float16>::executeCudaAlongDimension(launchDims, extraPointers, opNum, x, xShapeInfo, z, zShapeInfo, scalars, extraParams, dimension, dimensionLength);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum,
float **arguments,
int numArguments,
int **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float *realArguments,
int numRealArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
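// launch configuration (block count, thread count, shared memory bytes) arrives packed as ints in extraPointers[2..4]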
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
// this macro builds bunch of IF/ELSE selectors for kernel launch
DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum,
double **arguments,
int numArguments,
int **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
double *realArguments,
int numRealArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
// this macro builds bunch of IF/ELSE selectors for kernel launch
DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum,
float16 **arguments,
int numArguments,
int **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
float16 *realArguments,
int numRealArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numBlocks, numThreads, shmem);
// this macro builds bunch of IF/ELSE selectors for kernel launch
DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
// not implemented yet
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
// this macro builds bunch of IF/ELSE selectors for kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
// not implemented yet
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
// this macro builds bunch of IF/ELSE selectors for kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
// not implemented yet
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int numBlocks = getDeviceId(extraPointers[2]);
int numThreads = getDeviceId(extraPointers[3]);
int shmem = getDeviceId(extraPointers[4]);
dim3 launchDims = dim3(numAggregates, numThreads, shmem);
// this macro builds bunch of IF/ELSE selectors for kernel launch
DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, int *zShapeBuffer, float *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
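// launchDims is used as a (gridSize, blockSize, sharedMemBytes) triple; the z component budgets shared memory for the RandomBuffer state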
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *y, int *yShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) {
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) );
functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, int *zShapeBuffer, double *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *y, int *yShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));
functions::random::RandomFunction<double>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaSingle(launchDims, extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *y, int *yShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaTriple(launchDims, extraPointers, opNum, state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
}
void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));
functions::random::RandomFunction<float16>::executeCudaDouble(launchDims, extraPointers, opNum, state, x, xShapeBuffer, z, zShapeBuffer, extraArguments);
}
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// cudaStreamSynchronize(*stream);
unsigned long long *ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
nd4j::random::RandomBuffer *buffer = new nd4j::random::RandomBuffer(seed, bufferSize, (uint64_t *) ptrHost, (uint64_t *) ptrDev);
buffer->propagateToDevice(buffer, *stream);
checkCudaErrors(cudaStreamSynchronize(*stream));
// we generate sequence in the host memory
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
checkCudaErrors(cudaStreamSynchronize(*stream));
return buffer;
}
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);
// FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
cudaDeviceSynchronize();
delete buffer;
}
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh buffer on host side
nd4j::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
cudaStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::shapeBufferForNumpy(Nd4jPointer npyArray) {
/*
cnpy::NpyArray *arrPointer = reinterpret_cast<cnpy::NpyArray *>(npyArray);
int *shapeBuffer = shape::shapeBufferOfNpy(*arrPointer);
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
*/
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int *shape = new unsigned int[arr.shape.size()];
for(int i = 0; i < arr.shape.size(); i++) {
shape[i] = arr.shape[i];
}
int *shapeBuffer = shape::shapeBufferOfNpy(arr.shape.size(),
shape,
arr.fortranOrder);
delete[] shape;
return reinterpret_cast<Nd4jPointer>(shapeBuffer);
}
/**
*
* @param npyArray
* @return
*/
Nd4jPointer NativeOps::dataPointForNumpy(Nd4jPointer npyArray) {
char *buff = reinterpret_cast<char *>(npyArray);
//printf("Pointer contents %s\n",buff);
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
char *data = arrPointer->data;
if(arrPointer->wordSize == sizeof(float)) {
float *floatData = reinterpret_cast<float *>(data);
return reinterpret_cast<Nd4jPointer>(floatData);
}
else if(arrPointer->wordSize == sizeof(double)) {
double *doubleData = reinterpret_cast<double *>(data);
return reinterpret_cast<Nd4jPointer >(doubleData);
}
return reinterpret_cast<Nd4jPointer >(0);
}
/**
* Load a numpy array from a file
* and return it as an Nd4jPointer
* @param path
* @return
*/
Nd4jPointer NativeOps::numpyFromFile(std::string path) {
/*cnpy::NpyArray arr = cnpy::npyLoad(path);
return reinterpret_cast<Nd4jPointer >(&arr);
*/
char *numpyBuffer = cnpy::loadFile(path.data());
return reinterpret_cast<Nd4jPointer >(numpyBuffer);
}
void NativeOps::releaseNumpy(Nd4jPointer npyArray) {
free((void *) npyArray);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
int *shapeBuffer = reinterpret_cast<int *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
/**
* Get the element size for a numpy array
* @param npyArray the numpy array's address
* to get the length for
* @return
*/
int NativeOps::elementSizeForNpyArray(Nd4jPointer npyArray) {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
cnpy::NpyArray *arrPointer = &arr;
int size = arrPointer->wordSize;
return size;
/*
cnpy::NpyArray *arr = reinterpret_cast<cnpy::NpyArray *>(npyArray);
return arr->wordSize;
*/
}
/**
* The pointer to get the address for
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer NativeOps::pointerForAddress(Nd4jIndex address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void NativeOps::tearDouble(Nd4jPointer *extras, double *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
tearKernelDouble<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::tearFloat(Nd4jPointer *extras, float *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
tearKernelFloat<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::tearHalf(Nd4jPointer *extras, float16 *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
tearKernelHalf<<<512, 512, 512, *stream>>>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP1Float(Nd4jPointer *extras, float *dx, Nd4jIndex N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP1Float<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP1Double(Nd4jPointer *extras, double *dx, Nd4jIndex N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP1Double<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP1Half(Nd4jPointer *extras, float16 *dx, Nd4jIndex N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP1Half<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz, threshold);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jIndex N, int *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
//encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
// prefix-scan the per-block match counts produced in phase 1 to obtain the write offsets used by phase 3
prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP3Float(Nd4jPointer *extraPointers, float *dx, int *offsets, Nd4jIndex N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP3Float<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP3Double(Nd4jPointer *extraPointers, double *dx, int *offsets, Nd4jIndex N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP3Double<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::encodeThresholdP3Half(Nd4jPointer *extraPointers, float16 *dx, int *offsets, Nd4jIndex N, int *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int blockSize = 1024;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
encoderKernelP3Half<<<numBlocks, blockSize , 4096, *stream>>>(dx, offsets, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::decodeThresholdFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
decoderKernelFloat<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::decodeThresholdDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
decoderKernelDouble<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::decodeThresholdHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz){
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
// we probably want to have smaller blocks here, memory writes are misaligned anyway
int blockSize = 128;
int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);
decoderKernelHalf<<<numBlocks, blockSize , 1024, *stream>>>(dx, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduce3AllDouble(Nd4jPointer *extraPointers,
int opNum,
double *x,
int *xInfo,
double *extraParamsVals,
double *y,
int *yInfo,
double *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *xTadShapeInfo,
Nd4jIndex *xOffsets,
int *yTadShapeInfo,
Nd4jIndex *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("D119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(double), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AD119 opNum:[%i]\n", opNum);
reduce3AllDouble<<<launchDims.x, 512, (512 * 8 * 2 + 512), *stream>>>(
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduce3AllFloat(Nd4jPointer *extraPointers,
int opNum,
float *x,
int *xInfo,
float *extraParamsVals,
float *y,
int *yInfo,
float *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *xTadShapeInfo,
Nd4jIndex *xOffsets,
int *yTadShapeInfo,
Nd4jIndex *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("F119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AF119 opNum:[%i]\n", opNum);
reduce3AllFloat<<<launchDims.x, 512, (512 * 4 * 2 + 512), *stream>>>(
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::execReduce3AllHalf(Nd4jPointer *extraPointers,
int opNum,
float16 *x,
int *xInfo,
float16 *extraParamsVals,
float16 *y,
int *yInfo,
float16 *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *xTadShapeInfo,
Nd4jIndex *xOffsets,
int *yTadShapeInfo,
Nd4jIndex *yOffsets) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
if (nd4j::Environment::getInstance()->isDebugAndVerbose())
printf("H119 opNum:[%i]\n", opNum);
int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);
dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[7], dimensionLength, sizeof(float16), 2);
if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
printf("AH119 opNum:[%i]\n", opNum);
reduce3AllHalf<<<launchDims.x, 512, (512 * 2 * 2 + 512), *stream>>>(
opNum,
x,
xInfo,
y,
yInfo,
extraParamsVals,
result,
resultShapeInfoBuffer,
dimension,
dimensionLength,
1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
int xEWS = shape::elementWiseStride(hostXShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
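// bitonic sort over a power-of-two length: k is the size of the sorted subsequences being merged, j is the compare-exchange distance within each merge step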
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
cudaBitonicSortFloat<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<float> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformFloat(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if (1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
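// generalized bitonic-style sort for non-power-of-two lengths: the merge window grows up to the next power of two above xLength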
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
cudaSortFloat<<<numBlocks, numThreads, numThreads * 2 * sizeof(float), *stream>>>(x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
int xEWS = shape::elementWiseStride(hostXShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
cudaBitonicSortDouble<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
#ifdef __clang__
if (1 > 0) {
#elif __GNUC__
if ((xLength > 1024 * 1024 * 10) && xEWS == 1) {
b40c::radix_sort::Enactor enactor;
b40c::util::DoubleBuffer<double> sort_storage(x);
enactor.Sort(sort_storage, xLength);
// fire reverse op
if (descending)
execTransformDouble(extraPointers, 70, x, xShapeInfo, x, xShapeInfo, nullptr);
} else {
#else
if (1 > 0) {
#endif
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
cudaSortDouble<<<numBlocks, numThreads, numThreads * 2 * sizeof(double), *stream>>>(x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
}
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, bool descending) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int xLength = shape::length(hostXShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0)) {
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
for (int k = 2; k <= xLength; k = 2*k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
cudaBitonicSortHalf<<<numBlocks, numThreads, 512, *stream>>>(x, xShapeInfo, j, k, xLength, descending);
}
}
} else {
// half is incompatible with radix, so only bitonic here
int numThreads = nd4j::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window<<=1) {
int n = window;
int rev = 0;
do{
int half = n >> 1;
cudaSortHalf<<<numBlocks, numThreads, numThreads * 2 * sizeof(float16), *stream>>>(x, xShapeInfo, n, xLength, rev, descending);
n>>=1;
rev = 1;
} while(n > 1);
}
}
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortTadFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) {
// to be implemented
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
cudaSortTadFloat<<<512, 512, 1088 * sizeof(float), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortTadHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) {
// to be implemented
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
cudaSortTadHalf<<<512, 512, 1088 * sizeof(float16), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortTadDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffsets, bool descending) {
// to be implemented
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
cudaSortTadDouble<<<512, 512, 1088 * sizeof(double), *stream>>>(x, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::sortCooIndicesFloat(Nd4jPointer *extraPointers, int *indices, float *values, Nd4jIndex length, int rank) {
}
void NativeOps::sortCooIndicesDouble(Nd4jPointer *extraPointers, int *indices, double *values, Nd4jIndex length, int rank) {
}
void NativeOps::sortCooIndicesHalf(Nd4jPointer *extraPointers, int *indices, float16 *values, Nd4jIndex length, int rank) {
}
Nd4jIndex NativeOps::encodeBitmapFloat(Nd4jPointer *extraPointers, float *dx, Nd4jIndex N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
cudaEncodeBitmapFloat<<<512, 512, 512 * 2 * sizeof(float) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold);
checkCudaErrors(cudaStreamSynchronize(*stream));
Nd4jIndex result = (Nd4jIndex) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jIndex NativeOps::encodeBitmapDouble(Nd4jPointer *extraPointers, double *dx, Nd4jIndex N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
cudaEncodeBitmapDouble<<<512, 512, 512 * 2 * sizeof(double) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold);
checkCudaErrors(cudaStreamSynchronize(*stream));
Nd4jIndex result = (Nd4jIndex) resultPointer[0];
resultPointer[0] = 0;
return result;
}
Nd4jIndex NativeOps::encodeBitmapHalf(Nd4jPointer *extraPointers, float16 *dx, Nd4jIndex N, int *dz, float threshold) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);
cudaEncodeBitmapHalf<<<512, 512, (512 * sizeof(float16)) + (512 * sizeof(int)) + 384, *stream>>>(dx, N, dz, resultPointer, reductionPointer, threshold);
checkCudaErrors(cudaStreamSynchronize(*stream));
Nd4jIndex result = (Nd4jIndex) resultPointer[0];
resultPointer[0] = 0;
return result;
}
void NativeOps::decodeBitmapFloat(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
cudaDecodeBitmapFloat<<<512, 512, 512 * sizeof(float) + 384, *stream>>>(dx, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::decodeBitmapDouble(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, double *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
cudaDecodeBitmapDouble<<<512, 512, 512 * sizeof(double) + 384, *stream>>>(dx, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::decodeBitmapHalf(Nd4jPointer *extraPointers, void *dx, Nd4jIndex N, float16 *dz) {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
cudaDecodeBitmapHalf<<<512, 512, 512 * sizeof(float16) + 384, *stream>>>(dx, N, dz);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
Nd4jIndex* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jIndex length) {
return nullptr;
}
void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jIndex* ptrMap, Nd4jIndex length) {
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer protoBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeProtoGraphFloat(Nd4jPointer *extraPointers, const char *fileName) {
return nullptr;
}
Nd4jPointer NativeOps::executeFlatGraphFloat(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeFlatGraphHalf(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
Nd4jPointer NativeOps::executeFlatGraphDouble(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
return nullptr;
}
const char* NativeOps::getAllCustomOps() {
return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) {
nd4j::graph::VariableSpace<T> varSpace;
Context<T> block(2, &varSpace);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = (int *) inputShapes[e];
auto buffer_ = (T *) inputBuffers[e];
auto array = new nd4j::NDArray<T>(buffer_, shape_);
array->triggerAllocationFlag(false, false);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
template<typename T>
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* inputShapes, int numInputShapes, T* tArgs, int numTArgs, int *iArgs, int numIArgs) {
nd4j::graph::Context<T> block(1);
nd4j::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back((int *) inputShapes[e]);
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
nd4j::ShapeList* NativeOps::calculateOutputShapesFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return _calculateOutputShapes<float>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, float16* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return _calculateOutputShapes<float16>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
nd4j::ShapeList* NativeOps::calculateOutputShapesDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, int *iArgs, int numIArgs) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return _calculateOutputShapes<double>(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
}
template<typename T>
static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp<T>* op, Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, T* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<nd4j::NDArray<T>*> inputs(numInputs);
std::vector<nd4j::NDArray<T>*> outputs;
std::vector<T> ttArgs(numTArgs);
std::vector<int> iiArgs(numIArgs);
// filling block now
for (int e = 0; e < numInputs; e++) {
auto buffer = (T *) inputBuffers[e];
auto shape = (int *) inputShapes[e];
// auto var = new Variable<T>(new NDArray<T>(buffer, shape));
// block.getVariables()->emplace_back(var);
auto array = new nd4j::NDArray<T>(buffer, shape);
//array->setSpecialBuffers( (T *) inputBuffers[e + numInputs], (int *) inputShapes[e + numInputs]);
inputs[e] = array;
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
// hypothetically at this point we have everything filled
auto result = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (result->status() != ND4J_STATUS_OK)
return result->status();
if (!isInplace) {
if (result->size() != numOutputs) {
return ND4J_STATUS_BAD_OUTPUT;
}
for (int e = 0; e < numOutputs; e++) {
auto buffer = (T *) outputBuffers[e];
auto shape = (int *) outputShapes[e];
nd4j::NDArray <T> tmp(buffer, shape);
tmp.assign(result->at(e));
}
}
delete result;
for (auto ptr: inputs)
delete ptr;
return ND4J_STATUS_OK;
}
int NativeOps::execCustomOpFloat(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationFloat(hash);
return realExec<float>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpDouble(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationDouble(hash);
return realExec<double>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::execCustomOpHalf(Nd4jPointer* extraPointers, Nd4jIndex hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, float16* tArgs, int numTArgs, int *iArgs, int numIArgs, bool isInplace) {
auto op = nd4j::ops::OpRegistrator::getInstance()->getOperationHalf(hash);
return realExec<float16>(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, isInplace);
}
int NativeOps::registerGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<double>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
int NativeOps::registerGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer flatBufferPointer) {
auto graph = nd4j::graph::GraphExecutioner<float16>::importFromFlatPointer(flatBufferPointer);
nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
}
template <typename T>
static VariablesSet<T>* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph<T>(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<nd4j::NDArray<T> *> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new nd4j::NDArray<T>((T *) inputBuffers[e], (int *) inputShapes[e]);
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto result = nd4j::graph::GraphExecutioner<T>::execute(graph, varSpace);
auto varSet = new nd4j::graph::VariablesSet<T>(result);
if (result == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/Index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet<float>* NativeOps::executeStoredGraphFloat(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<float16>* NativeOps::executeStoredGraphHalf(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<float16>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
VariablesSet<double>* NativeOps::executeStoredGraphDouble(Nd4jPointer *extraPointers, Nd4jIndex graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
return executeStoredGraphT<double>(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
}
int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jIndex graphId) {
nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
}
void NativeOps::deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void NativeOps::deleteIntArray(Nd4jPointer pointer) {
int *ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
template <typename T>
static void deleteVariablesSetT(Nd4jPointer pointer) {
nd4j::graph::VariablesSet<T>* ptr = reinterpret_cast<nd4j::graph::VariablesSet<T>*>(pointer);
delete ptr;
}
void NativeOps::deleteVariablesSetFloat(Nd4jPointer pointer) {
deleteVariablesSetT<float>(pointer);
}
void NativeOps::deleteVariablesSetHalf(Nd4jPointer pointer) {
deleteVariablesSetT<float16>(pointer);
}
void NativeOps::deleteVariablesSetDouble(Nd4jPointer pointer) {
deleteVariablesSetT<double>(pointer);
}
void NativeOps::deleteShapeList(Nd4jPointer shapeList) {
nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);
list->destroy();
delete list;
}
const char* NativeOps::getAllOperations() {
return nd4j::OpTracker::getInstance()->exportOperations();
}
|
29c183eee0bb0a92448c2467a6a4cc15e4e8afdd.hip | // !!! This is a file automatically generated by hipify!!!
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cuda_reduction
#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
void test_cuda_reduction()
{
Tensor<float, 4> in1(72,53,97,113);
Tensor<float, 2> out(72,97);
in1.setRandom();
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_out;
hipMalloc((void**)(&d_in1), in1_bytes);
hipMalloc((void**)(&d_out), out_bytes);
hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
array<Eigen::DenseIndex, 2> reduction_axis;
reduction_axis[0] = 1;
reduction_axis[1] = 3;
gpu_out.device(gpu_device) = gpu_in1.mean(reduction_axis);
assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
float sum = 0;
int count = 0;
for (int k = 0; k < 53; ++k) {
for (int l = 0; l < 113; ++l) {
sum += in1(i, k, j, l);
count++;
}
}
float mean = sum / count;
VERIFY_IS_APPROX(out(i,j), mean);
}
}
hipFree(d_in1);
hipFree(d_out);
}
| 29c183eee0bb0a92448c2467a6a4cc15e4e8afdd.cu | #define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cuda_reduction
#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
void test_cuda_reduction()
{
Tensor<float, 4> in1(72,53,97,113);
Tensor<float, 2> out(72,97);
in1.setRandom();
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_out;
cudaMalloc((void**)(&d_in1), in1_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
array<Eigen::DenseIndex, 2> reduction_axis;
reduction_axis[0] = 1;
reduction_axis[1] = 3;
gpu_out.device(gpu_device) = gpu_in1.mean(reduction_axis);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (int i = 0; i < 72; ++i) {
for (int j = 0; j < 97; ++j) {
float sum = 0;
int count = 0;
for (int k = 0; k < 53; ++k) {
for (int l = 0; l < 113; ++l) {
sum += in1(i, k, j, l);
count++;
}
}
float mean = sum / count;
VERIFY_IS_APPROX(out(i,j), mean);
}
}
cudaFree(d_in1);
cudaFree(d_out);
}
|
68a2e264790fa080b9cd8e23fcde7585027b9881.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void bucket_init(int *bucket, int range){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= range) return;
bucket[i] = 0;
__syncthreads();
}
__global__ void bucket_find(int *bucket, int *key, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
//bucket[key[i]]++;
if (i >= n) return;
atomicAdd(&bucket[key[i]],1);
__syncthreads();
}
__global__ void bucket_sort(int *bucket, int *key, int range, int n){
//for (int i=0, j=0; i<range; i++) {
// for (; bucket[i]>0; bucket[i]--) {
// key[j++] = i;
//}
//}
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
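// each thread scans the cumulative bucket counts to find which bucket the i-th output position falls into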
for (int a=0, b=0; a <= i; b++){
key[i] = b;
a += bucket[b];
}
__syncthreads();
}
int main() {
int n = 50;
int range = 5;
//std::vector<int> key(n);
int *key;
hipMallocManaged(&key, n * sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
/*
std::vector<int> bucket(range);
for (int i=0; i<range; i++) {
bucket[i] = 0;
}
*/
int *bucket;
//allocate memory for bucket array
hipMallocManaged(&bucket, range * sizeof(int));
// initialize bucket array
hipLaunchKernelGGL(( bucket_init), dim3(1), dim3(range), 0, 0, bucket, range);
hipDeviceSynchronize();
/*
for (int i=0; i<n; i++) {
bucket[key[i]]++;
}
*/
hipLaunchKernelGGL(( bucket_find), dim3(1),dim3(n), 0, 0, bucket, key, n);
hipDeviceSynchronize();
/*
for (int i=0, j=0; i<range; i++) {
for (; bucket[i]>0; bucket[i]--) {
key[j++] = i;
}
}
*/
hipLaunchKernelGGL(( bucket_sort), dim3(1),dim3(n), 0, 0, bucket, key, range, n);
hipDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
}
| 68a2e264790fa080b9cd8e23fcde7585027b9881.cu | #include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void bucket_init(int *bucket, int range){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= range) return;
bucket[i] = 0;
__syncthreads();
}
__global__ void bucket_find(int *bucket, int *key, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
//bucket[key[i]]++;
if (i >= n) return;
atomicAdd(&bucket[key[i]],1);
__syncthreads();
}
__global__ void bucket_sort(int *bucket, int *key, int range, int n){
//for (int i=0, j=0; i<range; i++) {
// for (; bucket[i]>0; bucket[i]--) {
// key[j++] = i;
//}
//}
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
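// each thread scans the cumulative bucket counts to find which bucket the i-th output position falls into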
for (int a=0, b=0; a <= i; b++){
key[i] = b;
a += bucket[b];
}
__syncthreads();
}
int main() {
int n = 50;
int range = 5;
//std::vector<int> key(n);
int *key;
cudaMallocManaged(&key, n * sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
/*
std::vector<int> bucket(range);
for (int i=0; i<range; i++) {
bucket[i] = 0;
}
*/
int *bucket;
//allocate memory for bucket array
cudaMallocManaged(&bucket, range * sizeof(int));
// initialize bucket array
bucket_init<<<1, range>>>(bucket, range);
cudaDeviceSynchronize();
/*
for (int i=0; i<n; i++) {
bucket[key[i]]++;
}
*/
bucket_find<<<1,n>>>(bucket, key, n);
cudaDeviceSynchronize();
/*
for (int i=0, j=0; i<range; i++) {
for (; bucket[i]>0; bucket[i]--) {
key[j++] = i;
}
}
*/
bucket_sort<<<1,n>>>(bucket, key, range, n);
cudaDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
}
|
c1f08ae0fdb4215b1ae1fe2b3a6d067fb4f0d782.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2007-2009 The Regents of the University of California, Davis
campus ("The Regents") and NVIDIA Corporation ("NVIDIA"). All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the The Regents, nor NVIDIA, nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 1.5 $
// $Date: 2012/09/30 00:09:07 $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* rand_cta.cu
*
* @brief CUDPP CTA-level rand routines
*/
/** \addtogroup cudpp_cta
* @{
*/
/** @name Rand Functions
* @{
*/
//------------MD5 ROTATING FUNCTIONS------------------------
/**
* @brief Does a GLSL-style swizzle assigning f->xyzw = f->yzwx
*
* It does the equivalent of f->xyzw = f->yzwx since this functionality is
* in shading languages but not exposed in CUDA.
* @param[in] f the uint4 data type which will have its elements shifted. Passed in as pointer.
*
**/
__device__ void swizzleShift(uint4 *f)
{
unsigned int temp;
temp = f->x;
f->x = f->y;
f->y = f->z;
f->z = f->w;
f->w = temp;
}
/**
* @brief Rotates the bits in \a x over by \a n bits.
*
* This is the equivalent of the ROTATELEFT operation as described in the MD5 working memo.
* It takes the bits in \a x and circular shifts it over by \a n bits.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x the variable with the bits
* @param[in] n the number of bits to shift left by.
**/
__device__ unsigned int leftRotate(unsigned int x, unsigned int n)
{
unsigned int t = ( ((x) << (n)) | ((x) >> (32-n)) ) ;
return t;
}
/**
* @brief The F scrambling function.
*
* The F function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (x & y) | ((~x) & z)
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see FF()
**/
__device__ unsigned int F(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = ( (x&y) | ((~x) & z) );
return t;
}
/**
* @brief The G scrambling function.
*
* The G function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (x & z) | ((~z) & y)
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see GG()
**/
__device__ unsigned int G(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = ( (x&z) | ((~z) & y) );
return t;
}
/**
* @brief The H scrambling function.
*
* The H function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (x ^ y ^ z)
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see HH()
**/
__device__ unsigned int H(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = (x ^ y ^ z );
return t;
}
/**
* @brief The I scrambling function.
*
* The I function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (y ^ (x | ~z))
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see II()
**/
__device__ unsigned int I(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = ( y ^ (x | ~z) );
return t;
}
/**
* @brief The FF scrambling function
*
* The FF function in the MD5 technical memo is a wrapper for the F scrambling function
* as well as performing its own rotations using LeftRotate and swizzleShift. The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Fr, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Fr The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see F()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void FF(uint4 * td, int i, uint4 * Fr, float p, unsigned int * data)
{
unsigned int Ft = F(td->y, td->z, td->w);
unsigned int r = Fr->x;
swizzleShift(Fr);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief The GG scrambling function
*
* The GG function in the MD5 technical memo is a wrapper for the G scrambling function
* as well as performing its own rotations using LeftRotate() and swizzleShift(). The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Gr, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Gr The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see G()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void GG(uint4 * td, int i, uint4 * Gr, float p, unsigned int * data)
{
unsigned int Ft = G(td->y, td->z, td->w);
i = (5*i+1) %16;
unsigned int r = Gr->x;
swizzleShift(Gr);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief The HH scrambling function
*
* The HH function in the MD5 technical memo is a wrapper for the H scrambling function
* as well as performing its own rotations using LeftRotate() and swizzleShift(). The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Hr, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Hr The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see H()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void HH(uint4 * td, int i, uint4 * Hr, float p, unsigned int * data)
{
unsigned int Ft = H(td->y, td->z, td->w);
i = (3*i+5) %16;
unsigned int r = Hr->x;
swizzleShift(Hr);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief The II scrambling function
*
* The II function in the MD5 technical memo is a wrapper for the I scrambling function
* as well as performing its own rotations using LeftRotate() and swizzleShift(). The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Ir, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Ir The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see I()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void II(uint4 * td, int i, uint4 * Ir, float p, unsigned int * data)
{
unsigned int Ft = G(td->y, td->z, td->w);
i = (7*i) %16;
unsigned int r = Ir->x;
swizzleShift(Ir);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief Sets up the \a input array using information of \a seed, and \a threadIdx
*
* This function sets up the \a input array using a combination of the current thread's id and the
* user supplied \a seed.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[out] input The array which will contain the initial values for all the scrambling functions.
* @param[in] seed The user supplied seed as an unsigned int.
*
* @see FF()
* @see GG()
* @see HH()
* @see II()
* @see gen_randMD5()
**/
__device__ void setupInput(unsigned int * input, unsigned int seed)
{
//loop unroll, also do this more intelligently
input[0] = threadIdx.x ^ seed;
input[1] = threadIdx.y ^ seed;
input[2] = threadIdx.z ^ seed;
input[3] = 0x80000000 ^ seed;
input[4] = blockIdx.x ^ seed;
input[5] = seed;
input[6] = seed;
input[7] = blockDim.x ^ seed;
input[8] = seed;
input[9] = seed;
input[10] = seed;
input[11] = seed;
input[12] = seed;
input[13] = seed;
input[14] = seed;
input[15] = 128 ^ seed;
}
//-------------------END MD5 FUNCTIONS--------------------------------------
/** @} */ // end rand functions
/** @} */ // end cudpp_cta
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 1.5 $
// $Date: 2012/09/30 00:09:07 $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* rand_kernel.cu
*
* @brief CUDPP kernel-level rand routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name Rand Functions
* @{
*/
/**
* @brief The main MD5 generation algorithm.
*
* This function runs the MD5 hashing random number generator. It generates
* MD5 hashes, and uses the output as randomized bits. To repeatedly call this
* function, always call cudppRandSeed() first to set a new seed or else the output
* may be the same due to the deterministic nature of hashes. gen_randMD5 generates
* 128 random bits per thread. Therefore, the parameter \a d_out is expected to be
* an array of type uint4 with \a numElements indices.
*
* @param[out] d_out the output array of type uint4.
* @param[in] numElements the number of elements in \a d_out
* @param[in] seed the random seed used to vary the output
*
* @see launchRandMD5Kernel()
*/
__global__ void gen_randMD5(uint4 *d_out, mint numElements, mint seed)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int data[16];
setupInput(data, seed);
unsigned int h0 = 0x67452301;
unsigned int h1 = 0xEFCDAB89;
unsigned int h2 = 0x98BADCFE;
unsigned int h3 = 0x10325476;
uint4 result = make_uint4(h0,h1,h2,h3);
uint4 td = result;
float p = pow(2.0,32.0);
uint4 Fr = make_uint4(7,12,17,22);
uint4 Gr = make_uint4(5,9,14,20);
uint4 Hr = make_uint4(4,11,16,23);
uint4 Ir = make_uint4(6,10,15,21);
//for optimization, this is loop unrolled
FF(&td, 0, &Fr,p,data);
FF(&td, 1, &Fr,p,data);
FF(&td, 2, &Fr,p,data);
FF(&td, 3, &Fr,p,data);
FF(&td, 4, &Fr,p,data);
FF(&td, 5, &Fr,p,data);
FF(&td, 6, &Fr,p,data);
FF(&td, 7, &Fr,p,data);
FF(&td, 8, &Fr,p,data);
FF(&td, 9, &Fr,p,data);
FF(&td,10, &Fr,p,data);
FF(&td,11, &Fr,p,data);
FF(&td,12, &Fr,p,data);
FF(&td,13, &Fr,p,data);
FF(&td,14, &Fr,p,data);
FF(&td,15, &Fr,p,data);
GG(&td,16, &Gr,p,data);
GG(&td,17, &Gr,p,data);
GG(&td,18, &Gr,p,data);
GG(&td,19, &Gr,p,data);
GG(&td,20, &Gr,p,data);
GG(&td,21, &Gr,p,data);
GG(&td,22, &Gr,p,data);
GG(&td,23, &Gr,p,data);
GG(&td,24, &Gr,p,data);
GG(&td,25, &Gr,p,data);
GG(&td,26, &Gr,p,data);
GG(&td,27, &Gr,p,data);
GG(&td,28, &Gr,p,data);
GG(&td,29, &Gr,p,data);
GG(&td,30, &Gr,p,data);
GG(&td,31, &Gr,p,data);
HH(&td,32, &Hr,p,data);
HH(&td,33, &Hr,p,data);
HH(&td,34, &Hr,p,data);
HH(&td,35, &Hr,p,data);
HH(&td,36, &Hr,p,data);
HH(&td,37, &Hr,p,data);
HH(&td,38, &Hr,p,data);
HH(&td,39, &Hr,p,data);
HH(&td,40, &Hr,p,data);
HH(&td,41, &Hr,p,data);
HH(&td,42, &Hr,p,data);
HH(&td,43, &Hr,p,data);
HH(&td,44, &Hr,p,data);
HH(&td,45, &Hr,p,data);
HH(&td,46, &Hr,p,data);
HH(&td,47, &Hr,p,data);
II(&td,48, &Ir,p,data);
II(&td,49, &Ir,p,data);
II(&td,50, &Ir,p,data);
II(&td,51, &Ir,p,data);
II(&td,52, &Ir,p,data);
II(&td,53, &Ir,p,data);
II(&td,54, &Ir,p,data);
II(&td,55, &Ir,p,data);
II(&td,56, &Ir,p,data);
II(&td,57, &Ir,p,data);
II(&td,58, &Ir,p,data);
II(&td,59, &Ir,p,data);
II(&td,60, &Ir,p,data);
II(&td,61, &Ir,p,data);
II(&td,62, &Ir,p,data);
II(&td,63, &Ir,p,data);
/* */
result.x = result.x + td.x;
result.y = result.y + td.y;
result.z = result.z + td.z;
result.w = result.w + td.w;
__syncthreads();
if (idx < numElements)
{
d_out[idx].x = result.x;
d_out[idx].y = result.y;
d_out[idx].z = result.z;
d_out[idx].w = result.w;
}
}
/** @} */ // end rand functions
/** @} */ // end cudpp_kernel
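// ---------------------------------------------------------------------------
// Illustrative sketch, not part of CUDPP: a minimal host-side launch of
// gen_randMD5(), roughly what the library's launchRandMD5Kernel() (referenced
// above) takes care of. The 128-thread block size is an assumption here, and
// `mint` is whatever integer typedef the surrounding project supplies.
void sketch_launch_gen_randMD5(uint4 *d_out, mint numElements, mint seed)
{
    const unsigned int nThreads = 128;
    const unsigned int nBlocks =
        (unsigned int)((numElements + nThreads - 1) / nThreads);
    hipLaunchKernelGGL(gen_randMD5, dim3(nBlocks), dim3(nThreads), 0, 0,
                       d_out, numElements, seed);
}
// ---------------------------------------------------------------------------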
| c1f08ae0fdb4215b1ae1fe2b3a6d067fb4f0d782.cu | /*
Copyright (c) 2007-2009 The Regents of the University of California, Davis
campus ("The Regents") and NVIDIA Corporation ("NVIDIA"). All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the The Regents, nor NVIDIA, nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 1.5 $
// $Date: 2012/09/30 00:09:07 $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* rand_cta.cu
*
* @brief CUDPP CTA-level rand routines
*/
/** \addtogroup cudpp_cta
* @{
*/
/** @name Rand Functions
* @{
*/
//------------MD5 ROTATING FUNCTIONS------------------------
/**
* @brief Does a GLSL-style swizzle assigning f->xyzw = f->yzwx
*
* It does the equivalent of f->xyzw = f->yzwx since this functionality is
* in shading languages but not exposed in CUDA.
* @param[in] f the uint4 data type which will have its elements shifted. Passed in as pointer.
*
**/
__device__ void swizzleShift(uint4 *f)
{
unsigned int temp;
temp = f->x;
f->x = f->y;
f->y = f->z;
f->z = f->w;
f->w = temp;
}
/**
* @brief Rotates the bits in \a x over by \a n bits.
*
* This is the equivalent of the ROTATELEFT operation as described in the MD5 working memo.
* It takes the bits in \a x and circular shifts it over by \a n bits.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x the variable with the bits
* @param[in] n the number of bits to shift left by.
**/
__device__ unsigned int leftRotate(unsigned int x, unsigned int n)
{
unsigned int t = ( ((x) << (n)) | ((x) >> (32-n)) ) ;
return t;
}
/**
* @brief The F scrambling function.
*
* The F function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (x & y) | ((~x) & z)
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see FF()
**/
__device__ unsigned int F(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = ( (x&y) | ((~x) & z) );
return t;
}
/**
* @brief The G scrambling function.
*
* The G function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (x & z) | ((~z) & y)
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see GG()
**/
__device__ unsigned int G(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = ( (x&z) | ((~z) & y) );
return t;
}
/**
* @brief The H scrambling function.
*
* The H function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (x ^ y ^ z)
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see HH()
**/
__device__ unsigned int H(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = (x ^ y ^ z );
return t;
}
/**
* @brief The I scrambling function.
*
* The I function in the MD5 technical memo scrambles three variables
* \a x, \a y, and \a z in the following way using bitwise logic:
*
* (y ^ (x | ~z))
*
* The resulting value is returned as an unsigned int.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in] x See the above formula
* @param[in] y See the above formula
* @param[in] z See the above formula
*
* @see II()
**/
__device__ unsigned int I(unsigned int x, unsigned int y, unsigned int z)
{
unsigned int t;
t = ( y ^ (x | ~z) );
return t;
}
/**
* @brief The FF scrambling function
*
* The FF function in the MD5 technical memo is a wrapper for the F scrambling function
* as well as performing its own rotations using LeftRotate and swizzleShift. The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Fr, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Fr The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see F()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void FF(uint4 * td, int i, uint4 * Fr, float p, unsigned int * data)
{
unsigned int Ft = F(td->y, td->z, td->w);
unsigned int r = Fr->x;
swizzleShift(Fr);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief The GG scrambling function
*
* The GG function in the MD5 technical memo is a wrapper for the G scrambling function
* as well as performing its own rotations using LeftRotate() and swizzleShift(). The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Gr, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Gr The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see G()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void GG(uint4 * td, int i, uint4 * Gr, float p, unsigned int * data)
{
unsigned int Ft = G(td->y, td->z, td->w);
i = (5*i+1) %16;
unsigned int r = Gr->x;
swizzleShift(Gr);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief The HH scrambling function
*
* The HH function in the MD5 technical memo is a wrapper for the H scrambling function
* as well as performing its own rotations using LeftRotate() and swizzleShift(). The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Hr, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Hr The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see H()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void HH(uint4 * td, int i, uint4 * Hr, float p, unsigned int * data)
{
unsigned int Ft = H(td->y, td->z, td->w);
i = (3*i+5) %16;
unsigned int r = Hr->x;
swizzleShift(Hr);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief The II scrambling function
*
* The II function in the MD5 technical memo is a wrapper for the I scrambling function
* as well as performing its own rotations using LeftRotate() and swizzleShift(). The variable
* \a td is the current scrambled digest which is passed along and scrambled using the current
* iteration \a i, the rotation information \a Ir, and the starting input \a data. \a p is kept as a
* constant of 2^32.
* The resulting value is stored in \a td.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[in,out] td The current value of the digest stored as an uint4.
* @param[in] i The current iteration of the algorithm. This affects the values in \a data.
* @param[in] Ir The current rotation order.
* @param[in] p The constant 2^32.
* @param[in] data The starting input to MD5. Padded from setupInput().
*
* @see I()
* @see swizzleShift()
* @see leftRotate()
* @see setupInput()
**/
__device__ void II(uint4 * td, int i, uint4 * Ir, float p, unsigned int * data)
{
unsigned int Ft = G(td->y, td->z, td->w);
i = (7*i) %16;
unsigned int r = Ir->x;
swizzleShift(Ir);
float t = sin(__int_as_float(i)) * p;
unsigned int trigFunc = __float2uint_rd(t);
td->x = td->y + leftRotate(td->x + Ft + trigFunc + data[i], r);
swizzleShift(td);
}
/**
* @brief Sets up the \a input array using information of \a seed, and \a threadIdx
*
* This function sets up the \a input array using a combination of the current thread's id and the
* user supplied \a seed.
*
* For more information see: <a href="http://tools.ietf.org/html/rfc1321">The MD5 Message-Digest Algorithm</a>
*
* @param[out] input The array which will contain the initial values for all the scrambling functions.
* @param[in] seed The user supplied seed as an unsigned int.
*
* @see FF()
* @see GG()
* @see HH()
* @see II()
* @see gen_randMD5()
**/
__device__ void setupInput(unsigned int * input, unsigned int seed)
{
//loop unroll, also do this more intelligently
input[0] = threadIdx.x ^ seed;
input[1] = threadIdx.y ^ seed;
input[2] = threadIdx.z ^ seed;
input[3] = 0x80000000 ^ seed;
input[4] = blockIdx.x ^ seed;
input[5] = seed;
input[6] = seed;
input[7] = blockDim.x ^ seed;
input[8] = seed;
input[9] = seed;
input[10] = seed;
input[11] = seed;
input[12] = seed;
input[13] = seed;
input[14] = seed;
input[15] = 128 ^ seed;
}
//-------------------END MD5 FUNCTIONS--------------------------------------
/** @} */ // end rand functions
/** @} */ // end cudpp_cta
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 1.5 $
// $Date: 2012/09/30 00:09:07 $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* rand_kernel.cu
*
* @brief CUDPP kernel-level rand routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name Rand Functions
* @{
*/
/**
* @brief The main MD5 generation algorithm.
*
* This function runs the MD5 hashing random number generator. It generates
* MD5 hashes, and uses the output as randomized bits. To repeatedly call this
* function, always call cudppRandSeed() first to set a new seed or else the output
* may be the same due to the deterministic nature of hashes. gen_randMD5 generates
* 128 random bits per thread. Therefore, the parameter \a d_out is expected to be
* an array of type uint4 with \a numElements indices.
*
* @param[out] d_out the output array of type uint4.
* @param[in] numElements the number of elements in \a d_out
* @param[in] seed the random seed used to vary the output
*
* @see launchRandMD5Kernel()
*/
__global__ void gen_randMD5(uint4 *d_out, mint numElements, mint seed)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int data[16];
setupInput(data, seed);
unsigned int h0 = 0x67452301;
unsigned int h1 = 0xEFCDAB89;
unsigned int h2 = 0x98BADCFE;
unsigned int h3 = 0x10325476;
uint4 result = make_uint4(h0,h1,h2,h3);
uint4 td = result;
float p = pow(2.0,32.0);
uint4 Fr = make_uint4(7,12,17,22);
uint4 Gr = make_uint4(5,9,14,20);
uint4 Hr = make_uint4(4,11,16,23);
uint4 Ir = make_uint4(6,10,15,21);
//for optimization, this is loop unrolled
FF(&td, 0, &Fr,p,data);
FF(&td, 1, &Fr,p,data);
FF(&td, 2, &Fr,p,data);
FF(&td, 3, &Fr,p,data);
FF(&td, 4, &Fr,p,data);
FF(&td, 5, &Fr,p,data);
FF(&td, 6, &Fr,p,data);
FF(&td, 7, &Fr,p,data);
FF(&td, 8, &Fr,p,data);
FF(&td, 9, &Fr,p,data);
FF(&td,10, &Fr,p,data);
FF(&td,11, &Fr,p,data);
FF(&td,12, &Fr,p,data);
FF(&td,13, &Fr,p,data);
FF(&td,14, &Fr,p,data);
FF(&td,15, &Fr,p,data);
GG(&td,16, &Gr,p,data);
GG(&td,17, &Gr,p,data);
GG(&td,18, &Gr,p,data);
GG(&td,19, &Gr,p,data);
GG(&td,20, &Gr,p,data);
GG(&td,21, &Gr,p,data);
GG(&td,22, &Gr,p,data);
GG(&td,23, &Gr,p,data);
GG(&td,24, &Gr,p,data);
GG(&td,25, &Gr,p,data);
GG(&td,26, &Gr,p,data);
GG(&td,27, &Gr,p,data);
GG(&td,28, &Gr,p,data);
GG(&td,29, &Gr,p,data);
GG(&td,30, &Gr,p,data);
GG(&td,31, &Gr,p,data);
HH(&td,32, &Hr,p,data);
HH(&td,33, &Hr,p,data);
HH(&td,34, &Hr,p,data);
HH(&td,35, &Hr,p,data);
HH(&td,36, &Hr,p,data);
HH(&td,37, &Hr,p,data);
HH(&td,38, &Hr,p,data);
HH(&td,39, &Hr,p,data);
HH(&td,40, &Hr,p,data);
HH(&td,41, &Hr,p,data);
HH(&td,42, &Hr,p,data);
HH(&td,43, &Hr,p,data);
HH(&td,44, &Hr,p,data);
HH(&td,45, &Hr,p,data);
HH(&td,46, &Hr,p,data);
HH(&td,47, &Hr,p,data);
II(&td,48, &Ir,p,data);
II(&td,49, &Ir,p,data);
II(&td,50, &Ir,p,data);
II(&td,51, &Ir,p,data);
II(&td,52, &Ir,p,data);
II(&td,53, &Ir,p,data);
II(&td,54, &Ir,p,data);
II(&td,55, &Ir,p,data);
II(&td,56, &Ir,p,data);
II(&td,57, &Ir,p,data);
II(&td,58, &Ir,p,data);
II(&td,59, &Ir,p,data);
II(&td,60, &Ir,p,data);
II(&td,61, &Ir,p,data);
II(&td,62, &Ir,p,data);
II(&td,63, &Ir,p,data);
/* */
result.x = result.x + td.x;
result.y = result.y + td.y;
result.z = result.z + td.z;
result.w = result.w + td.w;
__syncthreads();
if (idx < numElements)
{
d_out[idx].x = result.x;
d_out[idx].y = result.y;
d_out[idx].z = result.z;
d_out[idx].w = result.w;
}
}
/** @} */ // end rand functions
/** @} */ // end cudpp_kernel
|
f29ac2a636636534ee509b356f5eea302d0060f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
//for random initialize
#include <stdlib.h>
#include <time.h>
//for memset
#include <cstring>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char * file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPU assert: %s %s %d\n", hipGetErrorString(code), file, line );
if (abort) exit(code);
}
}
__global__ void sum_array_gpu( int* a, int* b, int* c, int* result, int size)
{
//infinite loop in gpu using while
while(1)
{
}
/*
int gid = blockIdx.x *blockDim.x +threadIdx.x;
if(gid < size)
{
result[gid] = a[gid] * b[gid] * c[gid];
}*/
}
void sum_array_cpu( int* a, int* b, int* c, int* result, int size)
{
for (int i=0; i < size; i++)
{
result[i] = a[i] * b[i] * c[i];
}
}
void compare_arrays (int* gpu, int* cpu, int size){
for ( int i = 0; i < size ; i++){
if(gpu[i]!= cpu[i]){
printf("Arrays are different \n");
return;
}
}
printf("Arrays are same \n");
}
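// Illustrative sketch (not part of the original program): main() below times the
// kernel with clock() around hipDeviceSynchronize(); this helper shows the
// event-based alternative. The name and pass-through launch shape are ours, and
// with the intentionally infinite kernel above it will of course never return.
float kernel_time_ms_sketch(int *d_a, int *d_b, int *d_c, int *d_result,
                            int size, dim3 grid, dim3 block)
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(sum_array_gpu, grid, block, 0, 0, d_a, d_b, d_c, d_result, size);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms;
}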
int main()
{
int size = pow(2,10);
int block_size = 512;
int NO_BYTES = size * sizeof(int);
// Allocate memory in Host
int* h_a, *h_b, *h_c, *gpu_results, *cpu_results;
h_a = (int*)malloc(NO_BYTES);
h_b = (int*)malloc(NO_BYTES);
h_c = (int*)malloc(NO_BYTES);
cpu_results = (int*)malloc(NO_BYTES);
gpu_results = (int*)malloc(NO_BYTES);
time_t t;
srand((unsigned)time(&t));
// Initialise random values for the array
for (int i=0; i <size; i++)
{
h_a[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_b[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_c[i] = (int)(rand() & 0xff);
}
memset(gpu_results,0,NO_BYTES);
memset(cpu_results,0,NO_BYTES);
//Summation in CPU
clock_t cpu_start, cpu_end;
cpu_start = clock();
sum_array_cpu(h_a, h_b, h_c, cpu_results, size);
cpu_end = clock();
// Allocate memory in device
int* d_a, *d_b, *d_c, *d_result;
gpuErrchk(hipMalloc((int**)&d_a,NO_BYTES));
gpuErrchk(hipMalloc((int**)&d_b,NO_BYTES));
gpuErrchk(hipMalloc((int**)&d_c,NO_BYTES));
gpuErrchk(hipMalloc((int**)&d_result,NO_BYTES));
clock_t htod_start, htod_end;
htod_start = clock();
// Transfer the data from host to device
gpuErrchk(hipMemcpy(d_a, h_a, NO_BYTES, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, h_b, NO_BYTES, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_c, h_c, NO_BYTES, hipMemcpyHostToDevice));
htod_end = clock();
// Designing grid and block size
dim3 block(block_size);
dim3 grid((size/block.x)+1);
// Launch kernel function
clock_t gpu_start, gpu_end;
gpu_start = clock();
sum_array_gpu << < grid, block >> > (d_a, d_b, d_c, d_result, size);
hipDeviceSynchronize();
gpu_end = clock();
clock_t dtoh_start, dtoh_end;
dtoh_start = clock();
gpuErrchk(hipMemcpy(gpu_results, d_result, NO_BYTES, hipMemcpyDeviceToHost));
dtoh_end = clock();
//compare the arrays
compare_arrays(gpu_results,cpu_results, size);
printf("Sum array CPU execution time : %4.6f \n",
(double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU execution time : %4.6f \n",
(double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC));
printf("htod mem transfer time : %4.6f \n",
(double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC));
printf("dtoh mem transfer time : %4.6f \n",
(double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU total execution time : %4.6f \n",
(double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC));
hipFree(d_result);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(gpu_results);
free(h_a);
free(h_b);
free(h_c);
hipDeviceReset();
} | f29ac2a636636534ee509b356f5eea302d0060f9.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
//for random initialize
#include <stdlib.h>
#include <time.h>
//for memset
#include <cstring>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char * file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line );
if (abort) exit(code);
}
}
__global__ void sum_array_gpu( int* a, int* b, int* c, int* result, int size)
{
//infinite loop in gpu using while
while(1)
{
}
/*
int gid = blockIdx.x *blockDim.x +threadIdx.x;
if(gid < size)
{
result[gid] = a[gid] * b[gid] * c[gid];
}*/
}
void sum_array_cpu( int* a, int* b, int* c, int* result, int size)
{
for (int i=0; i < size; i++)
{
result[i] = a[i] * b[i] * c[i];
}
}
void compare_arrays (int* gpu, int* cpu, int size){
for ( int i = 0; i < size ; i++){
if(gpu[i]!= cpu[i]){
printf("Arrays are different \n");
return;
}
}
printf("Arrays are same \n");
}
int main()
{
int size = pow(2,10);
int block_size = 512;
int NO_BYTES = size * sizeof(int);
// Allocate memory in Host
int* h_a, *h_b, *h_c, *gpu_results, *cpu_results;
h_a = (int*)malloc(NO_BYTES);
h_b = (int*)malloc(NO_BYTES);
h_c = (int*)malloc(NO_BYTES);
cpu_results = (int*)malloc(NO_BYTES);
gpu_results = (int*)malloc(NO_BYTES);
time_t t;
srand((unsigned)time(&t));
// Initialise random values for the array
for (int i=0; i <size; i++)
{
h_a[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_b[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_c[i] = (int)(rand() & 0xff);
}
memset(gpu_results,0,NO_BYTES);
memset(cpu_results,0,NO_BYTES);
//Summation in CPU
clock_t cpu_start, cpu_end;
cpu_start = clock();
sum_array_cpu(h_a, h_b, h_c, cpu_results, size);
cpu_end = clock();
// Allocate memory in device
int* d_a, *d_b, *d_c, *d_result;
gpuErrchk(cudaMalloc((int**)&d_a,NO_BYTES));
gpuErrchk(cudaMalloc((int**)&d_b,NO_BYTES));
gpuErrchk(cudaMalloc((int**)&d_c,NO_BYTES));
gpuErrchk(cudaMalloc((int**)&d_result,NO_BYTES));
clock_t htod_start, htod_end;
htod_start = clock();
// Transfer the data from host to device
gpuErrchk(cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_c, h_c, NO_BYTES, cudaMemcpyHostToDevice));
htod_end = clock();
// Designing grid and block size
dim3 block(block_size);
dim3 grid((size/block.x)+1);
// Launch kernel function
clock_t gpu_start, gpu_end;
gpu_start = clock();
sum_array_gpu << < grid, block >> > (d_a, d_b, d_c, d_result, size);
cudaDeviceSynchronize();
gpu_end = clock();
clock_t dtoh_start, dtoh_end;
dtoh_start = clock();
gpuErrchk(cudaMemcpy(gpu_results, d_result, NO_BYTES, cudaMemcpyDeviceToHost));
dtoh_end = clock();
//compare the arrays
compare_arrays(gpu_results,cpu_results, size);
printf("Sum array CPU execution time : %4.6f \n",
(double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU execution time : %4.6f \n",
(double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC));
printf("htod mem transfer time : %4.6f \n",
(double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC));
printf("dtoh mem transfer time : %4.6f \n",
(double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU total execution time : %4.6f \n",
(double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC));
cudaFree(d_result);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(gpu_results);
free(h_a);
free(h_b);
free(h_c);
cudaDeviceReset();
} |
8267b06c0299665d4296987e09a8d78d533aab82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
texture<float, 2> tex;
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex;
// process row
__device__ void
d_boxfilter_x(float *id, float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
float t;
// do left edge
t = id[0] * r;
for (int x = 0; x < (r + 1); x++)
{
t += id[x];
}
od[0] = t * scale;
for (int x = 1; x < (r + 1); x++)
{
t += id[x + r];
t -= id[0];
od[x] = t * scale;
}
// main loop
for (int x = (r + 1); x < w - r; x++)
{
t += id[x + r];
t -= id[x - r - 1];
od[x] = t * scale;
}
// do right edge
for (int x = w - r; x < w; x++)
{
t += id[w - 1];
t -= id[x - r - 1];
od[x] = t * scale;
}
}
// process column
__device__ void
d_boxfilter_y(float *id, float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
float t;
// do left edge
t = id[0] * r;
for (int y = 0; y < (r + 1); y++)
{
t += id[y * w];
}
od[0] = t * scale;
for (int y = 1; y < (r + 1); y++)
{
t += id[(y + r) * w];
t -= id[0];
od[y * w] = t * scale;
}
// main loop
for (int y = (r + 1); y < (h - r); y++)
{
t += id[(y + r) * w];
t -= id[((y - r) * w) - w];
od[y * w] = t * scale;
}
// do right edge
for (int y = h - r; y < h; y++)
{
t += id[(h-1) * w];
t -= id[((y - r) * w) - w];
od[y * w] = t * scale;
}
}
__global__ void
d_boxfilter_x_global(float *id, float *od, int w, int h, int r)
{
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
d_boxfilter_x(&id[y * w], &od[y * w], w, h, r);
}
__global__ void
d_boxfilter_y_global(float *id, float *od, int w, int h, int r)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
d_boxfilter_y(&id[x], &od[x], w, h, r);
}
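// Illustrative sketch, not part of the original file: a separable box blur
// chains the row pass and the column pass above through a temporary buffer.
// This helper assumes h and w are exact multiples of nthreads, since the
// kernels have no bounds checks.
void box_blur_global_sketch(float *d_img, float *d_temp, float *d_out,
                            int w, int h, int r, int nthreads)
{
    hipLaunchKernelGGL(d_boxfilter_x_global, dim3(h / nthreads), dim3(nthreads),
                       0, 0, d_img, d_temp, w, h, r);
    hipLaunchKernelGGL(d_boxfilter_y_global, dim3(w / nthreads), dim3(nthreads),
                       0, 0, d_temp, d_out, w, h, r);
}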
// texture version
// texture fetches automatically clamp to edge of image
__global__ void
d_boxfilter_x_tex(float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
float t = 0.0f;
for (int x =- r; x <= r; x++)
{
t += tex2D(tex, x, y);
}
od[y * w] = t * scale;
for (int x = 1; x < w; x++)
{
t += tex2D(tex, x + r, y);
t -= tex2D(tex, x - r - 1, y);
od[y * w + x] = t * scale;
}
}
__global__ void
d_boxfilter_y_tex(float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
float t = 0.0f;
for (int y = -r; y <= r; y++)
{
t += tex2D(tex, x, y);
}
od[x] = t * scale;
for (int y = 1; y < h; y++)
{
t += tex2D(tex, x, y + r);
t -= tex2D(tex, x, y - r - 1);
od[y * w + x] = t * scale;
}
}
} | 8267b06c0299665d4296987e09a8d78d533aab82.cu | extern "C" {
texture<float, 2> tex;
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex;
// process row
__device__ void
d_boxfilter_x(float *id, float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
float t;
// do left edge
t = id[0] * r;
for (int x = 0; x < (r + 1); x++)
{
t += id[x];
}
od[0] = t * scale;
for (int x = 1; x < (r + 1); x++)
{
t += id[x + r];
t -= id[0];
od[x] = t * scale;
}
// main loop
for (int x = (r + 1); x < w - r; x++)
{
t += id[x + r];
t -= id[x - r - 1];
od[x] = t * scale;
}
// do right edge
for (int x = w - r; x < w; x++)
{
t += id[w - 1];
t -= id[x - r - 1];
od[x] = t * scale;
}
}
// process column
__device__ void
d_boxfilter_y(float *id, float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
float t;
// do left edge
t = id[0] * r;
for (int y = 0; y < (r + 1); y++)
{
t += id[y * w];
}
od[0] = t * scale;
for (int y = 1; y < (r + 1); y++)
{
t += id[(y + r) * w];
t -= id[0];
od[y * w] = t * scale;
}
// main loop
for (int y = (r + 1); y < (h - r); y++)
{
t += id[(y + r) * w];
t -= id[((y - r) * w) - w];
od[y * w] = t * scale;
}
// do right edge
for (int y = h - r; y < h; y++)
{
t += id[(h-1) * w];
t -= id[((y - r) * w) - w];
od[y * w] = t * scale;
}
}
__global__ void
d_boxfilter_x_global(float *id, float *od, int w, int h, int r)
{
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
d_boxfilter_x(&id[y * w], &od[y * w], w, h, r);
}
__global__ void
d_boxfilter_y_global(float *id, float *od, int w, int h, int r)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
d_boxfilter_y(&id[x], &od[x], w, h, r);
}
// texture version
// texture fetches automatically clamp to edge of image
__global__ void
d_boxfilter_x_tex(float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
float t = 0.0f;
for (int x =- r; x <= r; x++)
{
t += tex2D(tex, x, y);
}
od[y * w] = t * scale;
for (int x = 1; x < w; x++)
{
t += tex2D(tex, x + r, y);
t -= tex2D(tex, x - r - 1, y);
od[y * w + x] = t * scale;
}
}
__global__ void
d_boxfilter_y_tex(float *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
float t = 0.0f;
for (int y = -r; y <= r; y++)
{
t += tex2D(tex, x, y);
}
od[x] = t * scale;
for (int y = 1; y < h; y++)
{
t += tex2D(tex, x, y + r);
t -= tex2D(tex, x, y - r - 1);
od[y * w + x] = t * scale;
}
}
} |
accac378b4d8b653ddec4f22ef593e0676a20ae0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "markNonPrimeKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dev_chunk = NULL;
hipMalloc(&dev_chunk, XSIZE*YSIZE);
number_type currentValue = 1;
number_type currentValueSqr = 1;
const number_type startValue = 1;
const number_type endValue = 1;
const int thread_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
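// Note (illustrative, not generated by the harness): the two while loops above
// just round iXSIZE and iYSIZE up to the next multiples of BLOCKX and BLOCKY;
// the usual one-line equivalent is ceiling division, e.g.
// dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);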
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((markNonPrimeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_chunk,currentValue,currentValueSqr,startValue,endValue,thread_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((markNonPrimeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_chunk,currentValue,currentValueSqr,startValue,endValue,thread_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((markNonPrimeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_chunk,currentValue,currentValueSqr,startValue,endValue,thread_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | accac378b4d8b653ddec4f22ef593e0676a20ae0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "markNonPrimeKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dev_chunk = NULL;
cudaMalloc(&dev_chunk, XSIZE*YSIZE);
number_type currentValue = 1;
number_type currentValueSqr = 1;
const number_type startValue = 1;
const number_type endValue = 1;
const int thread_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
markNonPrimeKernel<<<gridBlock,threadBlock>>>(dev_chunk,currentValue,currentValueSqr,startValue,endValue,thread_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
markNonPrimeKernel<<<gridBlock,threadBlock>>>(dev_chunk,currentValue,currentValueSqr,startValue,endValue,thread_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
markNonPrimeKernel<<<gridBlock,threadBlock>>>(dev_chunk,currentValue,currentValueSqr,startValue,endValue,thread_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c9abd297d9f6d6ebd981f5859ff87d3568b43312.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file tut5_fileread.cu
* CASPER Tutorial 5: Heterogeneous Instrumentation
* Functions to read input data files
*
* @author Jayanth Chennamangalam
* @date 2011.07.08
*/
#include "tut5_fileread.h"
extern char4* g_pc4InBuf;
extern char4* g_pc4InBufRead;
extern char4* g_pc4DataRead_d;
extern char4* g_pc4Data_d;
extern int g_iSizeRead;
extern int g_iNTaps;
extern int g_iNFFT;
extern int g_iNumSubBands;
extern int g_iIsDataReadDone;
int g_iCurFileSeqNum = 0;
int g_iSizeFile = 0;
/* function that reads data from the data file and loads it into memory */
int LoadDataToMem()
{
struct stat stFileStats = {0};
int iRet = EXIT_SUCCESS;
int iFileData = 0;
char acFileData[LEN_GENSTRING] = {0};
/* build the filename */
BuildFilename(g_iCurFileSeqNum, acFileData);
(void) printf("Opening file %s for processing...\n", acFileData);
iRet = stat(acFileData, &stFileStats);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Failed to stat %s: %s!\n",
acFileData,
strerror(errno));
return EXIT_FAILURE;
}
g_iSizeFile = stFileStats.st_size;
/* allocate memory if this is the first file */
if (0 == g_iCurFileSeqNum)
{
g_pc4InBuf = (char4*) malloc(g_iSizeFile);
if (NULL == g_pc4InBuf)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
}
iFileData = open(acFileData, O_RDONLY);
if (iFileData < EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR! Opening data file %s failed! %s.\n",
acFileData,
strerror(errno));
return EXIT_FAILURE;
}
iRet = read(iFileData, g_pc4InBuf, g_iSizeFile);
if (iRet < EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Data reading failed! %s.\n",
strerror(errno));
(void) close(iFileData);
return EXIT_FAILURE;
}
else if (iRet != stFileStats.st_size)
{
(void) printf("File read done!\n");
}
(void) close(iFileData);
/* set the read pointer to the beginning of the data array */
g_pc4InBufRead = g_pc4InBuf;
/* increment the file sequence number */
++g_iCurFileSeqNum;
return EXIT_SUCCESS;
}
/*
* void BuildFilename(int iCount, char acFilename[])
*
* Builds a formatted filename string
*/
void BuildFilename(int iCount, char acFilename[])
{
char acCount[LEN_SEQ_NUM+1] = {0};
char acTemp[2] = {0};
int iDigits[LEN_SEQ_NUM] = {0};
int iNumDigits = 0;
int i = 0;
/* convert iCount to acCount */
for (i = 0; i < LEN_SEQ_NUM; ++i)
{
if (0 == (iCount / 10))
{
iDigits[i] = iCount % 10;
iNumDigits = i + 1;
break;
}
else
{
iDigits[i] = iCount % 10;
iCount = iCount / 10;
}
}
for (i = (LEN_SEQ_NUM - iNumDigits); i > 0; --i)
{
(void) strcat(acCount, "0");
}
for (i = iNumDigits; i > 0; --i)
{
(void) sprintf(acTemp, "%d", iDigits[i-1]);
(void) strcat(acCount, acTemp);
}
(void) sprintf(acFilename,
"%s%s",
FILENAME_PREFIX,
acCount);
return;
}
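/* Illustrative sketch, not part of the tutorial: the zero padding built by hand
above can also be produced in one call (this assumes, as the loop above does,
that iCount fits in LEN_SEQ_NUM digits). */
void BuildFilenameSnprintf(int iCount, char acFilename[])
{
    (void) snprintf(acFilename, LEN_GENSTRING, "%s%0*d",
                    FILENAME_PREFIX, LEN_SEQ_NUM, iCount);
}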
/* function that reads data from input buffer */
int ReadData()
{
/* write new data to the write buffer */
CUDASafeCallWithCleanUp(hipMemcpy(g_pc4Data_d,
g_pc4InBufRead,
g_iSizeRead,
hipMemcpyHostToDevice));
/* update the read pointer to where data needs to be read in from, in the
next read */
g_pc4InBufRead += ((g_iSizeRead
- ((g_iNTaps - 1)
* g_iNumSubBands
* g_iNFFT
* sizeof(char4)))
/ sizeof(char4));
/* whenever there is a read, reset the read pointer to the beginning */
g_pc4DataRead_d = g_pc4Data_d;
/* BUG: won't read last block */
if ((((char *) g_pc4InBuf) + g_iSizeFile) - ((char *) g_pc4InBufRead)
<= g_iSizeRead)
{
(void) printf("Data read done!\n");
g_iIsDataReadDone = TRUE;
}
return EXIT_SUCCESS;
}
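/* Worked example of the pointer arithmetic above, with hypothetical values (not
this tutorial's actual configuration): if g_iNTaps = 8, g_iNumSubBands = 1 and
g_iNFFT = 1024, each call advances the read pointer by
g_iSizeRead - 7 * 1024 * sizeof(char4) bytes, so consecutive blocks overlap by
(NTaps - 1) * NFFT char4 samples -- presumably the history the polyphase filter
bank needs across block boundaries. */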
| c9abd297d9f6d6ebd981f5859ff87d3568b43312.cu | /**
* @file tut5_fileread.cu
* CASPER Tutorial 5: Heterogeneous Instrumentation
* Functions to read input data files
*
* @author Jayanth Chennamangalam
* @date 2011.07.08
*/
#include "tut5_fileread.h"
extern char4* g_pc4InBuf;
extern char4* g_pc4InBufRead;
extern char4* g_pc4DataRead_d;
extern char4* g_pc4Data_d;
extern int g_iSizeRead;
extern int g_iNTaps;
extern int g_iNFFT;
extern int g_iNumSubBands;
extern int g_iIsDataReadDone;
int g_iCurFileSeqNum = 0;
int g_iSizeFile = 0;
/* function that reads data from the data file and loads it into memory */
int LoadDataToMem()
{
struct stat stFileStats = {0};
int iRet = EXIT_SUCCESS;
int iFileData = 0;
char acFileData[LEN_GENSTRING] = {0};
/* build the filename */
BuildFilename(g_iCurFileSeqNum, acFileData);
(void) printf("Opening file %s for processing...\n", acFileData);
iRet = stat(acFileData, &stFileStats);
if (iRet != EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Failed to stat %s: %s!\n",
acFileData,
strerror(errno));
return EXIT_FAILURE;
}
g_iSizeFile = stFileStats.st_size;
/* allocate memory if this is the first file */
if (0 == g_iCurFileSeqNum)
{
g_pc4InBuf = (char4*) malloc(g_iSizeFile);
if (NULL == g_pc4InBuf)
{
(void) fprintf(stderr,
"ERROR: Memory allocation failed! %s.\n",
strerror(errno));
return EXIT_FAILURE;
}
}
iFileData = open(acFileData, O_RDONLY);
if (iFileData < EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR! Opening data file %s failed! %s.\n",
acFileData,
strerror(errno));
return EXIT_FAILURE;
}
iRet = read(iFileData, g_pc4InBuf, g_iSizeFile);
if (iRet < EXIT_SUCCESS)
{
(void) fprintf(stderr,
"ERROR: Data reading failed! %s.\n",
strerror(errno));
(void) close(iFileData);
return EXIT_FAILURE;
}
else if (iRet != stFileStats.st_size)
{
(void) printf("File read done!\n");
}
(void) close(iFileData);
/* set the read pointer to the beginning of the data array */
g_pc4InBufRead = g_pc4InBuf;
/* increment the file sequence number */
++g_iCurFileSeqNum;
return EXIT_SUCCESS;
}
/*
* void BuildFilename(int iCount, char acFilename[])
*
* Builds a formatted filename string
*/
void BuildFilename(int iCount, char acFilename[])
{
char acCount[LEN_SEQ_NUM+1] = {0};
char acTemp[2] = {0};
int iDigits[LEN_SEQ_NUM] = {0};
int iNumDigits = 0;
int i = 0;
/* convert iCount to acCount */
for (i = 0; i < LEN_SEQ_NUM; ++i)
{
if (0 == (iCount / 10))
{
iDigits[i] = iCount % 10;
iNumDigits = i + 1;
break;
}
else
{
iDigits[i] = iCount % 10;
iCount = iCount / 10;
}
}
for (i = (LEN_SEQ_NUM - iNumDigits); i > 0; --i)
{
(void) strcat(acCount, "0");
}
for (i = iNumDigits; i > 0; --i)
{
(void) sprintf(acTemp, "%d", iDigits[i-1]);
(void) strcat(acCount, acTemp);
}
(void) sprintf(acFilename,
"%s%s",
FILENAME_PREFIX,
acCount);
return;
}
/* function that reads data from input buffer */
int ReadData()
{
/* write new data to the write buffer */
CUDASafeCallWithCleanUp(cudaMemcpy(g_pc4Data_d,
g_pc4InBufRead,
g_iSizeRead,
cudaMemcpyHostToDevice));
/* update the read pointer to where data needs to be read in from, in the
next read */
g_pc4InBufRead += ((g_iSizeRead
- ((g_iNTaps - 1)
* g_iNumSubBands
* g_iNFFT
* sizeof(char4)))
/ sizeof(char4));
/* whenever there is a read, reset the read pointer to the beginning */
g_pc4DataRead_d = g_pc4Data_d;
/* BUG: won't read last block */
if ((((char *) g_pc4InBuf) + g_iSizeFile) - ((char *) g_pc4InBufRead)
<= g_iSizeRead)
{
(void) printf("Data read done!\n");
g_iIsDataReadDone = TRUE;
}
return EXIT_SUCCESS;
}
|
834e5e03b7afd81a8bbdba73b59430291ad26efa.hip | // !!! This is a file automatically generated by hipify!!!
#include "StiffnessMatrix.h"
StiffnessMatrix::StiffnessMatrix(Material& mat, Geometry& geo, unsigned int n)
: material(&mat), geometry(&geo), numberOfIntegrationPoint(n)
{
numberOfElements = geometry->get_numberOfElementsG();
nipSquared = numberOfIntegrationPoint*numberOfIntegrationPoint;
simulationSize = numberOfElements*nipSquared;
// integration points
hipMallocManaged(&integrationNode, numberOfIntegrationPoint*sizeof(float));
hipMallocManaged(&integrationPos, numberOfIntegrationPoint*dimention*numberOfIntegrationPoint*sizeof(unsigned int));
hipMallocManaged(&integrationWeight, numberOfIntegrationPoint*sizeof(float));
integrationPoint();
Log::Logger().Info("StiffnessMatrix Created by CPU");
};
StiffnessMatrix::~StiffnessMatrix()
{
Log::Logger().Info("StiffnessMatrix Deleted by CPU");
hipFree(integrationNode);
hipFree(integrationPos);
hipFree(integrationWeight);
};
void StiffnessMatrix::integrationPoint()
// Creates the integration points
// XI = integrationNode[integrationPos[i]] YI = integrationNode[integrationPos[i+1]]
{
unsigned int counter = 0;
for (unsigned int i = 0; i < numberOfIntegrationPoint; i++)
for (unsigned int j = 0; j < numberOfIntegrationPoint; j++)
{
integrationPos[counter++] = i;
integrationPos[counter++] = j;
};
if (numberOfIntegrationPoint == 1) {
integrationNode[0] = 0; integrationWeight[0] = 4;
} else if (numberOfIntegrationPoint == 2) {
integrationNode[0] = -0.57735; integrationWeight[0] = 1.0;
integrationNode[1] = 0.57735; integrationWeight[1] = 1.0;
} else if (numberOfIntegrationPoint == 3) {
integrationNode[0] = -0.774596; integrationWeight[0] = 0.555556;
integrationNode[1] = 0.0 ; integrationWeight[1] = 0.888889;
integrationNode[2] = 0.774596; integrationWeight[2] = 0.555556;
} else if (numberOfIntegrationPoint == 4) {
integrationNode[0] = -0.861136; integrationWeight[0] = 0.347855;
integrationNode[1] = -0.339981; integrationWeight[1] = 0.652145;
integrationNode[2] = 0.339981; integrationWeight[2] = 0.652145;
integrationNode[3] = 0.861136; integrationWeight[3] = 0.347855;
} else if (numberOfIntegrationPoint == 5) {
integrationNode[0] = -0.90618; integrationWeight[0] = 0.236927;
integrationNode[1] = -0.538469; integrationWeight[1] = 0.478629;
integrationNode[2] = 0.0 ; integrationWeight[2] = 0.568889;
integrationNode[3] = 0.538469; integrationWeight[3] = 0.478629;
integrationNode[4] = 0.90618; integrationWeight[4] = 0.236927;
} else {
printf("Integration points more than five is under construction");
}
};
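// Added editorial note (illustrative, not part of the original class): for the
// rules with two or more points, the tabulated one-dimensional Gauss-Legendre
// weights should sum to the length of the reference interval [-1, 1], i.e. 2
// (e.g. 0.555556 + 0.888889 + 0.555556 ~= 2). A quick sanity check inside a
// member function could look like:
//
// float weightSum = 0.f;
// for (unsigned int i = 0; i < numberOfIntegrationPoint; i++)
// weightSum += integrationWeight[i];
// // expect weightSum ~= 2.f for numberOfIntegrationPoint in {2,3,4,5}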
// This function calculates how many times each node reappears across all elements.
// Each DOF needs a row of the stiffness matrix; this function adds up the number of entries required by the two DOFs of each node in the global stiffness matrix.
unsigned int StiffnessMatrix::globalStiffMatSizeCalculator(unsigned int* mesh, unsigned int meshSize, unsigned int numberOfNodes ) {
unsigned int* nodeReAppear = new unsigned int [numberOfNodes]();
for (unsigned int i = 0; i < meshSize ; i++) {nodeReAppear[mesh[i]] = nodeReAppear[mesh[i]] + 1;}
unsigned int sizeArray[5] = {0,8,12,16,18};
unsigned int sizeG = 0; // size of the global stiffness matrix
for (unsigned int i = 0; i < numberOfNodes; i++) {sizeG = sizeG + sizeArray[nodeReAppear[i]]; }
delete[] nodeReAppear;
return sizeG;
}
| 834e5e03b7afd81a8bbdba73b59430291ad26efa.cu | #include "StiffnessMatrix.h"
StiffnessMatrix::StiffnessMatrix(Material& mat, Geometry& geo, unsigned int n)
: material(&mat), geometry(&geo), numberOfIntegrationPoint(n)
{
numberOfElements = geometry->get_numberOfElementsG();
nipSquared = numberOfIntegrationPoint*numberOfIntegrationPoint;
simulationSize = numberOfElements*nipSquared;
// integration points
cudaMallocManaged(&integrationNode, numberOfIntegrationPoint*sizeof(float));
cudaMallocManaged(&integrationPos, numberOfIntegrationPoint*dimention*numberOfIntegrationPoint*sizeof(unsigned int));
cudaMallocManaged(&integrationWeight, numberOfIntegrationPoint*sizeof(float));
integrationPoint();
Log::Logger().Info("StiffnessMatrix Created by CPU");
};
StiffnessMatrix::~StiffnessMatrix()
{
Log::Logger().Info("StiffnessMatrix Deleted by CPU");
cudaFree(integrationNode);
cudaFree(integrationPos);
cudaFree(integrationWeight);
};
void StiffnessMatrix::integrationPoint()
// Creates the integration points
// XI = integrationNode[integrationPos[i]] YI = integrationNode[integrationPos[i+1]]
{
unsigned int counter = 0;
for (unsigned int i = 0; i < numberOfIntegrationPoint; i++)
for (unsigned int j = 0; j < numberOfIntegrationPoint; j++)
{
integrationPos[counter++] = i;
integrationPos[counter++] = j;
};
if (numberOfIntegrationPoint == 1) {
integrationNode[0] = 0; integrationWeight[0] = 4;
} else if (numberOfIntegrationPoint == 2) {
integrationNode[0] = -0.57735; integrationWeight[0] = 1.0;
integrationNode[1] = 0.57735; integrationWeight[1] = 1.0;
} else if (numberOfIntegrationPoint == 3) {
integrationNode[0] = -0.774596; integrationWeight[0] = 0.555556;
integrationNode[1] = 0.0 ; integrationWeight[1] = 0.888889;
integrationNode[2] = 0.774596; integrationWeight[2] = 0.555556;
} else if (numberOfIntegrationPoint == 4) {
integrationNode[0] = -0.861136; integrationWeight[0] = 0.347855;
integrationNode[1] = -0.339981; integrationWeight[1] = 0.652145;
integrationNode[2] = 0.339981; integrationWeight[2] = 0.652145;
integrationNode[3] = 0.861136; integrationWeight[3] = 0.347855;
} else if (numberOfIntegrationPoint == 5) {
integrationNode[0] = -0.90618; integrationWeight[0] = 0.236927;
integrationNode[1] = -0.538469; integrationWeight[1] = 0.478629;
integrationNode[2] = 0.0 ; integrationWeight[2] = 0.568889;
integrationNode[3] = 0.538469; integrationWeight[3] = 0.478629;
integrationNode[4] = 0.90618; integrationWeight[4] = 0.236927;
} else {
printf("Integration points more than five is under construction");
}
};
// This function calculates how many times each node reappears across all elements.
// Each DOF needs a row of the stiffness matrix; this function adds up the number of entries required by the two DOFs of each node in the global stiffness matrix.
unsigned int StiffnessMatrix::globalStiffMatSizeCalculator(unsigned int* mesh, unsigned int meshSize, unsigned int numberOfNodes ) {
unsigned int* nodeReAppear = new unsigned int [numberOfNodes]();
for (unsigned int i = 0; i < meshSize ; i++) {nodeReAppear[mesh[i]] = nodeReAppear[mesh[i]] + 1;}
unsigned int sizeArray[5] = {0,8,12,16,18};
unsigned int sizeG = 0; // size of the global stiffness matrix
for (unsigned int i = 0; i < numberOfNodes; i++) {sizeG = sizeG + sizeArray[nodeReAppear[i]]; }
delete[] nodeReAppear;
return sizeG;
}
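// Added editorial example (hypothetical mesh, not part of the original file):
// for two quadrilateral elements {0,1,2,3} and {1,4,5,2} sharing the edge
// (1,2), nodes 1 and 2 appear twice (sizeArray[2] = 12 entries each) and the
// other four nodes appear once (sizeArray[1] = 8 entries each), so
// globalStiffMatSizeCalculator(mesh, 8, 6) returns 4*8 + 2*12 = 56.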
|
7b837f147d463eb6e0fedec404ed6e0278aa84a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "histogram.hpp"
#include "checkCudaErrors.hpp"
#include "cudaMemory.hpp"
#include "functions.hpp"
#include <hip/hip_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include "reduce.hpp"
// -------------------- Make Histogram in Shared Memory --------------------
__global__ void kernelMakeHistogram(
const unsigned int* const d_bin_idxs,
unsigned int length,
unsigned int n_bins,
unsigned int* const d_bins,
unsigned int binidxs_per_thread,
unsigned int bin_grid_size
) {
extern __shared__ unsigned int sh_bins[];
unsigned int absIdx = blockIdx.x*blockDim.x*binidxs_per_thread + threadIdx.x;
unsigned int bDim = blockDim.x;
unsigned int idx = threadIdx.x;
unsigned int k;
for (unsigned int i = 0; i < bin_grid_size; i++) {
k = idx + i*bDim;
if (k >= n_bins)
break;
sh_bins[k] = 0;
}
__syncthreads();
unsigned int bin;
for (unsigned int i = 0; i < binidxs_per_thread; i++) {
k = absIdx + i*bDim;
if (k >= length)
break;
bin = d_bin_idxs[k];
atomicAdd(&sh_bins[bin], 1);
}
__syncthreads();
for (unsigned int i = 0; i < bin_grid_size; i++) {
k = idx + i*bDim;
if (k >= n_bins)
return;
atomicAdd(&d_bins[k], sh_bins[k]);
}
}
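// Added editorial note (all figures hypothetical): the host code below sizes
// this kernel so that roughly every resident thread is used once. For example,
// with length = 1e6, n_bins = 256, blockDim.x = 1024 and a device that keeps
// 61440 threads resident, binidxs_per_thread = ceil(1e6 / 61440) = 17 and
// bin_grid_size = ceil(256 / 1024) = 1: each thread accumulates up to 17
// samples into the shared-memory histogram, then threads with
// threadIdx.x < n_bins flush one shared bin each into the global histogram.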
// -------------------- GPU Parallel Histogram (thrust) --------------------
void thrustHistogramWithBinIdxs(
const float* const d_in,
const unsigned int length,
const unsigned int n_bins,
unsigned int* const d_bins,
unsigned int* const d_bin_idxs
) {
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, 0) );
unsigned int maxRunningThreads = deviceProp.multiProcessorCount*deviceProp.maxThreadsPerMultiProcessor;
checkCudaErrors(hipMemset(d_bins, 0, n_bins*sizeof(unsigned int)));
float minimum, maximum;
thrustGPUfindMinMaxFloat(d_in, length, minimum, maximum);
float oneOverBinWidth = (1.0F*n_bins)/(maximum - minimum);
auto d_lmbd = [d_in, minimum, n_bins, oneOverBinWidth, d_bin_idxs] __device__ (unsigned int i) {
unsigned int bin = (d_in[i] - minimum)*oneOverBinWidth;
d_bin_idxs[i] = min(bin, n_bins - 1);
};
thrust::counting_iterator<unsigned int> k(0U);
thrust::for_each(thrust::device, k, k+length, d_lmbd);
dim3 blockDim(1024, 1, 1);
unsigned int binidxsPerThread = ui_ceilDiv(length, maxRunningThreads);
unsigned int binGridSize = ui_ceilDiv(n_bins, blockDim.x);
unsigned int gridX = ui_ceilDiv(length, binidxsPerThread*blockDim.x);
dim3 gridDim(gridX, 1, 1);
hipLaunchKernelGGL(( kernelMakeHistogram), dim3(gridDim), dim3(blockDim), n_bins*sizeof(unsigned int), 0, d_bin_idxs, length, n_bins, d_bins, binidxsPerThread, binGridSize);
}
// -------------------- GPU Parallel Histogram --------------------
__global__ void kernelComputeBinidx(
const float* const d_in,
unsigned int length,
unsigned int n_bins,
const float minimum,
const float oneOverBinWidth,
unsigned int* const d_bin_idxs
) {
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (absIdx >= length)
return;
unsigned int bin = (d_in[absIdx] - minimum)*oneOverBinWidth;
d_bin_idxs[absIdx] = min( bin, n_bins - 1 );
}
void parallelHistogramWithBinIdxs(
const float* const d_in,
unsigned int length,
unsigned int n_bins,
unsigned int* const d_bins,
unsigned int* const d_bin_idxs
) {
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, 0) );
unsigned int maxRunningThreads = deviceProp.multiProcessorCount*deviceProp.maxThreadsPerMultiProcessor;
memsetZero(d_bins, n_bins*sizeof(unsigned int));
float min, max;
parallelGPUfindMinMaxFloat(d_in, length, min, max);
float oneOverBinWidth = (1.0F*n_bins)/(max - min);
dim3 blockDim(1024, 1, 1);
unsigned int gridX = ui_ceilDiv(length, blockDim.x);
dim3 gridDim(gridX, 1, 1);
hipLaunchKernelGGL(( kernelComputeBinidx), dim3(gridDim), dim3(blockDim), 0, 0, d_in, length, n_bins, min, oneOverBinWidth, d_bin_idxs);
unsigned int binidxsPerThread = ui_ceilDiv(length, maxRunningThreads);
unsigned int binGridSize = ui_ceilDiv(n_bins, blockDim.x);
gridX = ui_ceilDiv(length, binidxsPerThread*blockDim.x);
gridDim.x = gridX;
hipLaunchKernelGGL(( kernelMakeHistogram), dim3(gridDim), dim3(blockDim), n_bins*sizeof(unsigned int), 0, d_bin_idxs, length, n_bins, d_bins, binidxsPerThread, binGridSize);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| 7b837f147d463eb6e0fedec404ed6e0278aa84a3.cu | #include "histogram.hpp"
#include "checkCudaErrors.hpp"
#include "cudaMemory.hpp"
#include "functions.hpp"
#include <cuda.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include "reduce.hpp"
// -------------------- Make Histogram in Shared Memory --------------------
__global__ void kernelMakeHistogram(
const unsigned int* const d_bin_idxs,
unsigned int length,
unsigned int n_bins,
unsigned int* const d_bins,
unsigned int binidxs_per_thread,
unsigned int bin_grid_size
) {
extern __shared__ unsigned int sh_bins[];
unsigned int absIdx = blockIdx.x*blockDim.x*binidxs_per_thread + threadIdx.x;
unsigned int bDim = blockDim.x;
unsigned int idx = threadIdx.x;
unsigned int k;
for (unsigned int i = 0; i < bin_grid_size; i++) {
k = idx + i*bDim;
if (k >= n_bins)
break;
sh_bins[k] = 0;
}
__syncthreads();
unsigned int bin;
for (unsigned int i = 0; i < binidxs_per_thread; i++) {
k = absIdx + i*bDim;
if (k >= length)
break;
bin = d_bin_idxs[k];
atomicAdd(&sh_bins[bin], 1);
}
__syncthreads();
for (unsigned int i = 0; i < bin_grid_size; i++) {
k = idx + i*bDim;
if (k >= n_bins)
return;
atomicAdd(&d_bins[k], sh_bins[k]);
}
}
// -------------------- GPU Parallel Histogram (thrust) --------------------
void thrustHistogramWithBinIdxs(
const float* const d_in,
const unsigned int length,
const unsigned int n_bins,
unsigned int* const d_bins,
unsigned int* const d_bin_idxs
) {
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, 0) );
unsigned int maxRunningThreads = deviceProp.multiProcessorCount*deviceProp.maxThreadsPerMultiProcessor;
checkCudaErrors(cudaMemset(d_bins, 0, n_bins*sizeof(unsigned int)));
float minimum, maximum;
thrustGPUfindMinMaxFloat(d_in, length, minimum, maximum);
float oneOverBinWidth = (1.0F*n_bins)/(maximum - minimum);
auto d_lmbd = [d_in, minimum, n_bins, oneOverBinWidth, d_bin_idxs] __device__ (unsigned int i) {
unsigned int bin = (d_in[i] - minimum)*oneOverBinWidth;
d_bin_idxs[i] = min(bin, n_bins - 1);
};
thrust::counting_iterator<unsigned int> k(0U);
thrust::for_each(thrust::device, k, k+length, d_lmbd);
dim3 blockDim(1024, 1, 1);
unsigned int binidxsPerThread = ui_ceilDiv(length, maxRunningThreads);
unsigned int binGridSize = ui_ceilDiv(n_bins, blockDim.x);
unsigned int gridX = ui_ceilDiv(length, binidxsPerThread*blockDim.x);
dim3 gridDim(gridX, 1, 1);
kernelMakeHistogram<<<gridDim, blockDim, n_bins*sizeof(unsigned int)>>>(d_bin_idxs, length, n_bins, d_bins, binidxsPerThread, binGridSize);
}
// -------------------- GPU Parallel Histogram --------------------
__global__ void kernelComputeBinidx(
const float* const d_in,
unsigned int length,
unsigned int n_bins,
const float minimum,
const float oneOverBinWidth,
unsigned int* const d_bin_idxs
) {
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (absIdx >= length)
return;
unsigned int bin = (d_in[absIdx] - minimum)*oneOverBinWidth;
d_bin_idxs[absIdx] = min( bin, n_bins - 1 );
}
void parallelHistogramWithBinIdxs(
const float* const d_in,
unsigned int length,
unsigned int n_bins,
unsigned int* const d_bins,
unsigned int* const d_bin_idxs
) {
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, 0) );
unsigned int maxRunningThreads = deviceProp.multiProcessorCount*deviceProp.maxThreadsPerMultiProcessor;
memsetZero(d_bins, n_bins*sizeof(unsigned int));
float min, max;
parallelGPUfindMinMaxFloat(d_in, length, min, max);
float oneOverBinWidth = (1.0F*n_bins)/(max - min);
dim3 blockDim(1024, 1, 1);
unsigned int gridX = ui_ceilDiv(length, blockDim.x);
dim3 gridDim(gridX, 1, 1);
kernelComputeBinidx<<<gridDim, blockDim>>>(d_in, length, n_bins, min, oneOverBinWidth, d_bin_idxs);
unsigned int binidxsPerThread = ui_ceilDiv(length, maxRunningThreads);
unsigned int binGridSize = ui_ceilDiv(n_bins, blockDim.x);
gridX = ui_ceilDiv(length, binidxsPerThread*blockDim.x);
gridDim.x = gridX;
kernelMakeHistogram<<<gridDim, blockDim, n_bins*sizeof(unsigned int)>>>(d_bin_idxs, length, n_bins, d_bins, binidxsPerThread, binGridSize);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
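// Added editorial usage sketch (not part of the original file): assuming d_in
// already holds `length` floats on the device, the entry point above can be
// driven as below. Buffer names and the bin count are illustrative.
static void exampleParallelHistogramUsage(const float* const d_in, unsigned int length)
{
    const unsigned int nBins = 256;
    unsigned int* d_bins = nullptr;
    unsigned int* d_binIdxs = nullptr;
    checkCudaErrors(cudaMalloc((void**)&d_bins, nBins*sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc((void**)&d_binIdxs, length*sizeof(unsigned int)));
    parallelHistogramWithBinIdxs(d_in, length, nBins, d_bins, d_binIdxs);
    // d_bins now holds the nBins-bin histogram of d_in; copy it back or reuse it here
    checkCudaErrors(cudaFree(d_binIdxs));
    checkCudaErrors(cudaFree(d_bins));
}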
|
bd658f15c4ed9380df5d8db23ccd690ec23fbc45.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include "paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <class T>
__inline__ __device__ T BilinearInterpolate(const T* input_data,
const int height, const int width,
T y, T x) {
if (y < -1.f || y > height || x < -1.f || x > width) return 0;
y = y <= 0.f ? 0.f : y;
x = x <= 0.f ? 0.f : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1.f - ly, hx = 1.f - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
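// Added editorial note (worked example, not part of the original file): for a
// sample at x = 0.5, y = 0.5 the corner weights become
// w1 = w2 = w3 = w4 = 0.25, so the function returns the plain average of the
// 2x2 neighbourhood; at integer coordinates one weight is 1 and the value of
// that single pixel is returned.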
template <typename T, typename OutT, bool USE_SMEM>
__global__ void GPUROIAlignOpt(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ input_rois,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio, const int num_rois,
OutT* __restrict__ output_data) {
const int batch = blockIdx.x;
const int channel = blockIdx.y;
const T* offset_input_data =
input_data + (batch * channels + channel) * height * width;
extern __shared__ T s_input_data[];
if (USE_SMEM) {
for (int idx = threadIdx.x; idx < height * width; idx += blockDim.x) {
s_input_data[idx] = offset_input_data[idx];
}
__syncthreads();
}
for (int idx = threadIdx.x; idx < num_rois * pooled_height * pooled_width;
idx += blockDim.x) {
const int pw = idx % pooled_width;
const int ph = (idx / pooled_width) % pooled_height;
const int roi_idx = (idx / pooled_width / pooled_height) % num_rois;
const int n = batch * num_rois + roi_idx;
const float4 rois_offset = reinterpret_cast<const float4*>(input_rois)[n];
const T roi_xmin = rois_offset.x * spatial_scale;
const T roi_ymin = rois_offset.y * spatial_scale;
const T roi_xmax = rois_offset.z * spatial_scale;
const T roi_ymax = rois_offset.w * spatial_scale;
const T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.f));
const T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.f));
const T bin_size_h = roi_height / static_cast<T>(pooled_height);
const T bin_size_w = roi_width / static_cast<T>(pooled_width);
const int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
const int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
T output_val = 0.f;
for (int iy = 0; iy < roi_bin_grid_h; ++iy) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ++ix) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
if (USE_SMEM) {
T val = BilinearInterpolate<T>(s_input_data, height, width, y, x);
output_val += val;
} else {
T val =
BilinearInterpolate<T>(offset_input_data, height, width, y, x);
output_val += val;
}
}
}
output_val /= count;
const int out_offset =
batch * num_rois * channels * pooled_height * pooled_width +
roi_idx * channels * pooled_height * pooled_width +
channel * pooled_height * pooled_width + ph * pooled_width + pw;
output_data[out_offset] = static_cast<OutT>(output_val);
}
}
#if IS_TRT_VERSION_GE(6000)
RoiAlignPluginDynamic::RoiAlignPluginDynamic(const nvinfer1::DataType data_type,
const int pooled_height,
const int pooled_width,
float spatial_scale,
int sampling_ratio)
: data_type_(data_type),
pooled_height_(pooled_height),
pooled_width_(pooled_width),
spatial_scale_(spatial_scale),
sampling_ratio_(sampling_ratio) {
bool data_type_is_valid = data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF;
PADDLE_ENFORCE_EQ(data_type_is_valid, true,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts kFLOAT(%d) or "
"kHALF(%d) data type, but the received data type = %d",
static_cast<int>(nvinfer1::DataType::kFLOAT),
static_cast<int>(nvinfer1::DataType::kHALF),
static_cast<int>(data_type_)));
PADDLE_ENFORCE_GT(pooled_height_, 0,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts pooled_height "
"greater than %d, but the received pooled_height = %d",
0, pooled_height_));
PADDLE_ENFORCE_GT(pooled_width_, 0,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts pooled_width greater "
"than %d, but the received pooled_width = %d",
0, pooled_width_));
PADDLE_ENFORCE_GT(spatial_scale_, 0.f,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts spatial_scale "
"greater than %f, but the received spatial_scale = %f",
0, spatial_scale_));
int smem_per_block = -1;
int device = -1;
hipGetDevice(&device);
PADDLE_ENFORCE_GE(
device, 0,
platform::errors::InvalidArgument(
"The cuda device ID should be greater than %d, but device ID is %d",
0, device));
hipDeviceGetAttribute(&smem_per_block, hipDeviceAttributeMaxSharedMemoryPerBlock,
device);
smem_per_block_ = smem_per_block;
}
RoiAlignPluginDynamic::RoiAlignPluginDynamic(void const* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &pooled_height_);
DeserializeValue(&data, &length, &pooled_width_);
DeserializeValue(&data, &length, &spatial_scale_);
DeserializeValue(&data, &length, &sampling_ratio_);
int smem_per_block = -1;
int device = -1;
hipGetDevice(&device);
PADDLE_ENFORCE_GE(
device, 0,
platform::errors::InvalidArgument(
"The cuda device ID should be greater than %d, but device ID is %d",
0, device));
hipDeviceGetAttribute(&smem_per_block, hipDeviceAttributeMaxSharedMemoryPerBlock,
device);
smem_per_block_ = smem_per_block;
}
nvinfer1::IPluginV2DynamicExt* RoiAlignPluginDynamic::clone() const
TRT_NOEXCEPT {
auto* plugin =
new RoiAlignPluginDynamic(data_type_, pooled_height_, pooled_width_,
spatial_scale_, sampling_ratio_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
nvinfer1::DimsExprs RoiAlignPluginDynamic::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs ret{};
ret.nbDims = 4;
ret.d[0] = inputs[1].d[0]; // roi
ret.d[1] = inputs[0].d[1]; // X
ret.d[2] = exprBuilder.constant(pooled_height_);
ret.d[3] = exprBuilder.constant(pooled_width_);
return ret;
}
bool RoiAlignPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
if (inOut[pos].format != nvinfer1::TensorFormat::kLINEAR) {
return false;
}
if (pos < 2) { // input
return inOut[pos].type == nvinfer1::DataType::kFLOAT;
}
return inOut[pos].type == data_type_;
}
void RoiAlignPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) TRT_NOEXCEPT {}
size_t RoiAlignPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs,
int nbOutputs) const TRT_NOEXCEPT {
return 0;
}
template <typename T, typename OutT>
int RoiAlignPluginDynamic::enqueue_impl(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, hipStream_t stream) {
auto in_dims = inputDesc[0].dims;
auto rois_dims = inputDesc[1].dims;
auto out_dims = outputDesc[0].dims;
int rois_num = rois_dims.d[0];
if (rois_num == 0) return hipGetLastError() != hipSuccess;
int batch = in_dims.d[0];
int channels = in_dims.d[1];
int height = in_dims.d[2];
int width = in_dims.d[3];
int output_size =
out_dims.d[0] * out_dims.d[1] * out_dims.d[2] * out_dims.d[3];
const dim3 blocks(batch, channels);
const int threads = 512;
if (smem_per_block_ < width * height * sizeof(T)) {
hipLaunchKernelGGL(( GPUROIAlignOpt<T, OutT, false>), dim3(blocks), dim3(threads), 0, stream,
output_size, static_cast<const T*>(inputs[0]),
static_cast<const T*>(inputs[1]), spatial_scale_, channels, height,
width, pooled_height_, pooled_width_, sampling_ratio_, rois_num / batch,
static_cast<OutT*>(outputs[0]));
} else {
hipLaunchKernelGGL(( GPUROIAlignOpt<
T, OutT, true>), dim3(blocks), dim3(threads), width * height * sizeof(T), stream,
output_size, static_cast<const T*>(inputs[0]),
static_cast<const T*>(inputs[1]), spatial_scale_, channels, height,
width, pooled_height_, pooled_width_, sampling_ratio_, rois_num / batch,
static_cast<OutT*>(outputs[0]));
}
return hipGetLastError() != hipSuccess;
}
int RoiAlignPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs, void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(outputDesc[0].type, data_type_,
platform::errors::InvalidArgument(
"TRT RoiAlignPluginDynamic expects outputDesc[0].type "
"equal to data_type_"));
if (data_type_ == nvinfer1::DataType::kHALF) {
return enqueue_impl<float, half>(inputDesc, outputDesc, inputs, outputs,
workspace, stream);
}
return enqueue_impl<float, float>(inputDesc, outputDesc, inputs, outputs,
workspace, stream);
}
nvinfer1::DataType RoiAlignPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes,
int nbInputs) const TRT_NOEXCEPT {
return inputTypes[0];
}
const char* RoiAlignPluginDynamic::getPluginType() const TRT_NOEXCEPT {
return "roi_align_plugin_dynamic";
}
int RoiAlignPluginDynamic::getNbOutputs() const TRT_NOEXCEPT { return 1; }
int RoiAlignPluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
void RoiAlignPluginDynamic::terminate() TRT_NOEXCEPT {}
size_t RoiAlignPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(pooled_height_);
serialize_size += SerializedSize(pooled_width_);
serialize_size += SerializedSize(spatial_scale_);
serialize_size += SerializedSize(sampling_ratio_);
return serialize_size;
}
void RoiAlignPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, pooled_height_);
SerializeValue(&buffer, pooled_width_);
SerializeValue(&buffer, spatial_scale_);
SerializeValue(&buffer, sampling_ratio_);
}
void RoiAlignPluginDynamic::destroy() TRT_NOEXCEPT {}
RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() {}
void RoiAlignPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* RoiAlignPluginDynamicCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* RoiAlignPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
return "roi_align_plugin_dynamic";
}
const char* RoiAlignPluginDynamicCreator::getPluginVersion() const
TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
RoiAlignPluginDynamicCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* RoiAlignPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
return nullptr;
}
nvinfer1::IPluginV2Ext* RoiAlignPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new RoiAlignPluginDynamic(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| bd658f15c4ed9380df5d8db23ccd690ec23fbc45.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <algorithm>
#include "paddle/fluid/inference/tensorrt/plugin/roi_align_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <class T>
__inline__ __device__ T BilinearInterpolate(const T* input_data,
const int height, const int width,
T y, T x) {
if (y < -1.f || y > height || x < -1.f || x > width) return 0;
y = y <= 0.f ? 0.f : y;
x = x <= 0.f ? 0.f : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1.f - ly, hx = 1.f - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T, typename OutT, bool USE_SMEM>
__global__ void GPUROIAlignOpt(const int nthreads,
const T* __restrict__ input_data,
const T* __restrict__ input_rois,
const float spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio, const int num_rois,
OutT* __restrict__ output_data) {
const int batch = blockIdx.x;
const int channel = blockIdx.y;
const T* offset_input_data =
input_data + (batch * channels + channel) * height * width;
extern __shared__ T s_input_data[];
if (USE_SMEM) {
for (int idx = threadIdx.x; idx < height * width; idx += blockDim.x) {
s_input_data[idx] = offset_input_data[idx];
}
__syncthreads();
}
for (int idx = threadIdx.x; idx < num_rois * pooled_height * pooled_width;
idx += blockDim.x) {
const int pw = idx % pooled_width;
const int ph = (idx / pooled_width) % pooled_height;
const int roi_idx = (idx / pooled_width / pooled_height) % num_rois;
const int n = batch * num_rois + roi_idx;
const float4 rois_offset = reinterpret_cast<const float4*>(input_rois)[n];
const T roi_xmin = rois_offset.x * spatial_scale;
const T roi_ymin = rois_offset.y * spatial_scale;
const T roi_xmax = rois_offset.z * spatial_scale;
const T roi_ymax = rois_offset.w * spatial_scale;
const T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.f));
const T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.f));
const T bin_size_h = roi_height / static_cast<T>(pooled_height);
const T bin_size_w = roi_width / static_cast<T>(pooled_width);
const int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
const int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
T output_val = 0.f;
for (int iy = 0; iy < roi_bin_grid_h; ++iy) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ++ix) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
if (USE_SMEM) {
T val = BilinearInterpolate<T>(s_input_data, height, width, y, x);
output_val += val;
} else {
T val =
BilinearInterpolate<T>(offset_input_data, height, width, y, x);
output_val += val;
}
}
}
output_val /= count;
const int out_offset =
batch * num_rois * channels * pooled_height * pooled_width +
roi_idx * channels * pooled_height * pooled_width +
channel * pooled_height * pooled_width + ph * pooled_width + pw;
output_data[out_offset] = static_cast<OutT>(output_val);
}
}
#if IS_TRT_VERSION_GE(6000)
RoiAlignPluginDynamic::RoiAlignPluginDynamic(const nvinfer1::DataType data_type,
const int pooled_height,
const int pooled_width,
float spatial_scale,
int sampling_ratio)
: data_type_(data_type),
pooled_height_(pooled_height),
pooled_width_(pooled_width),
spatial_scale_(spatial_scale),
sampling_ratio_(sampling_ratio) {
bool data_type_is_valid = data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF;
PADDLE_ENFORCE_EQ(data_type_is_valid, true,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts kFLOAT(%d) or "
"kHALF(%d) data type, but the received data type = %d",
static_cast<int>(nvinfer1::DataType::kFLOAT),
static_cast<int>(nvinfer1::DataType::kHALF),
static_cast<int>(data_type_)));
PADDLE_ENFORCE_GT(pooled_height_, 0,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts pooled_height "
"greater than %d, but the received pooled_height = %d",
0, pooled_height_));
PADDLE_ENFORCE_GT(pooled_width_, 0,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts pooled_width greater "
"than %d, but the received pooled_width = %d",
0, pooled_width_));
PADDLE_ENFORCE_GT(spatial_scale_, 0.f,
platform::errors::InvalidArgument(
"TRT RoiAlign plugin only accepts spatial_scale "
"greater than %f, but the received spatial_scale = %f",
0, spatial_scale_));
int smem_per_block = -1;
int device = -1;
cudaGetDevice(&device);
PADDLE_ENFORCE_GE(
device, 0,
platform::errors::InvalidArgument(
"The cuda device ID should be greater than %d, but device ID is %d",
0, device));
cudaDeviceGetAttribute(&smem_per_block, cudaDevAttrMaxSharedMemoryPerBlock,
device);
smem_per_block_ = smem_per_block;
}
RoiAlignPluginDynamic::RoiAlignPluginDynamic(void const* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &pooled_height_);
DeserializeValue(&data, &length, &pooled_width_);
DeserializeValue(&data, &length, &spatial_scale_);
DeserializeValue(&data, &length, &sampling_ratio_);
int smem_per_block = -1;
int device = -1;
cudaGetDevice(&device);
PADDLE_ENFORCE_GE(
device, 0,
platform::errors::InvalidArgument(
"The cuda device ID should be greater than %d, but device ID is %d",
0, device));
cudaDeviceGetAttribute(&smem_per_block, cudaDevAttrMaxSharedMemoryPerBlock,
device);
smem_per_block_ = smem_per_block;
}
nvinfer1::IPluginV2DynamicExt* RoiAlignPluginDynamic::clone() const
TRT_NOEXCEPT {
auto* plugin =
new RoiAlignPluginDynamic(data_type_, pooled_height_, pooled_width_,
spatial_scale_, sampling_ratio_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
nvinfer1::DimsExprs RoiAlignPluginDynamic::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs ret{};
ret.nbDims = 4;
ret.d[0] = inputs[1].d[0]; // roi
ret.d[1] = inputs[0].d[1]; // X
ret.d[2] = exprBuilder.constant(pooled_height_);
ret.d[3] = exprBuilder.constant(pooled_width_);
return ret;
}
bool RoiAlignPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
if (inOut[pos].format != nvinfer1::TensorFormat::kLINEAR) {
return false;
}
if (pos < 2) { // input
return inOut[pos].type == nvinfer1::DataType::kFLOAT;
}
return inOut[pos].type == data_type_;
}
void RoiAlignPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) TRT_NOEXCEPT {}
size_t RoiAlignPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs,
int nbOutputs) const TRT_NOEXCEPT {
return 0;
}
template <typename T, typename OutT>
int RoiAlignPluginDynamic::enqueue_impl(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, cudaStream_t stream) {
auto in_dims = inputDesc[0].dims;
auto rois_dims = inputDesc[1].dims;
auto out_dims = outputDesc[0].dims;
int rois_num = rois_dims.d[0];
if (rois_num == 0) return cudaGetLastError() != cudaSuccess;
int batch = in_dims.d[0];
int channels = in_dims.d[1];
int height = in_dims.d[2];
int width = in_dims.d[3];
int output_size =
out_dims.d[0] * out_dims.d[1] * out_dims.d[2] * out_dims.d[3];
const dim3 blocks(batch, channels);
const int threads = 512;
if (smem_per_block_ < width * height * sizeof(T)) {
GPUROIAlignOpt<T, OutT, false><<<blocks, threads, 0, stream>>>(
output_size, static_cast<const T*>(inputs[0]),
static_cast<const T*>(inputs[1]), spatial_scale_, channels, height,
width, pooled_height_, pooled_width_, sampling_ratio_, rois_num / batch,
static_cast<OutT*>(outputs[0]));
} else {
GPUROIAlignOpt<
T, OutT, true><<<blocks, threads, width * height * sizeof(T), stream>>>(
output_size, static_cast<const T*>(inputs[0]),
static_cast<const T*>(inputs[1]), spatial_scale_, channels, height,
width, pooled_height_, pooled_width_, sampling_ratio_, rois_num / batch,
static_cast<OutT*>(outputs[0]));
}
return cudaGetLastError() != cudaSuccess;
}
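// Added editorial note (illustrative figures): the branch above falls back to
// the global-memory kernel whenever one feature-map slice does not fit in
// shared memory. A 256x256 float slice needs 256*256*4 = 256 KB, well above
// the 48 KB typically reported per block, so the non-shared path runs; a
// 96x96 slice (~36 KB) would take the shared-memory path on such a device.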
int RoiAlignPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs, void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(outputDesc[0].type, data_type_,
platform::errors::InvalidArgument(
"TRT RoiAlignPluginDynamic expects outputDesc[0].type "
"equal to data_type_"));
if (data_type_ == nvinfer1::DataType::kHALF) {
return enqueue_impl<float, half>(inputDesc, outputDesc, inputs, outputs,
workspace, stream);
}
return enqueue_impl<float, float>(inputDesc, outputDesc, inputs, outputs,
workspace, stream);
}
nvinfer1::DataType RoiAlignPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes,
int nbInputs) const TRT_NOEXCEPT {
return inputTypes[0];
}
const char* RoiAlignPluginDynamic::getPluginType() const TRT_NOEXCEPT {
return "roi_align_plugin_dynamic";
}
int RoiAlignPluginDynamic::getNbOutputs() const TRT_NOEXCEPT { return 1; }
int RoiAlignPluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
void RoiAlignPluginDynamic::terminate() TRT_NOEXCEPT {}
size_t RoiAlignPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(pooled_height_);
serialize_size += SerializedSize(pooled_width_);
serialize_size += SerializedSize(spatial_scale_);
serialize_size += SerializedSize(sampling_ratio_);
return serialize_size;
}
void RoiAlignPluginDynamic::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, pooled_height_);
SerializeValue(&buffer, pooled_width_);
SerializeValue(&buffer, spatial_scale_);
SerializeValue(&buffer, sampling_ratio_);
}
void RoiAlignPluginDynamic::destroy() TRT_NOEXCEPT {}
RoiAlignPluginDynamicCreator::RoiAlignPluginDynamicCreator() {}
void RoiAlignPluginDynamicCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* RoiAlignPluginDynamicCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* RoiAlignPluginDynamicCreator::getPluginName() const TRT_NOEXCEPT {
return "roi_align_plugin_dynamic";
}
const char* RoiAlignPluginDynamicCreator::getPluginVersion() const
TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
RoiAlignPluginDynamicCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* RoiAlignPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
return nullptr;
}
nvinfer1::IPluginV2Ext* RoiAlignPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new RoiAlignPluginDynamic(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
c3d224dbc4e30592e0941b8a70aa48b82306c67f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
// The maximum block size in CUDA
constexpr int MAX_BLOCK_SIZE = 1024;
/* This code computes the sum of the weights in two steps:
1) Each GPU warp sums `NROWS_PER_THREAD` rows given by `indices`
2) Each partial sum from 1) is summed and scattered into `grad_weight`
Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
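// Added editorial note (illustrative): ceil_div is what splits each segment of
// equal indices into partial segments of at most NROWS_PER_THREAD rows, e.g. a
// segment covering 23 rows of `sorted_indices` becomes ceil_div(23, 10) = 3
// partial segments of 10, 10 and 3 rows.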
__global__
void krn_partials_per_segment(int64_t *ret, const int64_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
__global__
void krn_partial_segment_offset(
int64_t *ret,
const int64_t *partials_per_segment,
const int64_t *partials_per_segment_offset,
const int64_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
int64_t idx = partials_per_segment_offset[id];
const int64_t num_partials = partials_per_segment[id];
const int64_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t>
__global__ void compute_grad_weight_bags(
int64_t *indices, scalar_t *gradOutput,
int64_t *offset2bag, int64_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const int64_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
int64_t* segment_offsets, int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t>
__global__ void compute_grad_weight(
int64_t *indices,
scalar_t *gradOutput,
int64_t *count,
ptrdiff_t numel,
int64_t stride,
int64_t* segment_offsets,
int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int64_t target_row = indices[idx];
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t>
__global__ void sum_and_scatter(
int64_t *input, scalar_t *gradWeight, int64_t stride,
int64_t* segment_offsets, int64_t num_of_segments,
const acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t padding_idx,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
int64_t target_row = input[segment_offsets[id]];
if (target_row != padding_idx) {
gradWeight[target_row * stride + startFeature] = weight;
}
}
} // anon namespace
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool scale_grad_by_freq,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
auto segment_offsets = at::empty({numel}, orig_indices.options());
int64_t num_of_segments;
{
auto sorted_indices_dev = thrust::device_ptr<int64_t>(sorted_indices.data_ptr<int64_t>());
auto dummy = at::empty_like(sorted_indices);
auto dummy_dev = thrust::device_ptr<int64_t>(dummy.data_ptr<int64_t>());
auto ends = thrust::unique_by_key_copy(
policy,
sorted_indices_dev,
sorted_indices_dev + numel,
thrust::make_counting_iterator(0),
dummy_dev,
thrust::device_ptr<int64_t>(segment_offsets.data_ptr<int64_t>()));
num_of_segments = thrust::get<0>(ends) - dummy_dev;
}
// We split the segments up into sizes of `NROWS_PER_THREAD`
// Compute the number of partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options());
{
hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
partials_per_segment.data_ptr<int64_t>(),
segment_offsets.data_ptr<int64_t>(),
num_of_segments,
numel);
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options());
thrust::exclusive_scan(
policy,
thrust::device_ptr<int64_t>(partials_per_segment.data_ptr<int64_t>()),
thrust::device_ptr<int64_t>(partials_per_segment.data_ptr<int64_t>()+num_of_segments),
thrust::device_ptr<int64_t>(partials_per_segment_offset.data_ptr<int64_t>()));
// The total number of partial-segments is the sum of `partials_per_segment`
const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<int64_t>() +
partials_per_segment_offset[num_of_segments-1].item<int64_t>();
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options());
{
hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
partial_segment_offset.data_ptr<int64_t>(),
partials_per_segment.data_ptr<int64_t>(),
partials_per_segment_offset.data_ptr<int64_t>(),
segment_offsets.data_ptr<int64_t>(),
num_of_segments);
}
const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE;
const int block = ::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
// For numerical stability, the dtype of `grad_weight_per_segment`
// should match `acc_type`
using partial_weight_t = acc_type<scalar_t, true>;
TensorOptions op;
if(grad.dtype() == at::kHalf) {
op = grad.options().dtype(at::kFloat);
} else {
op = grad.options();
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data_ptr<int64_t>(),
grad.data_ptr<scalar_t>(),
offset2bag.data_ptr<int64_t>(),
count.defined() ? count.data_ptr<int64_t>() : nullptr, numel, stride,
mode_mean, bag_size.data_ptr<int64_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
partial_segment_offset.data_ptr<int64_t>(),
num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
} else {
hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data_ptr<int64_t>(),
grad.data_ptr<scalar_t>(),
count.defined() ? count.data_ptr<int64_t>() : nullptr,
numel, stride,
partial_segment_offset.data_ptr<int64_t>(),
num_of_partial_segments,
grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
}
THCudaCheck(hipGetLastError());
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
grad_weight.data_ptr<scalar_t>(),
stride,
segment_offsets.data_ptr<int64_t>(),
num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
partials_per_segment_offset.data_ptr<int64_t>(),
num_of_partial_segments,
padding_idx,
stride_warped);
THCudaCheck(hipGetLastError());
});
return grad_weight;
}
}}
| c3d224dbc4e30592e0941b8a70aa48b82306c67f.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
// The maximum block size in CUDA
constexpr int MAX_BLOCK_SIZE = 1024;
/* This code computes the sum of the weights in two steps:
1) Each GPU warp sums `NROWS_PER_THREAD` rows given by `indices`
2) Each partial sum from 1) is summed and scattered into `grad_weight`
Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
__global__
void krn_partials_per_segment(int64_t *ret, const int64_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
__global__
void krn_partial_segment_offset(
int64_t *ret,
const int64_t *partials_per_segment,
const int64_t *partials_per_segment_offset,
const int64_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
int64_t idx = partials_per_segment_offset[id];
const int64_t num_partials = partials_per_segment[id];
const int64_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
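// Added editorial example (hypothetical sizes): with three segments starting
// at sorted-index offsets {0, 23, 30} and NROWS_PER_THREAD = 10, the segments
// of 23, 7 and 15 rows need {3, 1, 2} partials; the exclusive scan gives write
// offsets {0, 3, 4}, and this kernel fills ret = {0, 10, 20, 23, 30, 40}.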
template <typename scalar_t>
__global__ void compute_grad_weight_bags(
int64_t *indices, scalar_t *gradOutput,
int64_t *offset2bag, int64_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const int64_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
int64_t* segment_offsets, int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t>
__global__ void compute_grad_weight(
int64_t *indices,
scalar_t *gradOutput,
int64_t *count,
ptrdiff_t numel,
int64_t stride,
int64_t* segment_offsets,
int64_t num_of_segments,
acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int64_t target_row = indices[idx];
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t>
__global__ void sum_and_scatter(
int64_t *input, scalar_t *gradWeight, int64_t stride,
int64_t* segment_offsets, int64_t num_of_segments,
const acc_type<scalar_t, true> *grad_weight_per_segment,
const int64_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t padding_idx,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
int64_t target_row = input[segment_offsets[id]];
if (target_row != padding_idx) {
gradWeight[target_row * stride + startFeature] = weight;
}
}
} // anon namespace
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool scale_grad_by_freq,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
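  // e.g. sorted_indices = [1, 1, 1, 4, 4, 7] gives segment_offsets = [0, 3, 5]
  // and num_of_segments = 3.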
auto segment_offsets = at::empty({numel}, orig_indices.options());
int64_t num_of_segments;
{
auto sorted_indices_dev = thrust::device_ptr<int64_t>(sorted_indices.data_ptr<int64_t>());
auto dummy = at::empty_like(sorted_indices);
auto dummy_dev = thrust::device_ptr<int64_t>(dummy.data_ptr<int64_t>());
auto ends = thrust::unique_by_key_copy(
policy,
sorted_indices_dev,
sorted_indices_dev + numel,
thrust::make_counting_iterator(0),
dummy_dev,
thrust::device_ptr<int64_t>(segment_offsets.data_ptr<int64_t>()));
num_of_segments = thrust::get<0>(ends) - dummy_dev;
}
// We split the segments up into sizes of `NROWS_PER_THREAD`
  // Compute the number of partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
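  // e.g. with NROWS_PER_THREAD = 10, segments of 3, 25 and 12 rows become
  // 1, 3 and 2 partial-segments respectively.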
auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options());
{
krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
partials_per_segment.data_ptr<int64_t>(),
segment_offsets.data_ptr<int64_t>(),
num_of_segments,
numel);
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
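  // For the example above ([1, 3, 2]), the exclusive scan below gives
  // partials_per_segment_offset = [0, 1, 4].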
auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options());
thrust::exclusive_scan(
policy,
thrust::device_ptr<int64_t>(partials_per_segment.data_ptr<int64_t>()),
thrust::device_ptr<int64_t>(partials_per_segment.data_ptr<int64_t>()+num_of_segments),
thrust::device_ptr<int64_t>(partials_per_segment_offset.data_ptr<int64_t>()));
  // The total number of partial-segments is the sum of `partials_per_segment`
  // (obtained here as the last count plus the last exclusive-scan offset)
const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<int64_t>() +
partials_per_segment_offset[num_of_segments-1].item<int64_t>();
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options());
{
krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
partial_segment_offset.data_ptr<int64_t>(),
partials_per_segment.data_ptr<int64_t>(),
partials_per_segment_offset.data_ptr<int64_t>(),
segment_offsets.data_ptr<int64_t>(),
num_of_segments);
}
const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE;
const int block = std::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
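  // Each partial-segment is handled by `stride_warped` consecutive threads (the
  // embedding dim rounded up to whole warps); the kernels above simply return
  // for threads with startFeature >= stride.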
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
// For numerical stability, the dtype of `grad_weight_per_segment`
// should match `acc_type`
using partial_weight_t = acc_type<scalar_t, true>;
TensorOptions op;
if(grad.dtype() == at::kHalf) {
op = grad.options().dtype(at::kFloat);
} else {
op = grad.options();
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data_ptr<int64_t>(),
grad.data_ptr<scalar_t>(),
offset2bag.data_ptr<int64_t>(),
count.defined() ? count.data_ptr<int64_t>() : nullptr, numel, stride,
mode_mean, bag_size.data_ptr<int64_t>(),
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
partial_segment_offset.data_ptr<int64_t>(),
num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
} else {
compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data_ptr<int64_t>(),
grad.data_ptr<scalar_t>(),
count.defined() ? count.data_ptr<int64_t>() : nullptr,
numel, stride,
partial_segment_offset.data_ptr<int64_t>(),
num_of_partial_segments,
grad_weight_per_segment.data_ptr<partial_weight_t>(),
stride_warped);
}
THCudaCheck(cudaGetLastError());
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
grad_weight.data_ptr<scalar_t>(),
stride,
segment_offsets.data_ptr<int64_t>(),
num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(),
partials_per_segment_offset.data_ptr<int64_t>(),
num_of_partial_segments,
padding_idx,
stride_warped);
THCudaCheck(cudaGetLastError());
});
return grad_weight;
}
}}
|
b5ac0ec93fc0f83eab4bf12b87547730b2297e32.hip | // !!! This is a file automatically generated by hipify!!!
// Optimized using shared memory and on chip memory
// nvcc nBodySimulation.cu -o nBody -lglut -lm -lGLU -lGL; ./nBody
//To stop hit "control c" in the window you launched it from.
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "../cudaErrCheck.cuh"
#define N 8*8*8
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 1.0
#define H 1.0
#define EYE 10.0
#define FAR 90.0
// Globals
float4 *p;
float3 *v, *f;
float4 *p_GPU0, *p_GPU1;
float rot=0.0; //to rotate cheerios
/*------------------------------------
|
| DataStruct holds the per-GPU slice of the simulation state
|
------------------------------------*/
struct DataStruct {
int deviceID;
int size;
int offset;
float4 *pos;
float3 *vel;
float3 *force;
};
void set_initial_conditions()
{
p = (float4*)malloc(N*sizeof(float4));
v = (float3*)malloc(N*sizeof(float3));
f = (float3*)malloc(N*sizeof(float3));
int i,j,k,num,particles_per_side;
float position_start, temp;
float initail_seperation;
temp = pow((float)N,1.0/3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side -1.0)/2.0;
initail_seperation = 2.0;
for(i=0; i<N; i++)
{
p[i].w = 1.0;
}
num = 0;
for(i=0; i<particles_per_side; i++)
{
for(j=0; j<particles_per_side; j++)
{
for(k=0; k<particles_per_side; k++)
{
if(N <= num) break;
p[num].x = position_start + i*initail_seperation;
p[num].y = position_start + j*initail_seperation;
p[num].z = position_start + k*initail_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
}
void draw_picture()
{
int i;
glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_DEPTH_BUFFER_BIT);
//gray white (powdered donut)
//0.9955,0.8622,0.6711
glColor3d(0.87,0.87,0.87);
for(i=0; i<N; i++)
{
glPushMatrix();
glTranslatef(p[i].x, p[i].y, p[i].z);
//make some cheerios
glRotatef(rot*i,p[i].x, p[i].y, p[i].z);
glutSolidTorus(0.04,0.08,15,15);
glPopMatrix();
}
rot+=0.1;
glutSwapBuffers();
}
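// Pairwise interaction: an attractive 1/r^2 (G) term plus a repulsive 1/r^4 (H)
// term that keeps bodies from collapsing onto each other.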
__device__ float3 getBodyBodyForce(float4 p0, float4 p1)
{
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float force = (G*p0.w*p1.w)/(r2) - (H*p0.w*p1.w)/(r2*r2);
f.x = force*dx/r;
f.y = force*dy/r;
f.z = force*dz/r;
return(f);
}
__global__ void getForces(float4 *g_pos, float3 * force, int offset)
{
int j,ii;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = g_pos[id+offset].x;
posMe.y = g_pos[id+offset].y;
posMe.z = g_pos[id+offset].z;
posMe.w = g_pos[id+offset].w;
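	// NOTE: the factor of 2 below assumes the bodies are split across exactly two
	// GPUs, so 2*gridDim.x tiles of blockDim.x entries cover all N positions in
	// g_pos; with a different GPU count this tiling no longer matches g_pos.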
for(j=0; j < gridDim.x*2; j++)
{
shPos[threadIdx.x] = g_pos[threadIdx.x + blockDim.x*j];
__syncthreads();
#pragma unroll 32
for(int i=0; i < blockDim.x; i++)
{
ii = i + blockDim.x*j;
if(ii != id+offset && ii < N)
{
force_mag = getBodyBodyForce(posMe, shPos[i]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
}
}
}
if(id <N)
{
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
}
__global__ void moveBodies(float4 *g_pos, float4 *d_pos, float3 *vel, float3 * force, int offset)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
if(id < N)
{
vel[id].x += ((force[id].x-DAMP*vel[id].x)/d_pos[id].w)*DT;
vel[id].y += ((force[id].y-DAMP*vel[id].y)/d_pos[id].w)*DT;
vel[id].z += ((force[id].z-DAMP*vel[id].z)/d_pos[id].w)*DT;
d_pos[id].x += vel[id].x*DT;
d_pos[id].y += vel[id].y*DT;
d_pos[id].z += vel[id].z*DT;
g_pos[id+offset].x = d_pos[id].x;
g_pos[id+offset].y = d_pos[id].y;
g_pos[id+offset].z = d_pos[id].z;
}
}
void n_body()
{
int deviceCount;
ERROR_CHECK( hipGetDeviceCount ( &deviceCount ) );
p_GPU0 = (float4*)malloc(N*sizeof(float4));
p_GPU1 = (float4*)malloc(N*sizeof(float4));
DataStruct* dev = (DataStruct*)malloc(deviceCount*sizeof(DataStruct));
for(int i = 0; i<deviceCount; i++)
{
hipSetDevice(i);
if(i==0)
{
ERROR_CHECK( hipMalloc(&p_GPU0, N*sizeof(float4)) );
ERROR_CHECK( hipMemcpy(p_GPU0, p, N*sizeof(float4), hipMemcpyHostToDevice) );
}
if(i==1)
{
ERROR_CHECK( hipMalloc(&p_GPU1, N*sizeof(float4)) );
ERROR_CHECK( hipMemcpy(p_GPU1, p, N*sizeof(float4), hipMemcpyHostToDevice) );
}
dev[i].deviceID = i;
dev[i].size = N/deviceCount;
dev[i].offset = i*N/deviceCount;
ERROR_CHECK( hipMalloc(&dev[i].pos, dev[i].size * sizeof(float4)) );
ERROR_CHECK( hipMalloc(&dev[i].vel, dev[i].size * sizeof(float3)) );
ERROR_CHECK( hipMalloc(&dev[i].force, dev[i].size * sizeof(float3)) );
ERROR_CHECK( hipMemcpy(dev[i].pos, p+dev[i].offset, dev[i].size * sizeof(float4), hipMemcpyHostToDevice) );
ERROR_CHECK( hipMemcpy(dev[i].vel, v+dev[i].offset, dev[i].size * sizeof(float3), hipMemcpyHostToDevice) );
ERROR_CHECK( hipMemcpy(dev[i].force, f+dev[i].offset, dev[i].size * sizeof(float3), hipMemcpyHostToDevice) );
}
dim3 block(BLOCK);
dim3 grid((N/deviceCount - 1)/BLOCK + 1);
float dt;
int tdraw = 0;
float time = 0.0;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dt = DT;
while(time < STOP_TIME)
{
for(int i = 0; i < deviceCount; i++)
{
float4 *temp;
temp = i?p_GPU1:p_GPU0;
hipSetDevice( dev[i].deviceID );
hipLaunchKernelGGL(( getForces), dim3(grid), dim3(block), 0, 0, temp, dev[i].force, dev[i].offset);
ERROR_CHECK( hipPeekAtLastError() );
hipLaunchKernelGGL(( moveBodies), dim3(grid), dim3(block), 0, 0, temp, dev[i].pos, dev[i].vel, dev[i].force, dev[i].offset);
ERROR_CHECK( hipPeekAtLastError() );
}
hipDeviceSynchronize();
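		// Exchange the halves just updated on each GPU so that both full position
		// arrays (p_GPU0 and p_GPU1) are complete before the next time step.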
if(deviceCount > 1)
{
hipSetDevice( 0 );
ERROR_CHECK( hipMemcpy(p_GPU1+dev[0].offset, dev[0].pos, dev[1].size*sizeof(float4), hipMemcpyDeviceToDevice) );
hipSetDevice( 1 );
ERROR_CHECK( hipMemcpy(p_GPU0+dev[1].offset, dev[1].pos, dev[0].size*sizeof(float4), hipMemcpyDeviceToDevice) );
}
hipDeviceSynchronize();
//To kill the draw comment out the next 7 lines.
if(tdraw == DRAW)
{
hipSetDevice(0);
ERROR_CHECK( hipMemcpy(p, p_GPU0, N * sizeof(float4), hipMemcpyDeviceToHost) );
draw_picture();
tdraw = 0;
}
tdraw++;
time += dt;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime);
}
void control()
{
set_initial_conditions();
draw_picture();
n_body();
printf("\n DONE \n");
}
void Display(void)
{
gluLookAt(EYE, EYE, EYE, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
control();
}
void reshape(int w, int h)
{
glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, FAR);
glMatrixMode(GL_MODELVIEW);
}
int main(int argc, char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("2 Body 3D");
GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0};
GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0};
GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0};
GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat mat_shininess[] = {10.0};
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_SMOOTH);
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_DEPTH_TEST);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
| b5ac0ec93fc0f83eab4bf12b87547730b2297e32.cu | // Optimized using shared memory and on chip memory
// nvcc nBodySimulation.cu -o nBody -lglut -lm -lGLU -lGL; ./nBody
//To stop hit "control c" in the window you launched it from.
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include "../cudaErrCheck.cuh"
#define N 8*8*8
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 1.0
#define H 1.0
#define EYE 10.0
#define FAR 90.0
// Globals
float4 *p;
float3 *v, *f;
float4 *p_GPU0, *p_GPU1;
float rot=0.0; //to rotate cheerios
/*------------------------------------
|
| DataStruct holds the per-GPU slice of the simulation state
|
------------------------------------*/
struct DataStruct {
int deviceID;
int size;
int offset;
float4 *pos;
float3 *vel;
float3 *force;
};
void set_initial_conditions()
{
p = (float4*)malloc(N*sizeof(float4));
v = (float3*)malloc(N*sizeof(float3));
f = (float3*)malloc(N*sizeof(float3));
int i,j,k,num,particles_per_side;
float position_start, temp;
float initail_seperation;
temp = pow((float)N,1.0/3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side -1.0)/2.0;
initail_seperation = 2.0;
for(i=0; i<N; i++)
{
p[i].w = 1.0;
}
num = 0;
for(i=0; i<particles_per_side; i++)
{
for(j=0; j<particles_per_side; j++)
{
for(k=0; k<particles_per_side; k++)
{
if(N <= num) break;
p[num].x = position_start + i*initail_seperation;
p[num].y = position_start + j*initail_seperation;
p[num].z = position_start + k*initail_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
}
void draw_picture()
{
int i;
glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_DEPTH_BUFFER_BIT);
//gray white (powdered donut)
//0.9955,0.8622,0.6711
glColor3d(0.87,0.87,0.87);
for(i=0; i<N; i++)
{
glPushMatrix();
glTranslatef(p[i].x, p[i].y, p[i].z);
//make some cheerios
glRotatef(rot*i,p[i].x, p[i].y, p[i].z);
glutSolidTorus(0.04,0.08,15,15);
glPopMatrix();
}
rot+=0.1;
glutSwapBuffers();
}
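// Pairwise interaction: an attractive 1/r^2 (G) term plus a repulsive 1/r^4 (H)
// term that keeps bodies from collapsing onto each other.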
__device__ float3 getBodyBodyForce(float4 p0, float4 p1)
{
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float force = (G*p0.w*p1.w)/(r2) - (H*p0.w*p1.w)/(r2*r2);
f.x = force*dx/r;
f.y = force*dy/r;
f.z = force*dz/r;
return(f);
}
__global__ void getForces(float4 *g_pos, float3 * force, int offset)
{
int j,ii;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = g_pos[id+offset].x;
posMe.y = g_pos[id+offset].y;
posMe.z = g_pos[id+offset].z;
posMe.w = g_pos[id+offset].w;
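	// NOTE: the factor of 2 below assumes the bodies are split across exactly two
	// GPUs, so 2*gridDim.x tiles of blockDim.x entries cover all N positions in
	// g_pos; with a different GPU count this tiling no longer matches g_pos.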
for(j=0; j < gridDim.x*2; j++)
{
shPos[threadIdx.x] = g_pos[threadIdx.x + blockDim.x*j];
__syncthreads();
#pragma unroll 32
for(int i=0; i < blockDim.x; i++)
{
ii = i + blockDim.x*j;
if(ii != id+offset && ii < N)
{
force_mag = getBodyBodyForce(posMe, shPos[i]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
}
}
}
if(id <N)
{
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
}
__global__ void moveBodies(float4 *g_pos, float4 *d_pos, float3 *vel, float3 * force, int offset)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
if(id < N)
{
vel[id].x += ((force[id].x-DAMP*vel[id].x)/d_pos[id].w)*DT;
vel[id].y += ((force[id].y-DAMP*vel[id].y)/d_pos[id].w)*DT;
vel[id].z += ((force[id].z-DAMP*vel[id].z)/d_pos[id].w)*DT;
d_pos[id].x += vel[id].x*DT;
d_pos[id].y += vel[id].y*DT;
d_pos[id].z += vel[id].z*DT;
g_pos[id+offset].x = d_pos[id].x;
g_pos[id+offset].y = d_pos[id].y;
g_pos[id+offset].z = d_pos[id].z;
}
}
void n_body()
{
int deviceCount;
ERROR_CHECK( cudaGetDeviceCount ( &deviceCount ) );
p_GPU0 = (float4*)malloc(N*sizeof(float4));
p_GPU1 = (float4*)malloc(N*sizeof(float4));
DataStruct* dev = (DataStruct*)malloc(deviceCount*sizeof(DataStruct));
for(int i = 0; i<deviceCount; i++)
{
cudaSetDevice(i);
if(i==0)
{
ERROR_CHECK( cudaMalloc(&p_GPU0, N*sizeof(float4)) );
ERROR_CHECK( cudaMemcpy(p_GPU0, p, N*sizeof(float4), cudaMemcpyHostToDevice) );
}
if(i==1)
{
ERROR_CHECK( cudaMalloc(&p_GPU1, N*sizeof(float4)) );
ERROR_CHECK( cudaMemcpy(p_GPU1, p, N*sizeof(float4), cudaMemcpyHostToDevice) );
}
dev[i].deviceID = i;
dev[i].size = N/deviceCount;
dev[i].offset = i*N/deviceCount;
ERROR_CHECK( cudaMalloc(&dev[i].pos, dev[i].size * sizeof(float4)) );
ERROR_CHECK( cudaMalloc(&dev[i].vel, dev[i].size * sizeof(float3)) );
ERROR_CHECK( cudaMalloc(&dev[i].force, dev[i].size * sizeof(float3)) );
ERROR_CHECK( cudaMemcpy(dev[i].pos, p+dev[i].offset, dev[i].size * sizeof(float4), cudaMemcpyHostToDevice) );
ERROR_CHECK( cudaMemcpy(dev[i].vel, v+dev[i].offset, dev[i].size * sizeof(float3), cudaMemcpyHostToDevice) );
ERROR_CHECK( cudaMemcpy(dev[i].force, f+dev[i].offset, dev[i].size * sizeof(float3), cudaMemcpyHostToDevice) );
}
dim3 block(BLOCK);
dim3 grid((N/deviceCount - 1)/BLOCK + 1);
float dt;
int tdraw = 0;
float time = 0.0;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dt = DT;
while(time < STOP_TIME)
{
for(int i = 0; i < deviceCount; i++)
{
float4 *temp;
temp = i?p_GPU1:p_GPU0;
cudaSetDevice( dev[i].deviceID );
getForces<<<grid, block>>>(temp, dev[i].force, dev[i].offset);
ERROR_CHECK( cudaPeekAtLastError() );
moveBodies<<<grid, block>>>(temp, dev[i].pos, dev[i].vel, dev[i].force, dev[i].offset);
ERROR_CHECK( cudaPeekAtLastError() );
}
cudaDeviceSynchronize();
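		// Exchange the halves just updated on each GPU so that both full position
		// arrays (p_GPU0 and p_GPU1) are complete before the next time step.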
if(deviceCount > 1)
{
cudaSetDevice( 0 );
ERROR_CHECK( cudaMemcpy(p_GPU1+dev[0].offset, dev[0].pos, dev[1].size*sizeof(float4), cudaMemcpyDeviceToDevice) );
cudaSetDevice( 1 );
ERROR_CHECK( cudaMemcpy(p_GPU0+dev[1].offset, dev[1].pos, dev[0].size*sizeof(float4), cudaMemcpyDeviceToDevice) );
}
cudaDeviceSynchronize();
//To kill the draw comment out the next 7 lines.
if(tdraw == DRAW)
{
cudaSetDevice(0);
ERROR_CHECK( cudaMemcpy(p, p_GPU0, N * sizeof(float4), cudaMemcpyDeviceToHost) );
draw_picture();
tdraw = 0;
}
tdraw++;
time += dt;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime);
}
void control()
{
set_initial_conditions();
draw_picture();
n_body();
printf("\n DONE \n");
}
void Display(void)
{
gluLookAt(EYE, EYE, EYE, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
control();
}
void reshape(int w, int h)
{
glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, FAR);
glMatrixMode(GL_MODELVIEW);
}
int main(int argc, char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("2 Body 3D");
GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0};
GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0};
GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0};
GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat mat_shininess[] = {10.0};
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_SMOOTH);
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_DEPTH_TEST);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
|
5070f683c6e6baa1916e94332a083ccae55c0b37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* usage: nvcc ./stream_test_v2.cu -o ./stream_v2_legacy
* nvvp ./stream_v2_legacy ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v2_legacy )
* ... versus ...
* nvcc --default-stream per-thread ./stream_test_v2.cu -o ./stream_v2_per-thread
* nvvp ./stream_v2_per-thread ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v2_per-thread )
*
* purpose: just test whether substitution of hipMalloc() with hipMallocManaged()
* will work
 * result: yes it does; however, one needs to be careful to synchronize individual
 *         streams before accessing managed memory, hence the hipStreamSynchronize()
 *         call inserted below was crucial: without it only default stream 0 printed
 *         correct results and all other streams printed just 0.
 * n.b. the commented-out line 'hipMalloc(&data[i]...' would have worked together with
 *         the also-commented lines 'hipMemcpy(... printf(...' for low-level
 *         checking of results
*
*/
#include <stdio.h>
const int N = 1 << 20;
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x;
for (int i = tid; i < n; i += blockDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
int main()
{
const int num_streams = 8;
float localx[N];
hipStream_t streams[num_streams];
float *data[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreate(&streams[i]);
//hipMalloc(&data[i], N * sizeof(float));
hipMallocManaged(&data[i], N * sizeof(float));
// launch one worker kernel per stream
hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, streams[i], data[i], N);
// launch a dummy kernel on the default stream
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, 0, 0);
}
// and a quick check of results because individual streams
// should have done identical calculations !
for (int i = 0; i < num_streams; i++) {
// hipMemcpy(localx, data[i], N * sizeof(float), hipMemcpyDeviceToHost);
// printf("*** %d %12.6lf%12.6lf%12.6lf\n", i, localx[0], localx[1], localx[2]);
hipStreamSynchronize(streams[i]);
printf("*** %d %12.6lf%12.6lf%12.6lf\n", i, data[i][0], data[i][1], data[i][2]);
}
hipDeviceReset();
return 0;
}
| 5070f683c6e6baa1916e94332a083ccae55c0b37.cu | /*
* usage: nvcc ./stream_test_v2.cu -o ./stream_v2_legacy
* nvvp ./stream_v2_legacy ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v2_legacy )
* ... versus ...
* nvcc --default-stream per-thread ./stream_test_v2.cu -o ./stream_v2_per-thread
* nvvp ./stream_v2_per-thread ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_v2_per-thread )
*
* purpose: just test whether substitution of cudaMalloc() with cudaMallocManaged()
* will work
 * result: yes it does; however, one needs to be careful to synchronize individual
 *         streams before accessing managed memory, hence the cudaStreamSynchronize()
 *         call inserted below was crucial: without it only default stream 0 printed
 *         correct results and all other streams printed just 0.
 * n.b. the commented-out line 'cudaMalloc(&data[i]...' would have worked together with
 *         the also-commented lines 'cudaMemcpy(... printf(...' for low-level
 *         checking of results
*
*/
#include <stdio.h>
const int N = 1 << 20;
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x;
for (int i = tid; i < n; i += blockDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
int main()
{
const int num_streams = 8;
float localx[N];
cudaStream_t streams[num_streams];
float *data[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreate(&streams[i]);
//cudaMalloc(&data[i], N * sizeof(float));
cudaMallocManaged(&data[i], N * sizeof(float));
// launch one worker kernel per stream
kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
// launch a dummy kernel on the default stream
kernel<<<1, 1>>>(0, 0);
}
// and a quick check of results because individual streams
// should have done identical calculations !
for (int i = 0; i < num_streams; i++) {
// cudaMemcpy(localx, data[i], N * sizeof(float), cudaMemcpyDeviceToHost);
// printf("*** %d %12.6lf%12.6lf%12.6lf\n", i, localx[0], localx[1], localx[2]);
cudaStreamSynchronize(streams[i]);
printf("*** %d %12.6lf%12.6lf%12.6lf\n", i, data[i][0], data[i][1], data[i][2]);
}
cudaDeviceReset();
return 0;
}
|
cfe3033fedc37ff62490b193b970645e818a95fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "conversion_utils.h"
#include "inference_cuda_layers.h"
#include "memory_access_utils.h"
namespace cg = cooperative_groups;
#define MAX_CAP 4
#define MAX_SEQ 2048
inline __device__ float relu(const float x) { return x < 0 ? 0 : x; }
/*
In-place relu(biasAdd(x)) for channels last
*/
template <typename T>
__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size)
{
// Input restriction: intermediate_size % vals_per_access == 0
constexpr int granularity = 16;
constexpr int values_per_access = granularity / sizeof(T);
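    // A 16-byte access is 4 floats or 8 halves, hence the requirement that
    // intermediate_size be divisible by values_per_access.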
const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
if (offset < total_count) {
T data[values_per_access];
T data_bias[values_per_access];
mem_access::load_global<granularity>(data, input + offset);
mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size));
#pragma unroll
for (int i = 0; i < values_per_access; i++) {
float data_f = conversion::to<float>(data[i]);
float bias_f = conversion::to<float>(data_bias[i]);
data[i] = conversion::to<T>(relu(data_f + bias_f));
}
mem_access::store_global<granularity>(input + offset, data);
}
}
template <typename T>
void launch_bias_relu(T* input,
const T* bias,
int intermediate_size,
int batch_size,
hipStream_t stream)
{
constexpr int threads = 1024;
constexpr int granularity = 16;
const int total_count = batch_size * intermediate_size;
const int elems_per_block = threads * (granularity / sizeof(T));
dim3 block_dims(threads);
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
hipLaunchKernelGGL(( fused_bias_relu), dim3(grid_dims), dim3(block_dims), 0, stream,
input, bias, total_count, intermediate_size);
}
template void launch_bias_relu<float>(float*, const float*, int, int, hipStream_t);
template void launch_bias_relu<__half>(__half*, const __half*, int, int, hipStream_t);
| cfe3033fedc37ff62490b193b970645e818a95fd.cu | // Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "conversion_utils.h"
#include "inference_cuda_layers.h"
#include "memory_access_utils.h"
namespace cg = cooperative_groups;
#define MAX_CAP 4
#define MAX_SEQ 2048
inline __device__ float relu(const float x) { return x < 0 ? 0 : x; }
/*
In-place relu(biasAdd(x)) for channels last
*/
template <typename T>
__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size)
{
// Input restriction: intermediate_size % vals_per_access == 0
constexpr int granularity = 16;
constexpr int values_per_access = granularity / sizeof(T);
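    // A 16-byte access is 4 floats or 8 halves, hence the requirement that
    // intermediate_size be divisible by values_per_access.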
const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
if (offset < total_count) {
T data[values_per_access];
T data_bias[values_per_access];
mem_access::load_global<granularity>(data, input + offset);
mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size));
#pragma unroll
for (int i = 0; i < values_per_access; i++) {
float data_f = conversion::to<float>(data[i]);
float bias_f = conversion::to<float>(data_bias[i]);
data[i] = conversion::to<T>(relu(data_f + bias_f));
}
mem_access::store_global<granularity>(input + offset, data);
}
}
template <typename T>
void launch_bias_relu(T* input,
const T* bias,
int intermediate_size,
int batch_size,
cudaStream_t stream)
{
constexpr int threads = 1024;
constexpr int granularity = 16;
const int total_count = batch_size * intermediate_size;
const int elems_per_block = threads * (granularity / sizeof(T));
dim3 block_dims(threads);
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
fused_bias_relu<<<grid_dims, block_dims, 0, stream>>>(
input, bias, total_count, intermediate_size);
}
template void launch_bias_relu<float>(float*, const float*, int, int, cudaStream_t);
template void launch_bias_relu<__half>(__half*, const __half*, int, int, cudaStream_t);
|
912f90758f9194867c7d36212e80d4a92688a5eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>
#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
static void THRUST_FULL_REDUCTION(benchmark::State &state) {
const size_t num_elements = state.range(0);
hipEvent_t start, stop;
half *d_in_fp16 = nullptr;
  half h_out; // thrust::reduce returns a value that must be
              // deposited in a host-resident variable
try {
PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
cuda_memory_set(d_in_fp16, 0.001f, num_elements);
PRINT_IF_ERROR(hipDeviceSynchronize());
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
defer(hipEventDestroy(start));
defer(hipEventDestroy(stop));
for (auto _ : state) {
PRINT_IF_ERROR(hipEventRecord(start));
h_out = thrust::reduce(thrust::device, d_in_fp16, d_in_fp16 + num_elements);
PRINT_IF_ERROR(hipEventRecord(stop));
PRINT_IF_ERROR(hipEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
(void) h_out;
state.counters.insert({{"num_elements", num_elements},
{"flops",
{state.iterations() * 1.0 * num_elements,
benchmark::Counter::kAvgThreadsRate}}});
#if 0
int errors = 0;
float correct_sum = 0;
for (int i = 0; i < num_elements; i++) {
correct_sum += h_in[i];
}
if (fabs(half_to_float(h_out) - correct_sum) > 0.001) {
errors++;
if (errors < 10) {
printf("Expected %f, get h_out = %f\n", correct_sum,
half_to_float(h_out));
}
}
if (errors > 0) {
printf("THRUST_FULL_REDUCTION does not agree with SEQUENTIAL! %d errors!\n",
errors);
} else {
printf("Results verified: they agree.\n\n");
}
#endif
hipFree(d_in_fp16);
} catch (...) {
hipFree(d_in_fp16);
hipDeviceReset();
const auto p = std::current_exception();
std::rethrow_exception(p);
}
}
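// UseManualTime() makes the benchmark report the CUDA-event time passed to
// state.SetIterationTime() rather than wall-clock time.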
BENCHMARK(THRUST_FULL_REDUCTION)->ARGS()->UseManualTime();
| 912f90758f9194867c7d36212e80d4a92688a5eb.cu |
#include <benchmark/benchmark.h>
#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
static void THRUST_FULL_REDUCTION(benchmark::State &state) {
const size_t num_elements = state.range(0);
cudaEvent_t start, stop;
half *d_in_fp16 = nullptr;
  half h_out; // thrust::reduce returns a value that must be
              // deposited in a host-resident variable
try {
PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
cuda_memory_set(d_in_fp16, 0.001f, num_elements);
PRINT_IF_ERROR(cudaDeviceSynchronize());
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
defer(cudaEventDestroy(start));
defer(cudaEventDestroy(stop));
for (auto _ : state) {
PRINT_IF_ERROR(cudaEventRecord(start));
h_out = thrust::reduce(thrust::device, d_in_fp16, d_in_fp16 + num_elements);
PRINT_IF_ERROR(cudaEventRecord(stop));
PRINT_IF_ERROR(cudaEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
(void) h_out;
state.counters.insert({{"num_elements", num_elements},
{"flops",
{state.iterations() * 1.0 * num_elements,
benchmark::Counter::kAvgThreadsRate}}});
#if 0
int errors = 0;
float correct_sum = 0;
for (int i = 0; i < num_elements; i++) {
correct_sum += h_in[i];
}
if (fabs(half_to_float(h_out) - correct_sum) > 0.001) {
errors++;
if (errors < 10) {
printf("Expected %f, get h_out = %f\n", correct_sum,
half_to_float(h_out));
}
}
if (errors > 0) {
printf("THRUST_FULL_REDUCTION does not agree with SEQUENTIAL! %d errors!\n",
errors);
} else {
printf("Results verified: they agree.\n\n");
}
#endif
cudaFree(d_in_fp16);
} catch (...) {
cudaFree(d_in_fp16);
cudaDeviceReset();
const auto p = std::current_exception();
std::rethrow_exception(p);
}
}
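// UseManualTime() makes the benchmark report the CUDA-event time passed to
// state.SetIterationTime() rather than wall-clock time.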
BENCHMARK(THRUST_FULL_REDUCTION)->ARGS()->UseManualTime();
|
27ad869f45d847ee7f19e86479bef432459b7357.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
// CUDA kernel for forward
template<typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template<typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data,
Dtype* out_diff, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template<typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
#endif // USE_ROCM
template<typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
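  // When channel_shared_ is set a single slope is used for all channels (the
  // kernel's channel index collapses to 0); otherwise each channel has its own slope.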
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (top[0] == bottom[0]) {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0,
(cl_mem) (bottom_memory_.mutable_gpu_data()), 0,
&ctx);
}
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_forward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// Propagate to param
    // Since writing bottom diff would affect top diff when top and bottom blobs
    // are identical (in-place computation), we first compute the param backward
    // to keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS)(
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n),
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(),
multiplier_.gpu_data(), 1., slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
viennacl::ocl::kernel &oclk_prelu_param = program.get_kernel(
CL_KERNEL_SELECT("prelu_param_backward"));
viennacl::ocl::enqueue(
oclk_prelu_param(
cdim, WrapHandle((cl_mem) top_diff, &ctx), top[0]->offset(n),
WrapHandle((cl_mem) bottom_data, &ctx), bottom[0]->offset(n),
WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), &ctx)),
ctx.get_queue());
if (channel_shared_) {
Dtype d;
greentea_gpu_dot<Dtype>(this->device_context_->id(), channels * dim,
(cl_mem) (backward_buff_.gpu_diff()), 0,
(cl_mem) (multiplier_.gpu_data()), 0, &d);
dsum += d;
} else {
greentea_gpu_gemv<Dtype>(this->device_context_->id(), CblasNoTrans,
channels, dim, 1.,
(cl_mem) (backward_buff_.gpu_diff()), 0,
(cl_mem) (multiplier_.gpu_data()), 0, 1.,
(cl_mem) slope_diff, 0);
}
}
if (channel_shared_) {
greentea_gpu_add_scalar<Dtype>(this->device_context_->id(),
this->blobs_[0]->count(), Dtype(dsum),
(cl_mem) slope_diff, 0);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_backward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
| 27ad869f45d847ee7f19e86479bef432459b7357.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
// CUDA kernel for forward
template<typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template<typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data,
Dtype* out_diff, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template<typename Dtype>
__global__ void PReLUParamBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
}
}
#endif // USE_CUDA
template<typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
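  // When channel_shared_ is set a single slope is used for all channels (the
  // kernel's channel index collapses to 0); otherwise each channel has its own slope.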
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (top[0] == bottom[0]) {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0,
(cl_mem) (bottom_memory_.mutable_gpu_data()), 0,
&ctx);
}
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_forward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// Propagate to param
    // Since writing bottom diff would affect top diff when top and bottom blobs
    // are identical (in-place computation), we first compute the param backward
    // to keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS)(
cdim, top_diff + top[0]->offset(n),
bottom_data + bottom[0]->offset(n),
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype d;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &d);
dsum += d;
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(),
multiplier_.gpu_data(), 1., slope_diff);
}
}
if (channel_shared_) {
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
Dtype dsum = 0.;
for (int n = 0; n < bottom[0]->num(); ++n) {
viennacl::ocl::kernel &oclk_prelu_param = program.get_kernel(
CL_KERNEL_SELECT("prelu_param_backward"));
viennacl::ocl::enqueue(
oclk_prelu_param(
cdim, WrapHandle((cl_mem) top_diff, &ctx), top[0]->offset(n),
WrapHandle((cl_mem) bottom_data, &ctx), bottom[0]->offset(n),
WrapHandle((cl_mem) (backward_buff_.mutable_gpu_diff()), &ctx)),
ctx.get_queue());
if (channel_shared_) {
Dtype d;
greentea_gpu_dot<Dtype>(this->device_context_->id(), channels * dim,
(cl_mem) (backward_buff_.gpu_diff()), 0,
(cl_mem) (multiplier_.gpu_data()), 0, &d);
dsum += d;
} else {
greentea_gpu_gemv<Dtype>(this->device_context_->id(), CblasNoTrans,
channels, dim, 1.,
(cl_mem) (backward_buff_.gpu_diff()), 0,
(cl_mem) (multiplier_.gpu_data()), 0, 1.,
(cl_mem) slope_diff, 0);
}
}
if (channel_shared_) {
greentea_gpu_add_scalar<Dtype>(this->device_context_->id(),
this->blobs_[0]->count(), Dtype(dsum),
(cl_mem) slope_diff, 0);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
viennacl::ocl::kernel &oclk_prelu = program.get_kernel(
CL_KERNEL_SELECT("prelu_backward"));
viennacl::ocl::enqueue(
oclk_prelu(count, channels, dim, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
WrapHandle((cl_mem) slope_data, &ctx), div_factor),
ctx.get_queue());
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
lab4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "CUDA error: ", hipGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
//@@ Define any useful program-wide constants here
#define MASK_WIDTH 3
#define TILE_WIDTH 4
//@@ Define constant memory for device kernel here
__constant__ float Mc[MASK_WIDTH][MASK_WIDTH][MASK_WIDTH];
__global__ void conv3d(float *input, float *output, const int z_size,
const int y_size, const int x_size) {
//@@ Insert kernel code here
int radius = MASK_WIDTH / 2;
__shared__ float N_ds[TILE_WIDTH + MASK_WIDTH - 1][TILE_WIDTH + MASK_WIDTH - 1][TILE_WIDTH + MASK_WIDTH - 1];
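    // Each block loads a (TILE_WIDTH + MASK_WIDTH - 1)^3 input tile (output tile
    // plus halo); only the first TILE_WIDTH^3 threads then compute outputs.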
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int row_o = blockIdx.y * TILE_WIDTH + ty;
int col_o = blockIdx.x * TILE_WIDTH + tx;
int z_o = blockIdx.z * TILE_WIDTH + tz;
int row_i = row_o - radius;
int col_i = col_o - radius;
int z_i = z_o - radius;
float pValue = 0.0f;
if (row_i >= 0 && row_i < y_size && col_i >= 0 && col_i < x_size && z_i >= 0 && z_i < z_size) {
N_ds[tz][ty][tx] = input[z_i * x_size * y_size + row_i * x_size + col_i];
} else {
N_ds[tz][ty][tx] = 0.0f;
}
__syncthreads();
if (tz < TILE_WIDTH && ty < TILE_WIDTH && tx < TILE_WIDTH) {
for (int i = 0; i < MASK_WIDTH; i++) {
for (int j = 0; j < MASK_WIDTH; j++) {
for (int k = 0; k < MASK_WIDTH; k++) {
pValue += Mc[i][j][k] * N_ds[i + tz][j + ty][k + tx];
}
}
}
if (z_o < z_size && row_o < y_size && col_o < x_size) {
output[z_o * y_size * x_size + row_o * x_size + col_o] = pValue;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float *hostInput;
float *hostKernel;
float *hostOutput;
float *deviceInput;
float *deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel =
(float *) wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
int inputSize = (inputLength - 3) * sizeof(float);
hipMalloc((void **) &deviceInput, inputSize);
hipMalloc((void **) &deviceOutput, inputSize);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
// Recall that the first three elements of hostInput are dimensions and
// do
// not need to be copied to the gpu
  hipMemcpy(deviceInput, &hostInput[3], inputSize, hipMemcpyHostToDevice);
  hipMemcpyToSymbol(Mc, hostKernel, kernelLength * sizeof(float));
  wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 dimBlock(TILE_WIDTH + MASK_WIDTH - 1, TILE_WIDTH + MASK_WIDTH - 1, TILE_WIDTH + MASK_WIDTH - 1);
dim3 dinGrid(ceil(x_size / (1.0 * TILE_WIDTH)), ceil(y_size / (1.0 * TILE_WIDTH)),
ceil(z_size / (1.0 * TILE_WIDTH)));
//@@ Launch the GPU kernel here
hipLaunchKernelGGL(( conv3d), dim3(dinGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, z_size, y_size, x_size);
hipDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
hipMemcpy(&hostOutput[3], deviceOutput, inputSize, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
hipFree(deviceInput);
hipFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
return 0;
}
| lab4.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "CUDA error: ", cudaGetErrorString(err)); \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
//@@ Define any useful program-wide constants here
#define MASK_WIDTH 3
#define TILE_WIDTH 4
//@@ Define constant memory for device kernel here
__constant__ float Mc[MASK_WIDTH][MASK_WIDTH][MASK_WIDTH];
__global__ void conv3d(float *input, float *output, const int z_size,
const int y_size, const int x_size) {
//@@ Insert kernel code here
int radius = MASK_WIDTH / 2;
__shared__ float N_ds[TILE_WIDTH + MASK_WIDTH - 1][TILE_WIDTH + MASK_WIDTH - 1][TILE_WIDTH + MASK_WIDTH - 1];
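    // Each block loads a (TILE_WIDTH + MASK_WIDTH - 1)^3 input tile (output tile
    // plus halo); only the first TILE_WIDTH^3 threads then compute outputs.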
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int row_o = blockIdx.y * TILE_WIDTH + ty;
int col_o = blockIdx.x * TILE_WIDTH + tx;
int z_o = blockIdx.z * TILE_WIDTH + tz;
int row_i = row_o - radius;
int col_i = col_o - radius;
int z_i = z_o - radius;
float pValue = 0.0f;
if (row_i >= 0 && row_i < y_size && col_i >= 0 && col_i < x_size && z_i >= 0 && z_i < z_size) {
N_ds[tz][ty][tx] = input[z_i * x_size * y_size + row_i * x_size + col_i];
} else {
N_ds[tz][ty][tx] = 0.0f;
}
__syncthreads();
if (tz < TILE_WIDTH && ty < TILE_WIDTH && tx < TILE_WIDTH) {
for (int i = 0; i < MASK_WIDTH; i++) {
for (int j = 0; j < MASK_WIDTH; j++) {
for (int k = 0; k < MASK_WIDTH; k++) {
pValue += Mc[i][j][k] * N_ds[i + tz][j + ty][k + tx];
}
}
}
if (z_o < z_size && row_o < y_size && col_o < x_size) {
output[z_o * y_size * x_size + row_o * x_size + col_o] = pValue;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int z_size;
int y_size;
int x_size;
int inputLength, kernelLength;
float *hostInput;
float *hostKernel;
float *hostOutput;
float *deviceInput;
float *deviceOutput;
args = wbArg_read(argc, argv);
// Import data
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostKernel =
(float *) wbImport(wbArg_getInputFile(args, 1), &kernelLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
// First three elements are the input dimensions
z_size = hostInput[0];
y_size = hostInput[1];
x_size = hostInput[2];
wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size);
assert(z_size * y_size * x_size == inputLength - 3);
assert(kernelLength == 27);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ Allocate GPU memory here
// Recall that inputLength is 3 elements longer than the input data
// because the first three elements were the dimensions
int inputSize = (inputLength - 3) * sizeof(float);
cudaMalloc((void **) &deviceInput, inputSize);
cudaMalloc((void **) &deviceOutput, inputSize);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ Copy input and kernel to GPU here
// Recall that the first three elements of hostInput are dimensions and
// do
// not need to be copied to the gpu
  cudaMemcpy(deviceInput, &hostInput[3], inputSize, cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(Mc, hostKernel, kernelLength * sizeof(float));
  wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ Initialize grid and block dimensions here
dim3 dimBlock(TILE_WIDTH + MASK_WIDTH - 1, TILE_WIDTH + MASK_WIDTH - 1, TILE_WIDTH + MASK_WIDTH - 1);
dim3 dinGrid(ceil(x_size / (1.0 * TILE_WIDTH)), ceil(y_size / (1.0 * TILE_WIDTH)),
ceil(z_size / (1.0 * TILE_WIDTH)));
//@@ Launch the GPU kernel here
conv3d<<<dinGrid, dimBlock>>>(deviceInput, deviceOutput, z_size, y_size, x_size);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ Copy the device memory back to the host here
// Recall that the first three elements of the output are the dimensions
// and should not be set here (they are set below)
cudaMemcpy(&hostOutput[3], deviceOutput, inputSize, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
// Set the output dimensions for correctness checking
hostOutput[0] = z_size;
hostOutput[1] = y_size;
hostOutput[2] = x_size;
wbSolution(args, hostOutput, inputLength);
// Free device memory
cudaFree(deviceInput);
cudaFree(deviceOutput);
// Free host memory
free(hostInput);
free(hostOutput);
return 0;
}
|
110e51e240cdf166d740f1bfea4ad87d0b769302.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include "nvshmem.h"
#include "nvshmemx.h"
#ifdef ENABLE_MPI_SUPPORT
#include "mpi.h"
#endif
#define NTHREADS 512
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
hipError_t result = (stmt); \
if (hipSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \
hipGetErrorString(result)); \
exit(-1); \
} \
} while (0)
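/*
 * distributed_vector_sum: every PE adds its local x and y vectors elementwise into partial_sum,
 * then the collect concatenates each PE's nelems partial results into sum (nelems * npes ints on
 * every PE). With use_threadgroup set, the block-scoped nvshmemx_collect32_block variant lets all
 * threads of the block take part in the collective; otherwise thread 0 issues nvshmem_collect32.
 */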
__global__ void distributed_vector_sum(int *x, int *y, int *partial_sum, int *sum, long *pSync,
int use_threadgroup, int mype, int npes) {
int index = threadIdx.x;
int nelems = blockDim.x;
int PE_start = 0;
int logPE_stride = 0;
partial_sum[index] = x[index] + y[index];
if (use_threadgroup) {
/* all threads realize the entire collect operation */
nvshmemx_collect32_block(sum, partial_sum, nelems, PE_start, logPE_stride, npes, pSync);
} else {
/* thread 0 realizes the entire collect operation */
if (0 == index) {
nvshmem_collect32(sum, partial_sum, nelems, PE_start, logPE_stride, npes, pSync);
}
}
}
int main(int c, char *v[]) {
int mype, npes;
int *x;
int *y;
int *partial_sum;
int *sum;
int use_threadgroup = 1;
long *pSync;
int nthreads = NTHREADS;
#ifdef ENABLE_MPI_SUPPORT
bool use_mpi = false;
char *value = getenv("NVSHMEMTEST_USE_MPI_LAUNCHER");
if (value) use_mpi = atoi(value);
#endif
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) {
MPI_Init(&c, &v);
int rank, nranks;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
MPI_Comm mpi_comm = MPI_COMM_WORLD;
nvshmemx_init_attr_t attr;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
} else
nvshmem_init();
#else
nvshmem_init();
#endif
npes = nvshmem_n_pes();
mype = nvshmem_my_pe();
CUDA_CHECK(hipSetDevice(mype));
x = (int *)nvshmem_malloc(sizeof(int) * nthreads);
y = (int *)nvshmem_malloc(sizeof(int) * nthreads);
partial_sum = (int *)nvshmem_malloc(sizeof(int) * nthreads);
sum = (int *)nvshmem_malloc(sizeof(int) * nthreads * npes);
pSync = (long *)nvshmem_malloc(sizeof(long) * NVSHMEM_COLLECT_SYNC_SIZE);
void *args[] = {&x, &y, &partial_sum, &sum, &pSync, &use_threadgroup, &mype, &npes};
dim3 dimBlock(nthreads);
dim3 dimGrid(1);
nvshmemx_collective_launch((const void *)distributed_vector_sum, dimGrid, dimBlock, args, 0, 0);
CUDA_CHECK(hipDeviceSynchronize());
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(x);
nvshmem_free(y);
nvshmem_free(partial_sum);
nvshmem_free(sum);
nvshmem_free(pSync);
nvshmem_finalize();
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) MPI_Finalize();
#endif
return 0;
}
| 110e51e240cdf166d740f1bfea4ad87d0b769302.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include "nvshmem.h"
#include "nvshmemx.h"
#ifdef ENABLE_MPI_SUPPORT
#include "mpi.h"
#endif
#define NTHREADS 512
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
cudaError_t result = (stmt); \
if (cudaSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", __FILE__, __LINE__, \
cudaGetErrorString(result)); \
exit(-1); \
} \
} while (0)
__global__ void distributed_vector_sum(int *x, int *y, int *partial_sum, int *sum, long *pSync,
int use_threadgroup, int mype, int npes) {
int index = threadIdx.x;
int nelems = blockDim.x;
int PE_start = 0;
int logPE_stride = 0;
partial_sum[index] = x[index] + y[index];
if (use_threadgroup) {
/* all threads realize the entire collect operation */
nvshmemx_collect32_block(sum, partial_sum, nelems, PE_start, logPE_stride, npes, pSync);
} else {
/* thread 0 realizes the entire collect operation */
if (0 == index) {
nvshmem_collect32(sum, partial_sum, nelems, PE_start, logPE_stride, npes, pSync);
}
}
}
int main(int c, char *v[]) {
int mype, npes;
int *x;
int *y;
int *partial_sum;
int *sum;
int use_threadgroup = 1;
long *pSync;
int nthreads = NTHREADS;
#ifdef ENABLE_MPI_SUPPORT
bool use_mpi = false;
char *value = getenv("NVSHMEMTEST_USE_MPI_LAUNCHER");
if (value) use_mpi = atoi(value);
#endif
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) {
MPI_Init(&c, &v);
int rank, nranks;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
MPI_Comm mpi_comm = MPI_COMM_WORLD;
nvshmemx_init_attr_t attr;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
} else
nvshmem_init();
#else
nvshmem_init();
#endif
npes = nvshmem_n_pes();
mype = nvshmem_my_pe();
CUDA_CHECK(cudaSetDevice(mype));
x = (int *)nvshmem_malloc(sizeof(int) * nthreads);
y = (int *)nvshmem_malloc(sizeof(int) * nthreads);
partial_sum = (int *)nvshmem_malloc(sizeof(int) * nthreads);
sum = (int *)nvshmem_malloc(sizeof(int) * nthreads * npes);
pSync = (long *)nvshmem_malloc(sizeof(long) * NVSHMEM_COLLECT_SYNC_SIZE);
void *args[] = {&x, &y, &partial_sum, &sum, &pSync, &use_threadgroup, &mype, &npes};
dim3 dimBlock(nthreads);
dim3 dimGrid(1);
nvshmemx_collective_launch((const void *)distributed_vector_sum, dimGrid, dimBlock, args, 0, 0);
CUDA_CHECK(cudaDeviceSynchronize());
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(x);
nvshmem_free(y);
nvshmem_free(partial_sum);
nvshmem_free(sum);
nvshmem_free(pSync);
nvshmem_finalize();
#ifdef ENABLE_MPI_SUPPORT
if (use_mpi) MPI_Finalize();
#endif
return 0;
}
|
3cada344317f2ffc04ad8628d6eb24dde31c3bb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** *****************************************************************************
* This program is the confidential and proprietary product of Overview
* Limited. Any unauthorised use, reproduction or transfer of this
* program is strictly prohibited.
* Copyright 2017 Overview Limited. (Subject to limited
* distribution and restricted disclosure only.) All rights reserved.
*
* @file DidoAnalytics_LidarBGSUB.cu
* @author SL
* @version 1
* @date 2017-10-02
* @brief GPU based Background subtraction for the Lidar
*****************************************************************************
**/
/*
 * Algorithm Description - This keeps a background model of the lidar points by first putting the data into a 2.5d representation, clustering points that are nearby in
 * angular space into bins. Then, for each bin of the model, that bin searches the neighbourhood of bins in the observation. If a point in the observation
* is within the fixed threshold, the model point is considered observed, else it is considered unobserved. Observed points have their weights increased,
* unobserved points have their weight decreased. Then for each point in the observation it searches the bins of the model in the neighbourhood. The highest weight
 * of model points within the bin is maintained. If this weight is above a threshold, the observation is considered to be a background point, else it is a foreground point.
* Then for each point that was observed that had no points in the model close to it, those points are added to the model. Finally the points in the model are sorted in
* weight order, and any model points with a weight below a threshold are discarded.
* The learning rate used for updating the weights follows the standard history pattern of 1./min(nframes, history).
*
 * The clustering is done using DBSCAN. The points are again clustered into bins (this time broader), and each point searches its neighbourhood of bins to decide if it is a core
 * point. Then in the next step each point looks through its neighbourhood and takes the highest core parent index in that neighbourhood, which is iteratively repeated several times.
 * Finally all points are allocated the roots of the resulting tree structure as a parent index, and then the clusters are formed into vectors on the CPU by a single insertion sort pass.
*/
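/*
 * Illustrative sketch (not part of the implementation): the per-point weight behaves as an
 * exponential moving average of the "was this model point observed this frame" indicator,
 * so it roughly tracks the fraction of recent frames in which the point was seen, e.g.
 *
 *   float lr = 1.f / min(frameno, hist);        // e.g. frame 4 with hist = 100 gives lr = 0.25
 *   w = w * (1.f - lr) + (observed ? lr : 0.f); // w drifts towards the recent observation rate
 *
 * An observation is only treated as background while the best matching model point keeps a
 * weight at or above the bgthreshold passed to the constructor.
 */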
#include "global_defines.h"
#include "DidoAnalytics_LidarBGSUB.h"
#include "CUDA_Exception.h"
#include "math_constants.h"
#ifdef _WIN32
#include <ppl.h>
#include <concurrent_unordered_map.h>
#else
#include <unordered_map>
#endif
#define DEBUG_TIMINGS 0
#if DEBUG_TIMINGS
#include <chrono>
#include <iostream>
#endif
//error handling function
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
// hipDeviceReset();
throw overview::CUDA_Exception(hipGetErrorString(err), err, line, file);
}
}
#define HANDLE_ERROR(err) {HandleError((err), __FILE__, __LINE__);}
//data freeing simplification
inline void safe_Free(void * cudadata)
{
if (cudadata != nullptr)HANDLE_ERROR(hipFree(cudadata));
}
namespace overview
{
namespace lbgsCUDA
{
//convenience function for swapping with
__device__ void swap(float * array, int ind1, int ind2)
{
float tmp = array[ind1];
array[ind1] = array[ind2];
array[ind2] = tmp;
}
#define TILT_DISTANCE 0.1f
		//as the lidar is in beams, it uses the actual distance in x y space, and a fixed multiplier on the distance in tilt space
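		//The first three terms are the squared horizontal distance between the two returns (law of
		//cosines on the radii projected into the pan plane, r*sin(tilt), assuming tilt is measured
		//from the vertical), and the last term penalises tilt separation at TILT_DISTANCE per radian.
		//The result is a squared distance, which is why the matching threshold is stored squared
		//(sqmindist = mindist*mindist) by the class.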
__device__ __forceinline__ float getDist(float p1, float t1, float r1, float p2, float t2, float r2)
{
__align__(8) float st1 = sinf(t1), st2 = sinf(t2);
__align__(8) float tiltdist = TILT_DISTANCE * abs(t1 - t2);
return r2 * r2*st2*st2 + r1 * r1*st1*st1 - 2 * r1*r2*st1*st2*cosf(p1 - p2) + tiltdist * tiltdist;
}
__global__ void collatePoints(LidarBin obsBox, DidoLidar_rangeData* obs, int npts, int nrows, int ncols, float binWidth, float binHeight)
{
//iterate over the input points
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < npts)
{
//work out which bin you should be in
int x = (int)(obs[index].pan / binWidth) % ncols;
int y = min(max((int)((obs[index].tilt - CUDART_PIO4_F) / binHeight), 0), nrows - 1);
unsigned int binind = atomicAdd(&obsBox.npts[x + y * ncols], 1);
if (binind < LIDARBGSUB_MAX_BIN_PTS)
{
unsigned int oind = binind + LIDARBGSUB_MAX_BIN_PTS * (x + y * ncols);
obsBox.points_pan[oind] = obs[index].pan;
obsBox.points_tilt[oind] = obs[index].tilt;
obsBox.points_range[oind] = obs[index].range;
}
}
}
//we only search one above and below in tilt
//updates the current model weights
__global__ void bgsubKernel_pt1(LidarBin bgmodel, LidarBin obs, float * variances,
float * weights, float threshold, float mindist_init, int nrows, int ncols, float lr, int searchWidth)
{
//one block per bin, using parallel threads for improved operation
if (blockIdx.x >= ncols || blockIdx.y >= nrows)
return;
//correct the npoints for our inputs
const int index = blockIdx.x + blockIdx.y*ncols;
const int ind_y = blockIdx.y;
			//first proceed through the model and increment or decrement the weights depending on whether each point is observed or not
if (threadIdx.x < bgmodel.npts[index])
{
bool unobserved = true;
bool unoccluded = true;
			bool lineobs = false; //checking if the packet is in the dataset (and not lost to occlusions/general IP stuff)
float mdlpt_pan = bgmodel.points_pan[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float mdlpt_tilt = bgmodel.points_tilt[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float mdlpt_range = bgmodel.points_range[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
//tilt values are fixed so there's no value to vertical searching
float mindist = variances[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
for (int i_x = -searchWidth; i_x <= searchWidth; i_x++)
{
//wrapping
int ind_x = (i_x + blockIdx.x + ncols) % ncols;
for (int j = 0; j < obs.npts[ind_x + ind_y * ncols] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
float dist = getDist(obs.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
obs.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
obs.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j], mdlpt_pan, mdlpt_tilt, mdlpt_range);
if (dist < mindist * 3)
{
// mindist = mindist + lr*(dist - mindist);
unobserved = false;
}
else
//is it in the line at all?
{
if (abs(obs.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j] - mdlpt_pan) < 0.0002f)
{
lineobs = true;
								//is something closer and at the same angle?
if ((obs.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j] + 0.5f < mdlpt_range) &&
(abs(obs.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j] - mdlpt_tilt) < 0.0001f))
{
unoccluded = false;
}
}
}
}
}
//update the point appropriately
if (!unobserved || (unoccluded && lineobs))
{
weights[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] = weights[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] * (1.0f - lr) + (unobserved ? 0 : lr);
//update my variance
variances[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] = min(max(mindist, mindist_init / 3), mindist_init * 3);
}
}
}
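		//second pass: each observed point takes the highest weight of any model point within the
		//per-point variance gate in the neighbouring pan bins; if that weight is below the threshold
		//the point is appended to the foreground output through an atomic counter, and observations
		//with no sufficiently close model point are flagged in addToModel for insertion by sortModels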
__global__ void bgsubKernel_pt2(LidarBin bgmodel, LidarBin obs, DidoLidar_rangeData* output, int * noutput, float * variances,
float * weights, bool * addToModel, float threshold, int nrows, int ncols, int searchWidth)
{
//one block per bin, using parallel threads for improved operation
if (blockIdx.x >= ncols || blockIdx.y >= nrows)
return;
//correct the npoints for our inputs
const int index = blockIdx.x + blockIdx.y*ncols;
const int ind_y = blockIdx.y;
//then go through the observations and see if they are background and whether they are new
//this does duplicate effort, but is needed to keep parallel determinism
if (threadIdx.x < obs.npts[index])
{
float obsweight = -1.0f;
bool newpoint = true;
float obspt_pan = obs.points_pan[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_tilt = obs.points_tilt[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_range = obs.points_range[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
for (int i_x = -searchWidth; i_x <= searchWidth; i_x++)
{
//wrapping
int ind_x = (i_x + blockIdx.x + ncols) % ncols;
for (int j = 0; j < bgmodel.npts[ind_x + ind_y * ncols] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
float dist = getDist(bgmodel.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
bgmodel.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
bgmodel.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
obspt_pan, obspt_tilt, obspt_range);
float mindist = variances[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j];
if (dist < 4 * mindist)
{
obsweight = max(obsweight, weights[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j]);
if (dist < mindist * 3)
{
newpoint = false;
//break;///its a sorted list (but this makes it slower due to awkwardness)
}
}
}
}
//update the point appropriately
//mark the point for output
addToModel[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] = newpoint;
if (obsweight < threshold)
{
//put it in the output
int outind = atomicAdd(noutput, 1);
output[outind].range = obspt_range;
output[outind].pan = obspt_pan;
output[outind].tilt = obspt_tilt;
}
}
}
//sorts the models and culls the unobserved points and ones too close to each other
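		//one block per bin: drop near-duplicate new points, insert the survivors into the model,
		//clamp the per-bin count, sort the bin by descending weight with a parallel odd-even
		//bubble sort, then cull entries whose weight has decayed below lr / 4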
__global__ void sortModels(LidarBin bgmodel, LidarBin obs, bool *addToModel, float * variances, float * weights, int nrows, int ncols, float lr, float mindist_init)
{
if (blockIdx.x >= ncols || blockIdx.y >= nrows) return;
const int idx = (blockIdx.x + blockIdx.y*ncols);
//produce an insertion vector
__shared__ bool stillvalid[LIDARBGSUB_MAX_BIN_PTS];
if (threadIdx.x < obs.npts[idx] && addToModel[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x])
{
stillvalid[threadIdx.x] = true;
}
else stillvalid[threadIdx.x] = false;
__syncthreads();
//reduce it own by removing close vectors
for (int i = 1; i < LIDARBGSUB_MAX_BIN_PTS / 2; i++) //step size
{
int fidx = threadIdx.x + i * (threadIdx.x / i);
int sidx = fidx + i;
if (fidx < obs.npts[idx] && sidx < obs.npts[idx])
{
					//compare and coalesce
if (stillvalid[fidx] && stillvalid[sidx] && getDist(obs.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + fidx],
obs.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + fidx], obs.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + fidx],
obs.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + sidx], obs.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + sidx], obs.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + sidx]) < mindist_init)
{
stillvalid[sidx] = false;
}
}
__syncthreads();
}
//then insert the remaining ones
if (stillvalid[threadIdx.x])
{
				//insert the new points here
unsigned int npts = atomicAdd(&bgmodel.npts[idx], 1);
if (npts < LIDARBGSUB_MAX_BIN_PTS)
{
int lidx = (idx)* LIDARBGSUB_MAX_BIN_PTS + npts;
//add it to the model
bgmodel.points_pan[lidx] = obs.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
bgmodel.points_tilt[lidx] = obs.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
bgmodel.points_range[lidx] = obs.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
weights[lidx] = lr;
variances[lidx] = mindist_init;
}
}
__syncthreads();
//reset to max if we overflow
if (threadIdx.x == 0) bgmodel.npts[idx] = min(bgmodel.npts[idx], (unsigned int)LIDARBGSUB_MAX_BIN_PTS);
__syncthreads();
			// bubble sort the models (in parallel) so we can remove the worst
__shared__ bool swapped;
if (threadIdx.x == 0) swapped = true;
while (swapped)
{
if (threadIdx.x == 0) swapped = false;
if (threadIdx.x < bgmodel.npts[idx] / 2)
{
int sidx1 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2 + 1;
int sidx2 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2;
if (weights[sidx1] > weights[sidx2])
{
swap(weights, sidx1, sidx2);
swap(variances, sidx1, sidx2);
swap(bgmodel.points_pan, sidx1, sidx2);
swap(bgmodel.points_tilt, sidx1, sidx2);
swap(bgmodel.points_range, sidx1, sidx2);
swapped = true;
}
sidx1 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2 + 2;
sidx2 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2 + 1;
if (threadIdx.x * 2 + 2 < bgmodel.npts[idx] && weights[sidx1] > weights[sidx2])
{
swap(weights, sidx1, sidx2);
swap(variances, sidx1, sidx2);
swap(bgmodel.points_pan, sidx1, sidx2);
swap(bgmodel.points_tilt, sidx1, sidx2);
swap(bgmodel.points_range, sidx1, sidx2);
swapped = true;
}
}
__syncthreads();
}
__syncthreads();
//now remove any that have negative weights
if (threadIdx.x < bgmodel.npts[idx])
{
if (weights[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] < lr / 4) atomicDec(&bgmodel.npts[idx], 0);
}
}
#define NO_POINT_PARENT -2
#define NON_CORE_PARENT -1
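		//parent labels index into the flattened (bin, slot) array used by the clustering kernels:
		//NO_POINT_PARENT marks an empty slot, NON_CORE_PARENT marks a point that is not a core point,
		//and any non-negative value is a link towards the cluster root that later kernels chase and merge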
__global__ void PDSCAN_init(const LidarBin fgpts, int* parents, bool *core, int nrows, int ncols, float mindist, int ncore, int searchwidth)
{
if (blockIdx.x >= ncols || blockIdx.y >= nrows) return;
const int idx = (blockIdx.x + blockIdx.y*ncols);
const int pind = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x;
if (threadIdx.x < fgpts.npts[idx])
{
				//search your neighbourhood to see how many neighbours you have
int nneighbours = 0;
float obspt_pan = fgpts.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_tilt = fgpts.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_range = fgpts.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
for (int ind_y = (int)(blockIdx.y) - 2; ind_y <= blockIdx.y + 2; ind_y++)
{
if (ind_y >= 0 && ind_y < nrows)
{
for (int i_x = -searchwidth; i_x <= searchwidth; i_x++)
{
//wrapping
int ind_x = (i_x + blockIdx.x + ncols) % ncols;
for (int j = 0; j < fgpts.npts[ind_x + ind_y * ncols] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
if (getDist(obspt_pan, obspt_tilt, obspt_range,
fgpts.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j]) < mindist)
{
nneighbours++;
}
}
}
}
}
if (nneighbours >= ncore)
{
core[pind] = true;
parents[pind] = pind;
}
else
{
core[pind] = false;
parents[pind] = NON_CORE_PARENT;
}
}
else if (threadIdx.x < LIDARBGSUB_MAX_BIN_PTS)
{
core[pind] = false;
parents[pind] = NO_POINT_PARENT;
}
}
__global__ void PDSCAN_local(const LidarBin fgpts, const int * parentsin, int * parentsout, const bool *core, int nrows, int ncols, float epsilon, int searchwidth)
{
if (blockIdx.x >= ncols || blockIdx.y >= nrows) return;
const int idx = (blockIdx.x + blockIdx.y*ncols);
const int pind = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x;
if (threadIdx.x < fgpts.npts[idx])
{
				//check every point in your region to populate your neighbourhood vector
int my_parent = parentsin[pind];
float obspt_pan = fgpts.points_pan[pind];
float obspt_tilt = fgpts.points_tilt[pind];
float obspt_range = fgpts.points_range[pind];
for (int ind_y = (int)(blockIdx.y) - 2; ind_y <= blockIdx.y + 2; ind_y++)
{
if (ind_y >= 0 && ind_y < nrows)
{
for (int i_x = -searchwidth; i_x <= searchwidth; i_x++)
{
//wrapping
int s_indx = ind_y * ncols + ((i_x + blockIdx.x + ncols) % ncols);
for (int j = 0; j < fgpts.npts[s_indx] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
if (core[s_indx*LIDARBGSUB_MAX_BIN_PTS + j] && getDist(obspt_pan, obspt_tilt, obspt_range,
fgpts.points_pan[s_indx*LIDARBGSUB_MAX_BIN_PTS + j], fgpts.points_tilt[s_indx*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_range[s_indx*LIDARBGSUB_MAX_BIN_PTS + j]) < epsilon)
{
//check parent
my_parent = (core[(s_indx)*LIDARBGSUB_MAX_BIN_PTS + j] &&
parentsin[(s_indx)*LIDARBGSUB_MAX_BIN_PTS + j] > my_parent) ?
parentsin[(s_indx)*LIDARBGSUB_MAX_BIN_PTS + j] : my_parent;
}
}
}
}
}
parentsout[pind] = my_parent;
}
}
//larger merges using atomics
__global__ void PDSCAN_global(const LidarBin fgpts, const int * parentsin, int * parentsout, const bool *core, int rows, int cols, float epsilon, int searchwidth)
{
if (blockIdx.x >= cols || blockIdx.y >= rows) return;
const int idx = (blockIdx.x + blockIdx.y*cols);
const int pind = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x;
if (threadIdx.x < fgpts.npts[idx])
{
				//get your current root
int my_root = parentsin[pind];
int it = 0;
while (it < 10 && my_root >= 0 && my_root < rows*cols*LIDARBGSUB_MAX_BIN_PTS && my_root != parentsin[my_root])
{
my_root = parentsin[my_root];
it++;
}
float obspt_pan = fgpts.points_pan[pind];
float obspt_tilt = fgpts.points_tilt[pind];
float obspt_range = fgpts.points_range[pind];
for (int ind_y = (int)(blockIdx.y) - 2; ind_y <= blockIdx.y + 2; ind_y++)
{
if (ind_y >= 0 && ind_y < rows)
{
for (int i_x = -searchwidth; i_x <= searchwidth; i_x++)
{
//wrapping
int s_indx = ind_y * cols + ((i_x + blockIdx.x + cols) % cols);
for (int j = 0; j < fgpts.npts[s_indx] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
if (core[s_indx*LIDARBGSUB_MAX_BIN_PTS + j] && getDist(obspt_pan, obspt_tilt, obspt_range,
fgpts.points_pan[s_indx*LIDARBGSUB_MAX_BIN_PTS + j], fgpts.points_tilt[s_indx*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_range[s_indx*LIDARBGSUB_MAX_BIN_PTS + j]) < epsilon)
{
int otherroot = parentsin[s_indx*LIDARBGSUB_MAX_BIN_PTS + j];
it = 0;
										while (it < 10 && otherroot >= 0 && otherroot < rows*cols*LIDARBGSUB_MAX_BIN_PTS && otherroot != parentsin[otherroot])
{
otherroot = parentsin[otherroot];
it++;
}
if (otherroot > my_root)
{
atomicMax(&(parentsout[my_root < 0 ? pind : my_root]), otherroot);
my_root = parentsout[my_root < 0 ? pind : my_root];
}
}
}
}
}
}
atomicMax(&parentsout[pind], my_root);
}
}
__global__ void setToRoot(const int * parentsin, int * parentsout, int npoints)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < npoints && parentsin[index] >= 0)
{
int head = parentsin[index];
while (head >= 0 && head < npoints && head != parentsin[head])
head = parentsin[head];
parentsout[index] = head;
}
}
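		//gathers the labelled points into one vector per cluster root; on Windows this uses PPL with a
		//concurrent map, elsewhere a single-threaded pass over an std::unordered_map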
std::vector<std::vector<DidoLidar_rangeData>> parseGraphsvec(LidarBin points, int * parents, int nbins)
{
#ifdef _WIN32
concurrency::concurrent_unordered_map<int, std::vector<DidoLidar_rangeData>> rootsinds;
concurrency::parallel_for(0, nbins, [&](int i)
{
for (unsigned int j = 0; j < points.npts[i] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
/*get it's root*/
int root = parents[i*LIDARBGSUB_MAX_BIN_PTS + j];
if (root < 0) continue; //skip the special valued ones
auto found = rootsinds.find(root);
/*add it to that blob*/
if (found != rootsinds.end())
{
found->second.push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
}
/*else create a new blob*/
else
{
std::vector<DidoLidar_rangeData> temp;
temp.push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
rootsinds[root] = temp;
}
}
}
);
//parse out the vectors
std::vector<std::vector<DidoLidar_rangeData>> retval;
for (auto & p : rootsinds)
{
retval.push_back(std::move(p.second));
}
return retval;
#else
std::unordered_map<int, int> rootsinds;
std::vector<std::vector<DidoLidar_rangeData>> retval;
for (int i = 0; i < nbins; i++)
{
for (unsigned int j = 0; j < points.npts[i] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
/*get it's root*/
int root = parents[i*LIDARBGSUB_MAX_BIN_PTS + j];
if (root < 0) continue; //skip the special valued ones
auto found = rootsinds.find(root);
/*add it to that blob*/
if (found != rootsinds.end())
{
retval[found->second].push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
}
/*else create a new blob*/
else
{
rootsinds[root] = retval.size();
std::vector<DidoLidar_rangeData> temp;
temp.push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
retval.push_back(temp);
}
}
}
return retval;
#endif
}
}
DidoAnalytics_LidarBGSUB::DidoAnalytics_LidarBGSUB(float binw, float binh, float mindist, int history, float bgthreshold, float _eps, int _ncore) :
binWidth(binw), binHeight(binh), sqmindist(mindist*mindist), hist(history), bgthresh(bgthreshold), epsilon(_eps), ncore(_ncore)
{
modelCols = abs((int)::floor(2 * CUDART_PI_F / binw)) + 2;
		//no lidar in production I know of has a vertical FOV greater than 90 degrees - maybe this should be a parameter?
modelRows = abs((int)::floor(CUDART_PIO2_F / binh)) + 2;
searchWidth = (int)(mindist / binw) + 1;
if (modelCols*modelRows < 1) throw std::runtime_error("the model size must be at least one in both dimensions");
#if DIDOLIDAR_NOGPU
#else
bgmodel.allocate(modelCols*modelRows);
HANDLE_ERROR(hipMemset(bgmodel.npts, 0, modelCols*modelRows * sizeof(unsigned int)));
HANDLE_ERROR(hipMalloc(&modelWeights, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMemset(modelWeights, 0, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMalloc(&variances, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMemset(variances, 0, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
#endif
}
DidoAnalytics_LidarBGSUB::~DidoAnalytics_LidarBGSUB()
{
#if DIDOLIDAR_NOGPU
#else
bgmodel.deallocate();
safe_Free(modelWeights);
safe_Free(variances);
#endif
}
std::vector<DidoLidar_rangeData> DidoAnalytics_LidarBGSUB::apply(const DidoLidar_rangeData* points, int npts, float learningRate)
{
#if DIDOLIDAR_NOGPU
std::vector<DidoLidar_rangeData> rval;
#else
frameno++;
float lr = (learningRate < 0) ? 1.0f / min(frameno, hist) : learningRate;
if (lr != lr) throw std::runtime_error("learning rate was NaN");
#if DEBUG_TIMINGS
auto prevtime = std::chrono::high_resolution_clock::now();
#endif
//create GPU allocated data
LidarBin d_obs;
DidoLidar_rangeData * d_pts;
int * d_nout;
bool * d_addToModel;
if (modelCols*modelRows < 1) throw std::runtime_error("the model size must be at least one in both dimensions");
d_obs.allocate(modelCols*modelRows);
HANDLE_ERROR(hipMemset(d_obs.npts, 0, modelCols*modelRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(hipMalloc(&d_pts, npts * sizeof(DidoLidar_rangeData)));
HANDLE_ERROR(hipMalloc(&d_nout, sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_addToModel, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMemset(d_addToModel, 0, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS)); //make sure it's at zero
HANDLE_ERROR(hipMemcpy(d_pts, points, npts * sizeof(DidoLidar_rangeData), hipMemcpyHostToDevice));
#if DEBUG_TIMINGS
auto ts = std::chrono::high_resolution_clock::now();
std::cout << "allocation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::collatePoints << <npts / 128 + 1, 128 >> >(d_obs, d_pts, npts, modelRows, modelCols, binWidth, binHeight);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
//run the bgsub
dim3 grid(modelCols, modelRows);
dim3 block(LIDARBGSUB_MAX_BIN_PTS, 1);
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "colaltion took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt1 << <grid, block >> >(bgmodel, d_obs, variances, modelWeights, bgthresh, sqmindist, modelRows, modelCols, lr, searchWidth);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "subtraction part 1 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt2 << <grid, block >> >(bgmodel, d_obs, d_pts, d_nout, variances, modelWeights, d_addToModel, bgthresh, modelRows, modelCols, searchWidth);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "subtraction pt2 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//parse the data to the output
lbgsCUDA::sortModels << <grid, block >> >(bgmodel, d_obs, d_addToModel, variances, modelWeights, modelRows, modelCols, lr, sqmindist*0.75f);
int * nout = (int*)malloc(sizeof(int));
HANDLE_ERROR(hipMemcpy(nout, d_nout, sizeof(int), hipMemcpyDeviceToHost));
std::vector<DidoLidar_rangeData> rval(nout[0]);
HANDLE_ERROR(hipMemcpy(rval.data(), d_pts, nout[0] * sizeof(DidoLidar_rangeData), hipMemcpyDeviceToHost));
if (nout) free(nout);
hipDeviceSynchronize();
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "sorting took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
HANDLE_ERROR(hipGetLastError());
safe_Free(d_addToModel);
d_obs.deallocate();
safe_Free(d_pts);
safe_Free(d_nout);
#endif
return rval;
}
std::vector<std::vector<DidoLidar_rangeData>> DidoAnalytics_LidarBGSUB::applyAndCluster(const DidoLidar_rangeData * points, int npts, float learningRate)
{
#if DIDOLIDAR_NOGPU
std::vector<std::vector<DidoLidar_rangeData>> rval;
#else
frameno++;
float lr = (learningRate < 0) ? 1.0f / min(frameno, hist) : learningRate;
if (lr != lr) throw std::runtime_error("learning rate was NaN");
#if DEBUG_TIMINGS
auto prevtime = std::chrono::high_resolution_clock::now();
#endif
//create GPU allocated data
LidarBin d_obs;
DidoLidar_rangeData * d_pts;
int * d_nout;
bool * d_addToModel;
//clustering data
LidarBin d_cluster_obs;
int * d_cluster_parents_1, *d_cluster_parents_2;
bool * d_cluster_core;
float clusterWidth = min(binWidth * 4, epsilon / 3);
float clusterHeight = min(binHeight * 4, epsilon / 3);
int clusterCols = (int)::floor(2 * CUDART_PI_F / clusterWidth) + 2;
int clusterRows = (int)::floor(binHeight*modelRows / clusterHeight) + 1;
int clusterSearch = (int)(epsilon / clusterWidth) + 1;
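		//the clustering grid is coarser than the model grid (up to 4x the bin size) but each bin is
		//capped at epsilon / 3, and clusterSearch is chosen so the searched pan window spans at least
		//epsilon's worth of bins either side of a point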
if (clusterCols*clusterRows < 1) throw std::runtime_error("the cluster size must be at least one in both dimensions");
if (modelCols*modelRows < 1) throw std::runtime_error("the model size must be at least one in both dimensions");
d_cluster_obs.allocate(clusterCols*clusterRows);
HANDLE_ERROR(hipMemset(d_cluster_obs.npts, 0, clusterCols*clusterRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(hipMalloc(&d_cluster_core, clusterCols*clusterRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMalloc(&d_cluster_parents_1, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMalloc(&d_cluster_parents_2, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
d_obs.allocate(modelRows*modelCols);
HANDLE_ERROR(hipMemset(d_obs.npts, 0, modelCols*modelRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(hipMalloc(&d_pts, npts * sizeof(DidoLidar_rangeData)));
HANDLE_ERROR(hipMalloc(&d_nout, sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_addToModel, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMemset(d_addToModel, 0, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS)); //make sure it's at zero
HANDLE_ERROR(hipMemcpy(d_pts, points, npts * sizeof(DidoLidar_rangeData), hipMemcpyHostToDevice));
#if DEBUG_TIMINGS
auto ts = std::chrono::high_resolution_clock::now();
std::cout << "allocation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::collatePoints << <npts / 128 + 1, 128 >> >(d_obs, d_pts, npts, modelRows, modelCols, binWidth, binHeight);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
//run the bgsub
dim3 grid(modelCols, modelRows);
dim3 block(LIDARBGSUB_MAX_BIN_PTS, 1);
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "colaltion took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt1 << <grid, block >> >(bgmodel, d_obs, variances, modelWeights, bgthresh, sqmindist, modelRows, modelCols, lr, searchWidth);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "subtraction part 1 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt2 << <grid, block >> >(bgmodel, d_obs, d_pts, d_nout, variances, modelWeights, d_addToModel, bgthresh, modelRows, modelCols, searchWidth);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "bgsub pt 2 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//parse the data to the output
lbgsCUDA::sortModels << <grid, block >> >(bgmodel, d_obs, d_addToModel, variances, modelWeights, modelRows, modelCols, lr, sqmindist*0.75f);
int * nout = (int*)malloc(sizeof(int));
HANDLE_ERROR(hipMemcpy(nout, d_nout, sizeof(int), hipMemcpyDeviceToHost));
//collate the foreground points for clustering
lbgsCUDA::collatePoints << <nout[0] / 128 + 1, 128 >> >(d_cluster_obs, d_pts, nout[0], clusterRows, clusterCols, clusterWidth, clusterHeight);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "sorting and clustering collation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
dim3 clustergrid(clusterCols, clusterRows);
//initialise the parents
lbgsCUDA::PDSCAN_init << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, ncore, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan init took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//quickly do local updates
lbgsCUDA::PDSCAN_local << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan local took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now update the roots
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_2, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
//twice so that we get everything
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan global took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::setToRoot << <clusterRows*clusterCols, LIDARBGSUB_MAX_BIN_PTS >> >(d_cluster_parents_2, d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "setting to root took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now drop it onto the cpu and compute the output
LidarBin lbins;
lbins.local_allocate(clusterRows*clusterCols);
std::vector<int> lparents(clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
HANDLE_ERROR(hipMemcpy(lparents.data(), d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS * sizeof(int), hipMemcpyDeviceToHost));
lbins.copyDownFrom(d_cluster_obs, clusterRows*clusterCols);
auto rval = lbgsCUDA::parseGraphsvec(lbins, lparents.data(), clusterRows*clusterCols);
lbins.local_deallocate();
if (nout) free(nout);
hipDeviceSynchronize();
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "generating output took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
HANDLE_ERROR(hipGetLastError());
safe_Free(d_cluster_parents_1);
safe_Free(d_cluster_parents_2);
d_cluster_obs.deallocate();
safe_Free(d_cluster_core);
safe_Free(d_addToModel);
d_obs.deallocate();
safe_Free(d_pts);
safe_Free(d_nout);
#endif
return rval;
}
std::vector<std::vector<DidoLidar_rangeData>> DidoAnalytics_LidarBGSUB::Cluster(const DidoLidar_rangeData * fgpts, size_t npts)
{
#if DIDOLIDAR_NOGPU
std::vector<std::vector<DidoLidar_rangeData>> rval;
#else
//create GPU allocated data
DidoLidar_rangeData * d_pts;
//clustering data
LidarBin d_cluster_obs;
int * d_cluster_parents_1, *d_cluster_parents_2;
bool * d_cluster_core;
float clusterWidth = min(binWidth * 4, epsilon / 3);
float clusterHeight = min(binHeight * 4, epsilon / 3);
int clusterCols = (int)::floor(2 * CUDART_PI_F / clusterWidth) + 2;
int clusterRows = (int)::floor(binHeight*modelRows / clusterHeight) + 1;
int clusterSearch = (int)(epsilon / clusterWidth) + 1;
if (clusterCols*clusterRows < 1) throw std::runtime_error("the cluster size must be at least one in both dimensions");
#if DEBUG_TIMINGS
auto prevtime = std::chrono::high_resolution_clock::now();
#endif
		d_cluster_obs.allocate(clusterCols*clusterRows);
HANDLE_ERROR(hipMemset(d_cluster_obs.npts, 0, clusterCols*clusterRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(hipMalloc(&d_cluster_core, clusterCols*clusterRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMalloc(&d_cluster_parents_1, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMalloc(&d_cluster_parents_2, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(hipMalloc(&d_pts, npts * sizeof(DidoLidar_rangeData)));
HANDLE_ERROR(hipMemcpy(d_pts, fgpts, npts * sizeof(DidoLidar_rangeData), hipMemcpyHostToDevice));
#if DEBUG_TIMINGS
auto ts = std::chrono::high_resolution_clock::now();
std::cout << "cluster allocation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//collate the foreground points for clustering
lbgsCUDA::collatePoints << <(npts / 128) + 1, 128 >> >(d_cluster_obs, d_pts, npts, clusterRows, clusterCols, clusterWidth, clusterHeight);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "cluster collation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
dim3 block(LIDARBGSUB_MAX_BIN_PTS, 1);
dim3 clustergrid(clusterCols, clusterRows);
//initialise the parents
lbgsCUDA::PDSCAN_init << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, ncore, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan inti took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//quickly do local updates
lbgsCUDA::PDSCAN_local << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan local took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now update the roots
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_2, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
//twice so that we get everything
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan global took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::setToRoot << <clusterRows*clusterCols, LIDARBGSUB_MAX_BIN_PTS >> >(d_cluster_parents_2, d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "set to root took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now drop it onto the cpu and compute the output
LidarBin lbins;
lbins.local_allocate(clusterRows*clusterCols);
std::vector<int> lparents(clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
HANDLE_ERROR(hipMemcpy(lparents.data(), d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS * sizeof(int), hipMemcpyDeviceToHost));
lbins.copyDownFrom(d_cluster_obs, clusterRows*clusterCols);
auto rval = lbgsCUDA::parseGraphsvec(lbins, lparents.data(), clusterRows*clusterCols);
hipDeviceSynchronize();
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "generating output took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbins.local_deallocate();
HANDLE_ERROR(hipGetLastError());
safe_Free(d_cluster_parents_1);
safe_Free(d_cluster_parents_2);
d_cluster_obs.deallocate();
safe_Free(d_cluster_core);
safe_Free(d_pts);
#endif
return rval;
}
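	//debug helper: copies the per-bin occupancy counts of the background model into a byte buffer
	//so callers can visualise how full each bin of the model is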
__global__ void countPoints(LidarBin bins, unsigned char* out, int npts)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < npts)
{
out[index] = (unsigned char)bins.npts[index];
}
}
void DidoAnalytics_LidarBGSUB::dispayBgmodelNpts(unsigned char * out, int npts)
{
unsigned char * d_counts;
HANDLE_ERROR(hipMalloc(&d_counts, modelRows*modelCols * sizeof(unsigned char)));
countPoints << <modelRows*modelCols / 128 + 1, 128 >> > (bgmodel, d_counts, modelRows*modelCols);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
HANDLE_ERROR(hipMemcpy(out, d_counts, min(modelRows*modelCols, npts) * sizeof(unsigned char), hipMemcpyDeviceToHost));
safe_Free(d_counts);
}
void LidarBin::allocate(size_t nbins)
{
HANDLE_ERROR(hipMalloc(&npts, nbins * sizeof(unsigned int)));
HANDLE_ERROR(hipMalloc(&points_pan, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float)));
HANDLE_ERROR(hipMalloc(&points_tilt, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float)));
HANDLE_ERROR(hipMalloc(&points_range, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float)));
}
void LidarBin::local_allocate(size_t nbins)
{
npts = (unsigned int *)malloc(nbins * sizeof(unsigned int));
points_pan = (float*)malloc(nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float));
points_tilt = (float*)malloc(nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float));
points_range = (float*)malloc(nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float));
}
void LidarBin::deallocate()
{
safe_Free(npts);
safe_Free(points_pan);
safe_Free(points_tilt);
safe_Free(points_range);
}
void LidarBin::local_deallocate()
{
if (npts) free(npts);
if (points_pan) free(points_pan);
		if (points_tilt) free(points_tilt);
		if (points_range) free(points_range);
}
void LidarBin::copyDownFrom(LidarBin & src, size_t nbins)
{
HANDLE_ERROR(hipMemcpy(npts, src.npts, nbins * sizeof(unsigned int), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(points_pan, src.points_pan, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(points_tilt, src.points_tilt, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(points_range, src.points_range, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float), hipMemcpyDeviceToHost));
}
} | 3cada344317f2ffc04ad8628d6eb24dde31c3bb5.cu | /** *****************************************************************************
* This program is the confidential and proprietary product of Overview
* Limited. Any unauthorised use, reproduction or transfer of this
* program is strictly prohibited.
* Copyright 2017 Overview Limited. (Subject to limited
* distribution and restricted disclosure only.) All rights reserved.
*
* @file DidoAnalytics_LidarBGSUB.cu
* @author SL
* @version 1
* @date 2017-10-02
* @brief GPU based Background subtraction for the Lidar
*****************************************************************************
**/
/*
 * Algorithm Description - This keeps a background model of the lidar points by first putting the data into a 2.5d representation, clustering points that are nearby in
 * angular space into bins. Then, for each bin of the model, that bin searches the neighbourhood of bins in the observation. If a point in the observation
* is within the fixed threshold, the model point is considered observed, else it is considered unobserved. Observed points have their weights increased,
* unobserved points have their weight decreased. Then for each point in the observation it searches the bins of the model in the neighbourhood. The highest weight
 * of model points within the bin is maintained. If this weight is above a threshold, the observation is considered to be a background point, else it is a foreground point.
* Then for each point that was observed that had no points in the model close to it, those points are added to the model. Finally the points in the model are sorted in
* weight order, and any model points with a weight below a threshold are discarded.
* The learning rate used for updating the weights follows the standard history pattern of 1./min(nframes, history).
*
 * The clustering is done using DBSCAN. The points are again clustered into bins (this time broader), and each point searches its neighbourhood of bins to decide if it is a core
 * point. Then in the next step each point looks through its neighbourhood and takes the highest core parent index in that neighbourhood, which is iteratively repeated several times.
 * Finally all points are allocated the roots of the resulting tree structure as a parent index, and then the clusters are formed into vectors on the CPU by a single insertion sort pass.
*/
#include "global_defines.h"
#include "DidoAnalytics_LidarBGSUB.h"
#include "CUDA_Exception.h"
#include "math_constants.h"
#ifdef _WIN32
#include <ppl.h>
#include <concurrent_unordered_map.h>
#else
#include <unordered_map>
#endif
#define DEBUG_TIMINGS 0
#if DEBUG_TIMINGS
#include <chrono>
#include <iostream>
#endif
//error handling function
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
// cudaDeviceReset();
throw overview::CUDA_Exception(cudaGetErrorString(err), err, line, file);
}
}
#define HANDLE_ERROR(err) {HandleError((err), __FILE__, __LINE__);}
//data freeing simplification
inline void safe_Free(void * cudadata)
{
if (cudadata != nullptr)HANDLE_ERROR(cudaFree(cudadata));
}
namespace overview
{
namespace lbgsCUDA
{
//convenience function for swapping with
__device__ void swap(float * array, int ind1, int ind2)
{
float tmp = array[ind1];
array[ind1] = array[ind2];
array[ind2] = tmp;
}
#define TILT_DISTANCE 0.1f
		//as the lidar is in beams, it uses the actual distance in x y space, and a fixed multiplier on the distance in tilt space
__device__ __forceinline__ float getDist(float p1, float t1, float r1, float p2, float t2, float r2)
{
__align__(8) float st1 = sinf(t1), st2 = sinf(t2);
__align__(8) float tiltdist = TILT_DISTANCE * abs(t1 - t2);
return r2 * r2*st2*st2 + r1 * r1*st1*st1 - 2 * r1*r2*st1*st2*cosf(p1 - p2) + tiltdist * tiltdist;
}
__global__ void collatePoints(LidarBin obsBox, DidoLidar_rangeData* obs, int npts, int nrows, int ncols, float binWidth, float binHeight)
{
//iterate over the input points
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < npts)
{
//work out which bin you should be in
int x = (int)(obs[index].pan / binWidth) % ncols;
int y = min(max((int)((obs[index].tilt - CUDART_PIO4_F) / binHeight), 0), nrows - 1);
unsigned int binind = atomicAdd(&obsBox.npts[x + y * ncols], 1);
if (binind < LIDARBGSUB_MAX_BIN_PTS)
{
unsigned int oind = binind + LIDARBGSUB_MAX_BIN_PTS * (x + y * ncols);
obsBox.points_pan[oind] = obs[index].pan;
obsBox.points_tilt[oind] = obs[index].tilt;
obsBox.points_range[oind] = obs[index].range;
}
}
}
//we only search one above and below in tilt
//updates the current model weights
__global__ void bgsubKernel_pt1(LidarBin bgmodel, LidarBin obs, float * variances,
float * weights, float threshold, float mindist_init, int nrows, int ncols, float lr, int searchWidth)
{
//one block per bin, using parallel threads for improved operation
if (blockIdx.x >= ncols || blockIdx.y >= nrows)
return;
//correct the npoints for our inputs
const int index = blockIdx.x + blockIdx.y*ncols;
const int ind_y = blockIdx.y;
			//first proceed through the model and increment or decrement the weights depending on whether each point is observed or not
if (threadIdx.x < bgmodel.npts[index])
{
bool unobserved = true;
bool unoccluded = true;
			bool lineobs = false; //checking if the packet is in the dataset (and not lost to occlusions/general IP stuff)
float mdlpt_pan = bgmodel.points_pan[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float mdlpt_tilt = bgmodel.points_tilt[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float mdlpt_range = bgmodel.points_range[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
//tilt values are fixed so there's no value to vertical searching
float mindist = variances[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
for (int i_x = -searchWidth; i_x <= searchWidth; i_x++)
{
//wrapping
int ind_x = (i_x + blockIdx.x + ncols) % ncols;
for (int j = 0; j < obs.npts[ind_x + ind_y * ncols] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
float dist = getDist(obs.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
obs.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
obs.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j], mdlpt_pan, mdlpt_tilt, mdlpt_range);
if (dist < mindist * 3)
{
// mindist = mindist + lr*(dist - mindist);
unobserved = false;
}
else
//is it in the line at all?
{
if (abs(obs.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j] - mdlpt_pan) < 0.0002f)
{
lineobs = true;
								//is something closer and at the same angle?
if ((obs.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j] + 0.5f < mdlpt_range) &&
(abs(obs.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j] - mdlpt_tilt) < 0.0001f))
{
unoccluded = false;
}
}
}
}
}
//update the point appropriately
if (!unobserved || (unoccluded && lineobs))
{
weights[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] = weights[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] * (1.0f - lr) + (unobserved ? 0 : lr);
//update my variance
variances[(index)*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] = min(max(mindist, mindist_init / 3), mindist_init * 3);
}
}
}
__global__ void bgsubKernel_pt2(LidarBin bgmodel, LidarBin obs, DidoLidar_rangeData* output, int * noutput, float * variances,
float * weights, bool * addToModel, float threshold, int nrows, int ncols, int searchWidth)
{
//one block per bin, using parallel threads for improved operation
if (blockIdx.x >= ncols || blockIdx.y >= nrows)
return;
//correct the npoints for our inputs
const int index = blockIdx.x + blockIdx.y*ncols;
const int ind_y = blockIdx.y;
//then go through the observations and see if they are background and whether they are new
//this does duplicate effort, but is needed to keep parallel determinism
if (threadIdx.x < obs.npts[index])
{
float obsweight = -1.0f;
bool newpoint = true;
float obspt_pan = obs.points_pan[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_tilt = obs.points_tilt[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_range = obs.points_range[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
for (int i_x = -searchWidth; i_x <= searchWidth; i_x++)
{
//wrapping
int ind_x = (i_x + blockIdx.x + ncols) % ncols;
for (int j = 0; j < bgmodel.npts[ind_x + ind_y * ncols] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
float dist = getDist(bgmodel.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
bgmodel.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
bgmodel.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
obspt_pan, obspt_tilt, obspt_range);
float mindist = variances[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j];
if (dist < 4 * mindist)
{
obsweight = max(obsweight, weights[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j]);
if (dist < mindist * 3)
{
newpoint = false;
//break;///its a sorted list (but this makes it slower due to awkwardness)
}
}
}
}
//update the point appropriately
//mark the point for output
addToModel[index*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] = newpoint;
if (obsweight < threshold)
{
//put it in the output
int outind = atomicAdd(noutput, 1);
output[outind].range = obspt_range;
output[outind].pan = obspt_pan;
output[outind].tilt = obspt_tilt;
}
}
}
//sorts the models and culls the unobserved points and ones too close to each other
__global__ void sortModels(LidarBin bgmodel, LidarBin obs, bool *addToModel, float * variances, float * weights, int nrows, int ncols, float lr, float mindist_init)
{
if (blockIdx.x >= ncols || blockIdx.y >= nrows) return;
const int idx = (blockIdx.x + blockIdx.y*ncols);
//produce an insertion vector
__shared__ bool stillvalid[LIDARBGSUB_MAX_BIN_PTS];
if (threadIdx.x < obs.npts[idx] && addToModel[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x])
{
stillvalid[threadIdx.x] = true;
}
else stillvalid[threadIdx.x] = false;
__syncthreads();
//reduce it own by removing close vectors
for (int i = 1; i < LIDARBGSUB_MAX_BIN_PTS / 2; i++) //step size
{
int fidx = threadIdx.x + i * (threadIdx.x / i);
int sidx = fidx + i;
if (fidx < obs.npts[idx] && sidx < obs.npts[idx])
{
					//compare and coalesce
if (stillvalid[fidx] && stillvalid[sidx] && getDist(obs.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + fidx],
obs.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + fidx], obs.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + fidx],
obs.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + sidx], obs.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + sidx], obs.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + sidx]) < mindist_init)
{
stillvalid[sidx] = false;
}
}
__syncthreads();
}
//then insert the remaining ones
if (stillvalid[threadIdx.x])
{
				//insert the new points here
unsigned int npts = atomicAdd(&bgmodel.npts[idx], 1);
if (npts < LIDARBGSUB_MAX_BIN_PTS)
{
int lidx = (idx)* LIDARBGSUB_MAX_BIN_PTS + npts;
//add it to the model
bgmodel.points_pan[lidx] = obs.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
bgmodel.points_tilt[lidx] = obs.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
bgmodel.points_range[lidx] = obs.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
weights[lidx] = lr;
variances[lidx] = mindist_init;
}
}
__syncthreads();
//reset to max if we overflow
if (threadIdx.x == 0) bgmodel.npts[idx] = min(bgmodel.npts[idx], (unsigned int)LIDARBGSUB_MAX_BIN_PTS);
__syncthreads();
// bubble sort the models in parallel (odd-even transposition) so we can remove the worst
__shared__ bool swapped;
if (threadIdx.x == 0) swapped = true;
__syncthreads(); //every thread must see the initial flag before entering the loop
while (swapped)
{
if (threadIdx.x == 0) swapped = false;
__syncthreads(); //make the reset visible before any comparison can set the flag again
if (threadIdx.x < bgmodel.npts[idx] / 2)
{
int sidx1 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2 + 1;
int sidx2 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2;
if (weights[sidx1] > weights[sidx2])
{
swap(weights, sidx1, sidx2);
swap(variances, sidx1, sidx2);
swap(bgmodel.points_pan, sidx1, sidx2);
swap(bgmodel.points_tilt, sidx1, sidx2);
swap(bgmodel.points_range, sidx1, sidx2);
swapped = true;
}
sidx1 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2 + 2;
sidx2 = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x * 2 + 1;
if (threadIdx.x * 2 + 2 < bgmodel.npts[idx] && weights[sidx1] > weights[sidx2])
{
swap(weights, sidx1, sidx2);
swap(variances, sidx1, sidx2);
swap(bgmodel.points_pan, sidx1, sidx2);
swap(bgmodel.points_tilt, sidx1, sidx2);
swap(bgmodel.points_range, sidx1, sidx2);
swapped = true;
}
}
__syncthreads();
}
__syncthreads();
//now drop any whose weight has decayed below the threshold (lr / 4)
if (threadIdx.x < bgmodel.npts[idx])
{
if (weights[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x] < lr / 4) atomicSub(&bgmodel.npts[idx], 1); //the list is sorted by weight, so one decrement per stale entry trims the tail
}
}
#define NO_POINT_PARENT -2
#define NON_CORE_PARENT -1
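//The PDSCAN_* kernels implement a DBSCAN-style clustering of the binned pan/tilt/range points:
//_init marks core points (at least ncore neighbours within the distance threshold) and makes each core point
//its own parent, _local propagates the largest core parent index within each point's neighbourhood,
//_global merges clusters across bins with atomicMax, and setToRoot finally flattens the parent chains.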
__global__ void PDSCAN_init(const LidarBin fgpts, int* parents, bool *core, int nrows, int ncols, float mindist, int ncore, int searchwidth)
{
if (blockIdx.x >= ncols || blockIdx.y >= nrows) return;
const int idx = (blockIdx.x + blockIdx.y*ncols);
const int pind = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x;
if (threadIdx.x < fgpts.npts[idx])
{
//search your neighbourhood to see how many neighbours you have
int nneighbours = 0;
float obspt_pan = fgpts.points_pan[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_tilt = fgpts.points_tilt[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
float obspt_range = fgpts.points_range[idx*LIDARBGSUB_MAX_BIN_PTS + threadIdx.x];
for (int ind_y = (int)(blockIdx.y) - 2; ind_y <= blockIdx.y + 2; ind_y++)
{
if (ind_y >= 0 && ind_y < nrows)
{
for (int i_x = -searchwidth; i_x <= searchwidth; i_x++)
{
//wrapping
int ind_x = (i_x + blockIdx.x + ncols) % ncols;
for (int j = 0; j < fgpts.npts[ind_x + ind_y * ncols] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
if (getDist(obspt_pan, obspt_tilt, obspt_range,
fgpts.points_pan[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_tilt[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_range[(ind_x + ind_y * ncols)*LIDARBGSUB_MAX_BIN_PTS + j]) < mindist)
{
nneighbours++;
}
}
}
}
}
if (nneighbours >= ncore)
{
core[pind] = true;
parents[pind] = pind;
}
else
{
core[pind] = false;
parents[pind] = NON_CORE_PARENT;
}
}
else if (threadIdx.x < LIDARBGSUB_MAX_BIN_PTS)
{
core[pind] = false;
parents[pind] = NO_POINT_PARENT;
}
}
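//local union step: every point adopts the largest parent index of any core point within epsilon of it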
__global__ void PDSCAN_local(const LidarBin fgpts, const int * parentsin, int * parentsout, const bool *core, int nrows, int ncols, float epsilon, int searchwidth)
{
if (blockIdx.x >= ncols || blockIdx.y >= nrows) return;
const int idx = (blockIdx.x + blockIdx.y*ncols);
const int pind = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x;
if (threadIdx.x < fgpts.npts[idx])
{
//check every core point within epsilon of this point and adopt the largest parent index seen
int my_parent = parentsin[pind];
float obspt_pan = fgpts.points_pan[pind];
float obspt_tilt = fgpts.points_tilt[pind];
float obspt_range = fgpts.points_range[pind];
for (int ind_y = (int)(blockIdx.y) - 2; ind_y <= blockIdx.y + 2; ind_y++)
{
if (ind_y >= 0 && ind_y < nrows)
{
for (int i_x = -searchwidth; i_x <= searchwidth; i_x++)
{
//wrapping
int s_indx = ind_y * ncols + ((i_x + blockIdx.x + ncols) % ncols);
for (int j = 0; j < fgpts.npts[s_indx] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
if (core[s_indx*LIDARBGSUB_MAX_BIN_PTS + j] && getDist(obspt_pan, obspt_tilt, obspt_range,
fgpts.points_pan[s_indx*LIDARBGSUB_MAX_BIN_PTS + j], fgpts.points_tilt[s_indx*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_range[s_indx*LIDARBGSUB_MAX_BIN_PTS + j]) < epsilon)
{
//check parent
my_parent = (core[(s_indx)*LIDARBGSUB_MAX_BIN_PTS + j] &&
parentsin[(s_indx)*LIDARBGSUB_MAX_BIN_PTS + j] > my_parent) ?
parentsin[(s_indx)*LIDARBGSUB_MAX_BIN_PTS + j] : my_parent;
}
}
}
}
}
parentsout[pind] = my_parent;
}
}
//larger merges using atomics
__global__ void PDSCAN_global(const LidarBin fgpts, const int * parentsin, int * parentsout, const bool *core, int rows, int cols, float epsilon, int searchwidth)
{
if (blockIdx.x >= cols || blockIdx.y >= rows) return;
const int idx = (blockIdx.x + blockIdx.y*cols);
const int pind = idx * LIDARBGSUB_MAX_BIN_PTS + threadIdx.x;
if (threadIdx.x < fgpts.npts[idx])
{
//get your current root
int my_root = parentsin[pind];
int it = 0;
while (it < 10 && my_root >= 0 && my_root < rows*cols*LIDARBGSUB_MAX_BIN_PTS && my_root != parentsin[my_root])
{
my_root = parentsin[my_root];
it++;
}
float obspt_pan = fgpts.points_pan[pind];
float obspt_tilt = fgpts.points_tilt[pind];
float obspt_range = fgpts.points_range[pind];
for (int ind_y = (int)(blockIdx.y) - 2; ind_y <= blockIdx.y + 2; ind_y++)
{
if (ind_y >= 0 && ind_y < rows)
{
for (int i_x = -searchwidth; i_x <= searchwidth; i_x++)
{
//wrapping
int s_indx = ind_y * cols + ((i_x + blockIdx.x + cols) % cols);
for (int j = 0; j < fgpts.npts[s_indx] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
if (core[s_indx*LIDARBGSUB_MAX_BIN_PTS + j] && getDist(obspt_pan, obspt_tilt, obspt_range,
fgpts.points_pan[s_indx*LIDARBGSUB_MAX_BIN_PTS + j], fgpts.points_tilt[s_indx*LIDARBGSUB_MAX_BIN_PTS + j],
fgpts.points_range[s_indx*LIDARBGSUB_MAX_BIN_PTS + j]) < epsilon)
{
int otherroot = parentsin[s_indx*LIDARBGSUB_MAX_BIN_PTS + j];
it = 0;
while (it < 10 && otherroot >= 0 && otherroot < rows*cols*LIDARBGSUB_MAX_BIN_PTS && otherroot != parentsin[otherroot])
{
otherroot = parentsin[otherroot];
it++;
}
if (otherroot > my_root)
{
atomicMax(&(parentsout[my_root < 0 ? pind : my_root]), otherroot);
my_root = parentsout[my_root < 0 ? pind : my_root];
}
}
}
}
}
}
atomicMax(&parentsout[pind], my_root);
}
}
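//path-compression pass: replace each point's parent with the root of its parent chain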
__global__ void setToRoot(const int * parentsin, int * parentsout, int npoints)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < npoints && parentsin[index] >= 0)
{
int head = parentsin[index];
while (head >= 0 && head < npoints && head != parentsin[head])
head = parentsin[head];
parentsout[index] = head;
}
}
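//gathers points that share a cluster root into one vector per cluster (parallel via PPL on Windows, serial otherwise)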
std::vector<std::vector<DidoLidar_rangeData>> parseGraphsvec(LidarBin points, int * parents, int nbins)
{
#ifdef _WIN32
concurrency::concurrent_unordered_map<int, std::vector<DidoLidar_rangeData>> rootsinds;
concurrency::parallel_for(0, nbins, [&](int i)
{
for (unsigned int j = 0; j < points.npts[i] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
/*get its root*/
int root = parents[i*LIDARBGSUB_MAX_BIN_PTS + j];
if (root < 0) continue; //skip the special valued ones
auto found = rootsinds.find(root);
/*add it to that blob*/
if (found != rootsinds.end())
{
found->second.push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
}
/*else create a new blob*/
else
{
std::vector<DidoLidar_rangeData> temp;
temp.push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
rootsinds[root] = temp;
}
}
}
);
//parse out the vectors
std::vector<std::vector<DidoLidar_rangeData>> retval;
for (auto & p : rootsinds)
{
retval.push_back(std::move(p.second));
}
return retval;
#else
std::unordered_map<int, int> rootsinds;
std::vector<std::vector<DidoLidar_rangeData>> retval;
for (int i = 0; i < nbins; i++)
{
for (unsigned int j = 0; j < points.npts[i] && j < LIDARBGSUB_MAX_BIN_PTS; j++)
{
/*get its root*/
int root = parents[i*LIDARBGSUB_MAX_BIN_PTS + j];
if (root < 0) continue; //skip the special valued ones
auto found = rootsinds.find(root);
/*add it to that blob*/
if (found != rootsinds.end())
{
retval[found->second].push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
}
/*else create a new blob*/
else
{
rootsinds[root] = retval.size();
std::vector<DidoLidar_rangeData> temp;
temp.push_back(DidoLidar_rangeData(points.points_range[i*LIDARBGSUB_MAX_BIN_PTS + j],
points.points_pan[i*LIDARBGSUB_MAX_BIN_PTS + j], points.points_tilt[i*LIDARBGSUB_MAX_BIN_PTS + j]));
retval.push_back(temp);
}
}
}
return retval;
#endif
}
}
DidoAnalytics_LidarBGSUB::DidoAnalytics_LidarBGSUB(float binw, float binh, float mindist, int history, float bgthreshold, float _eps, int _ncore) :
binWidth(binw), binHeight(binh), sqmindist(mindist*mindist), hist(history), bgthresh(bgthreshold), epsilon(_eps), ncore(_ncore)
{
modelCols = abs((int)std::floor(2 * CUDART_PI_F / binw)) + 2;
//no lidar in production I know of has a vertical FOV greater than 90 degrees - maybe this should be a parameter?
modelRows = abs((int)std::floor(CUDART_PIO2_F / binh)) + 2;
searchWidth = (int)(mindist / binw) + 1;
if (modelCols*modelRows < 1) throw std::runtime_error("the model size must be at least one in both dimensions");
#if DIDOLIDAR_NOGPU
#else
bgmodel.allocate(modelCols*modelRows);
HANDLE_ERROR(cudaMemset(bgmodel.npts, 0, modelCols*modelRows * sizeof(unsigned int)));
HANDLE_ERROR(cudaMalloc(&modelWeights, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMemset(modelWeights, 0, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMalloc(&variances, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMemset(variances, 0, modelCols*modelRows * sizeof(float)*LIDARBGSUB_MAX_BIN_PTS));
#endif
}
DidoAnalytics_LidarBGSUB::~DidoAnalytics_LidarBGSUB()
{
#if DIDOLIDAR_NOGPU
#else
bgmodel.deallocate();
safe_Free(modelWeights);
safe_Free(variances);
#endif
}
std::vector<DidoLidar_rangeData> DidoAnalytics_LidarBGSUB::apply(const DidoLidar_rangeData* points, int npts, float learningRate)
{
#if DIDOLIDAR_NOGPU
std::vector<DidoLidar_rangeData> rval;
#else
frameno++;
float lr = (learningRate < 0) ? 1.0f / min(frameno, hist) : learningRate;
if (lr != lr) throw std::runtime_error("learning rate was NaN");
#if DEBUG_TIMINGS
auto prevtime = std::chrono::high_resolution_clock::now();
#endif
//create GPU allocated data
LidarBin d_obs;
DidoLidar_rangeData * d_pts;
int * d_nout;
bool * d_addToModel;
if (modelCols*modelRows < 1) throw std::runtime_error("the model size must be at least one in both dimensions");
d_obs.allocate(modelCols*modelRows);
HANDLE_ERROR(cudaMemset(d_obs.npts, 0, modelCols*modelRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(cudaMalloc(&d_pts, npts * sizeof(DidoLidar_rangeData)));
HANDLE_ERROR(cudaMalloc(&d_nout, sizeof(int)));
HANDLE_ERROR(cudaMemset(d_nout, 0, sizeof(int))); //the foreground counter must start at zero before the kernels atomicAdd into it
HANDLE_ERROR(cudaMalloc(&d_addToModel, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMemset(d_addToModel, 0, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS)); //make sure it's at zero
HANDLE_ERROR(cudaMemcpy(d_pts, points, npts * sizeof(DidoLidar_rangeData), cudaMemcpyHostToDevice));
#if DEBUG_TIMINGS
auto ts = std::chrono::high_resolution_clock::now();
std::cout << "allocation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::collatePoints << <npts / 128 + 1, 128 >> >(d_obs, d_pts, npts, modelRows, modelCols, binWidth, binHeight);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
//run the bgsub
dim3 grid(modelCols, modelRows);
dim3 block(LIDARBGSUB_MAX_BIN_PTS, 1);
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "colaltion took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt1 << <grid, block >> >(bgmodel, d_obs, variances, modelWeights, bgthresh, sqmindist, modelRows, modelCols, lr, searchWidth);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "subtraction part 1 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt2 << <grid, block >> >(bgmodel, d_obs, d_pts, d_nout, variances, modelWeights, d_addToModel, bgthresh, modelRows, modelCols, searchWidth);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "subtraction pt2 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//parse the data to the output
lbgsCUDA::sortModels << <grid, block >> >(bgmodel, d_obs, d_addToModel, variances, modelWeights, modelRows, modelCols, lr, sqmindist*0.75f);
int * nout = (int*)malloc(sizeof(int));
HANDLE_ERROR(cudaMemcpy(nout, d_nout, sizeof(int), cudaMemcpyDeviceToHost));
std::vector<DidoLidar_rangeData> rval(nout[0]);
HANDLE_ERROR(cudaMemcpy(rval.data(), d_pts, nout[0] * sizeof(DidoLidar_rangeData), cudaMemcpyDeviceToHost));
if (nout) free(nout);
cudaDeviceSynchronize();
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "sorting took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
HANDLE_ERROR(cudaGetLastError());
safe_Free(d_addToModel);
d_obs.deallocate();
safe_Free(d_pts);
safe_Free(d_nout);
#endif
return rval;
}
std::vector<std::vector<DidoLidar_rangeData>> DidoAnalytics_LidarBGSUB::applyAndCluster(const DidoLidar_rangeData * points, int npts, float learningRate)
{
#if DIDOLIDAR_NOGPU
std::vector<std::vector<DidoLidar_rangeData>> rval;
#else
frameno++;
float lr = (learningRate < 0) ? 1.0f / min(frameno, hist) : learningRate;
if (lr != lr) throw std::runtime_error("learning rate was NaN");
#if DEBUG_TIMINGS
auto prevtime = std::chrono::high_resolution_clock::now();
#endif
//create GPU allocated data
LidarBin d_obs;
DidoLidar_rangeData * d_pts;
int * d_nout;
bool * d_addToModel;
//clustering data
LidarBin d_cluster_obs;
int * d_cluster_parents_1, *d_cluster_parents_2;
bool * d_cluster_core;
float clusterWidth = min(binWidth * 4, epsilon / 3);
float clusterHeight = min(binHeight * 4, epsilon / 3);
int clusterCols = (int)std::floor(2 * CUDART_PI_F / clusterWidth) + 2;
int clusterRows = (int)std::floor(binHeight*modelRows / clusterHeight) + 1;
int clusterSearch = (int)(epsilon / clusterWidth) + 1;
if (clusterCols*clusterRows < 1) throw std::runtime_error("the cluster size must be at least one in both dimensions");
if (modelCols*modelRows < 1) throw std::runtime_error("the model size must be at least one in both dimensions");
d_cluster_obs.allocate(clusterCols*clusterRows);
HANDLE_ERROR(cudaMemset(d_cluster_obs.npts, 0, clusterCols*clusterRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(cudaMalloc(&d_cluster_core, clusterCols*clusterRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMalloc(&d_cluster_parents_1, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMalloc(&d_cluster_parents_2, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
d_obs.allocate(modelRows*modelCols);
HANDLE_ERROR(cudaMemset(d_obs.npts, 0, modelCols*modelRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(cudaMalloc(&d_pts, npts * sizeof(DidoLidar_rangeData)));
HANDLE_ERROR(cudaMalloc(&d_nout, sizeof(int)));
HANDLE_ERROR(cudaMemset(d_nout, 0, sizeof(int))); //the foreground counter must start at zero before the kernels atomicAdd into it
HANDLE_ERROR(cudaMalloc(&d_addToModel, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMemset(d_addToModel, 0, modelCols*modelRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS)); //make sure it's at zero
HANDLE_ERROR(cudaMemcpy(d_pts, points, npts * sizeof(DidoLidar_rangeData), cudaMemcpyHostToDevice));
#if DEBUG_TIMINGS
auto ts = std::chrono::high_resolution_clock::now();
std::cout << "allocation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::collatePoints << <npts / 128 + 1, 128 >> >(d_obs, d_pts, npts, modelRows, modelCols, binWidth, binHeight);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
//run the bgsub
dim3 grid(modelCols, modelRows);
dim3 block(LIDARBGSUB_MAX_BIN_PTS, 1);
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "colaltion took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt1 << <grid, block >> >(bgmodel, d_obs, variances, modelWeights, bgthresh, sqmindist, modelRows, modelCols, lr, searchWidth);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "subtraction part 1 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::bgsubKernel_pt2 << <grid, block >> >(bgmodel, d_obs, d_pts, d_nout, variances, modelWeights, d_addToModel, bgthresh, modelRows, modelCols, searchWidth);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "bgsub pt 2 took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//parse the data to the output
lbgsCUDA::sortModels << <grid, block >> >(bgmodel, d_obs, d_addToModel, variances, modelWeights, modelRows, modelCols, lr, sqmindist*0.75f);
int * nout = (int*)malloc(sizeof(int));
HANDLE_ERROR(cudaMemcpy(nout, d_nout, sizeof(int), cudaMemcpyDeviceToHost));
//collate the foreground points for clustering
lbgsCUDA::collatePoints << <nout[0] / 128 + 1, 128 >> >(d_cluster_obs, d_pts, nout[0], clusterRows, clusterCols, clusterWidth, clusterHeight);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "sorting and clustering collation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
dim3 clustergrid(clusterCols, clusterRows);
//initialise the parents
lbgsCUDA::PDSCAN_init << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, ncore, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan init took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//quickly do local updates
lbgsCUDA::PDSCAN_local << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan local took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now update the roots
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_2, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
//twice so that we get everything
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan global took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::setToRoot << <clusterRows*clusterCols, LIDARBGSUB_MAX_BIN_PTS >> >(d_cluster_parents_2, d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "setting to root took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now drop it onto the cpu and compute the output
LidarBin lbins;
lbins.local_allocate(clusterRows*clusterCols);
std::vector<int> lparents(clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
HANDLE_ERROR(cudaMemcpy(lparents.data(), d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS * sizeof(int), cudaMemcpyDeviceToHost));
lbins.copyDownFrom(d_cluster_obs, clusterRows*clusterCols);
auto rval = lbgsCUDA::parseGraphsvec(lbins, lparents.data(), clusterRows*clusterCols);
lbins.local_deallocate();
if (nout) free(nout);
cudaDeviceSynchronize();
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "generating output took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
HANDLE_ERROR(cudaGetLastError());
safe_Free(d_cluster_parents_1);
safe_Free(d_cluster_parents_2);
d_cluster_obs.deallocate();
safe_Free(d_cluster_core);
safe_Free(d_addToModel);
d_obs.deallocate();
safe_Free(d_pts);
safe_Free(d_nout);
#endif
return rval;
}
std::vector<std::vector<DidoLidar_rangeData>> DidoAnalytics_LidarBGSUB::Cluster(const DidoLidar_rangeData * fgpts, size_t npts)
{
#if DIDOLIDAR_NOGPU
std::vector<std::vector<DidoLidar_rangeData>> rval;
#else
//create GPU allocated data
DidoLidar_rangeData * d_pts;
//clustering data
LidarBin d_cluster_obs;
int * d_cluster_parents_1, *d_cluster_parents_2;
bool * d_cluster_core;
float clusterWidth = min(binWidth * 4, epsilon / 3);
float clusterHeight = min(binHeight * 4, epsilon / 3);
int clusterCols = (int)std::floor(2 * CUDART_PI_F / clusterWidth) + 2;
int clusterRows = (int)std::floor(binHeight*modelRows / clusterHeight) + 1;
int clusterSearch = (int)(epsilon / clusterWidth) + 1;
if (clusterCols*clusterRows < 1) throw std::runtime_error("the cluster size must be at least one in both dimensions");
#if DEBUG_TIMINGS
auto prevtime = std::chrono::high_resolution_clock::now();
#endif
d_cluster_obs.allocate(clusterCols*clusterRows); //allocate() takes the number of bins, not a byte count
HANDLE_ERROR(cudaMemset(d_cluster_obs.npts, 0, clusterCols*clusterRows * sizeof(unsigned int))); //make sure it's at zero
HANDLE_ERROR(cudaMalloc(&d_cluster_core, clusterCols*clusterRows * sizeof(bool) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMalloc(&d_cluster_parents_1, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMalloc(&d_cluster_parents_2, clusterCols*clusterRows * sizeof(int) * LIDARBGSUB_MAX_BIN_PTS));
HANDLE_ERROR(cudaMalloc(&d_pts, npts * sizeof(DidoLidar_rangeData)));
HANDLE_ERROR(cudaMemcpy(d_pts, fgpts, npts * sizeof(DidoLidar_rangeData), cudaMemcpyHostToDevice));
#if DEBUG_TIMINGS
auto ts = std::chrono::high_resolution_clock::now();
std::cout << "cluster allocation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//collate the foreground points for clustering
lbgsCUDA::collatePoints << <(npts / 128) + 1, 128 >> >(d_cluster_obs, d_pts, npts, clusterRows, clusterCols, clusterWidth, clusterHeight);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "cluster collation took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
dim3 block(LIDARBGSUB_MAX_BIN_PTS, 1);
dim3 clustergrid(clusterCols, clusterRows);
//initialise the parents
lbgsCUDA::PDSCAN_init << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, ncore, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan inti took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//quickly do local updates
lbgsCUDA::PDSCAN_local << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan local took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now update the roots
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_2, d_cluster_parents_1, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
//twice so that we get everything
lbgsCUDA::PDSCAN_global << <clustergrid, block >> >(d_cluster_obs, d_cluster_parents_1, d_cluster_parents_2, d_cluster_core, clusterRows, clusterCols, epsilon, clusterSearch);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "pdbscan global took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbgsCUDA::setToRoot << <clusterRows*clusterCols, LIDARBGSUB_MAX_BIN_PTS >> >(d_cluster_parents_2, d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "set to root took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
//now drop it onto the cpu and compute the output
LidarBin lbins;
lbins.local_allocate(clusterRows*clusterCols);
std::vector<int> lparents(clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS);
HANDLE_ERROR(cudaMemcpy(lparents.data(), d_cluster_parents_1, clusterRows*clusterCols*LIDARBGSUB_MAX_BIN_PTS * sizeof(int), cudaMemcpyDeviceToHost));
lbins.copyDownFrom(d_cluster_obs, clusterRows*clusterCols);
auto rval = lbgsCUDA::parseGraphsvec(lbins, lparents.data(), clusterRows*clusterCols);
cudaDeviceSynchronize();
#if DEBUG_TIMINGS
ts = std::chrono::high_resolution_clock::now();
std::cout << "generating output took " << std::chrono::duration_cast<std::chrono::milliseconds>(ts - prevtime).count() << "ms" << std::endl;
prevtime = ts;
#endif
lbins.local_deallocate();
HANDLE_ERROR(cudaGetLastError());
safe_Free(d_cluster_parents_1);
safe_Free(d_cluster_parents_2);
d_cluster_obs.deallocate();
safe_Free(d_cluster_core);
safe_Free(d_pts);
#endif
return rval;
}
__global__ void countPoints(LidarBin bins, unsigned char* out, int npts)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < npts)
{
out[index] = (unsigned char)bins.npts[index];
}
}
void DidoAnalytics_LidarBGSUB::dispayBgmodelNpts(unsigned char * out, int npts)
{
unsigned char * d_counts;
HANDLE_ERROR(cudaMalloc(&d_counts, modelRows*modelCols * sizeof(unsigned char)));
countPoints << <modelRows*modelCols / 128 + 1, 128 >> > (bgmodel, d_counts, modelRows*modelCols);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
HANDLE_ERROR(cudaMemcpy(out, d_counts, min(modelRows*modelCols, npts) * sizeof(unsigned char), cudaMemcpyDeviceToHost));
safe_Free(d_counts);
}
void LidarBin::allocate(size_t nbins)
{
HANDLE_ERROR(cudaMalloc(&npts, nbins * sizeof(unsigned int)));
HANDLE_ERROR(cudaMalloc(&points_pan, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float)));
HANDLE_ERROR(cudaMalloc(&points_tilt, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float)));
HANDLE_ERROR(cudaMalloc(&points_range, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float)));
}
void LidarBin::local_allocate(size_t nbins)
{
npts = (unsigned int *)malloc(nbins * sizeof(unsigned int));
points_pan = (float*)malloc(nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float));
points_tilt = (float*)malloc(nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float));
points_range = (float*)malloc(nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float));
}
void LidarBin::deallocate()
{
safe_Free(npts);
safe_Free(points_pan);
safe_Free(points_tilt);
safe_Free(points_range);
}
void LidarBin::local_deallocate()
{
if (npts) free(npts);
if (points_pan) free(points_pan);
if (points_tilt) free(points_tilt);
if (points_range) free(points_range);
}
void LidarBin::copyDownFrom(LidarBin & src, size_t nbins)
{
HANDLE_ERROR(cudaMemcpy(npts, src.npts, nbins * sizeof(unsigned int), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(points_pan, src.points_pan, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(points_tilt, src.points_tilt, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(points_range, src.points_range, nbins*LIDARBGSUB_MAX_BIN_PTS * sizeof(float), cudaMemcpyDeviceToHost));
}
} |
monteCarlo.hip | // !!! This is a file automatically generated by hipify!!!
/*
rkrish11 Rahul Krishna
*/
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define SEED 35791246
__global__ void init_stuff(hiprandState_t *state, int count) {
// This sets a random number seed for all the threads
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<count)
hiprand_init(1337, idx, 0, &state[idx]);
}
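// Each thread samples one point (x,y) uniformly in the unit square and records 1 if it lands
// inside the quarter circle (x*x + y*y <= 1), else 0; pi is then estimated as 4 * hits / trials.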
__global__ void cudaMonte(double* pi, int count, hiprandState_t* state) {
// Perform the MC simulation on the threads
int id=blockIdx.x*blockDim.x+threadIdx.x;
double x,y,z;
if (id<count) {
x = (double)hiprand_uniform(&state[id]);
y = (double)hiprand_uniform(&state[id]);
z = x*x+y*y;
if (z<=1) pi[id]=1;
else pi[id]=0;
}
__syncthreads();
// Find the total number of points that lie inside the quadrant of the circle.
// __syncthreads() only synchronizes within one block, so let thread 0 do this serial
// sum (adequate for a single block; larger runs would need a real reduction or atomicAdd).
if (id == 0) {
for (int i=1; i<count;i++) {
pi[0]+=pi[i];
}
}
}
int main(int argc, char** argv) {
int niter=0;
double pi;
double* d_pi;
hiprandState_t *d_state;
printf("Enter the number of iterations used to estimate pi: ");
scanf("%d",&niter);
double* h_pi = new double[niter];
if (hipMalloc(&d_pi, sizeof(double)*niter) != hipSuccess) { //d_pi holds doubles, so size it with sizeof(double)
printf("Error in memory allocation.\n");
return 0;
}
if (hipMalloc(&d_state, sizeof(hiprandState_t)*niter) != hipSuccess) {
printf("Error in memory allocation for random state.\n");
return 0;
}
if (hipMemcpy (d_pi, h_pi, sizeof(double)*niter, hipMemcpyHostToDevice) != hipSuccess) {
printf("Error in copy from host to device.\n");
hipFree(d_pi);
return 0;
}
// Number of threads = 1024, number of blocks = (int) (niter/threads)+1
hipLaunchKernelGGL(( init_stuff), dim3((int) niter/1024+1), dim3(1024), 0, 0, d_state, niter);
hipLaunchKernelGGL(( cudaMonte), dim3((int) niter/1024+1), dim3(1024), 0, 0, d_pi, niter, d_state);
if (hipMemcpy (h_pi, d_pi, sizeof(double)*niter, hipMemcpyDeviceToHost) != hipSuccess) {
printf("Error in copy from device to host.\n");
delete[] h_pi;
hipFree(d_pi);
return 0;
}
// Final Estimate of pi
pi= (double) h_pi[0]/niter*4;
printf("# of trials= %d , estimate of pi is %g \n",niter,pi);
}
| monteCarlo.cu | /*
rkrish11 Rahul Krishna
*/
#include "cuda.h"
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define SEED 35791246
__global__ void init_stuff(curandState *state, int count) {
// This sets a random number seed for all the threads
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<count)
curand_init(1337, idx, 0, &state[idx]);
}
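// Each thread samples one point (x,y) uniformly in the unit square and records 1 if it lands
// inside the quarter circle (x*x + y*y <= 1), else 0; pi is then estimated as 4 * hits / trials.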
__global__ void cudaMonte(double* pi, int count, curandState* state) {
// Perform the MC simulation on the threads
int id=blockIdx.x*blockDim.x+threadIdx.x;
double x,y,z;
if (id<count) {
x = (double)curand_uniform(&state[id]);
y = (double)curand_uniform(&state[id]);
z = x*x+y*y;
if (z<=1) pi[id]=1;
else pi[id]=0;
}
__syncthreads();
// Find the total number of points that lie inside the quadrant of the circle.
// __syncthreads() only synchronizes within one block, so let thread 0 do this serial
// sum (adequate for a single block; larger runs would need a real reduction or atomicAdd).
if (id == 0) {
for (int i=1; i<count;i++) {
pi[0]+=pi[i];
}
}
}
int main(int argc, char** argv) {
int niter=0;
double pi;
double* d_pi;
curandState *d_state;
printf("Enter the number of iterations used to estimate pi: ");
scanf("%d",&niter);
double* h_pi = new double[niter];
if (cudaMalloc(&d_pi, sizeof(double)*niter) != cudaSuccess) { //d_pi holds doubles, so size it with sizeof(double)
printf("Error in memory allocation.\n");
return 0;
}
if (cudaMalloc(&d_state, sizeof(curandState)*niter) != cudaSuccess) {
printf("Error in memory allocation for random state.\n");
return 0;
}
if (cudaMemcpy (d_pi, h_pi, sizeof(double)*niter, cudaMemcpyHostToDevice) != cudaSuccess) {
printf("Error in copy from host to device.\n");
cudaFree(d_pi);
return 0;
}
// Number of threads = 1024, number of blocks = (int) (niter/threads)+1
init_stuff<<<(int) niter/1024+1, 1024>>>(d_state, niter);
cudaMonte<<<(int) niter/1024+1, 1024>>>(d_pi, niter, d_state);
if (cudaMemcpy (h_pi, d_pi, sizeof(double)*niter, cudaMemcpyDeviceToHost) != cudaSuccess) {
printf("Error in copy from device to host.\n");
delete[] h_pi;
cudaFree(d_pi);
return 0;
}
// Final Estimate of pi
pi= (double) h_pi[0]/niter*4;
printf("# of trials= %d , estimate of pi is %g \n",niter,pi);
}
|
f10d528219d497137b0a9ad5aa853a3c4e997462.hip | // !!! This is a file automatically generated by hipify!!!
#include <drivers/convolution_driver.h>
#include <drivers/conv_cnn_driver.h>
#include <core/errors.h>
#include <device/cuda_utils.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <device/gen_random.h>
#include <nn/read_nn.h>
#include <nn/read_vgg.h>
#include <nn/nn_decl.h>
#include <functions/dev_initializations.h>
#include <functions/eval_convolution.h>
#include <functions/dev_backprop_convolution.h>
#include <functions/cnn_forward.h>
#include <functions/cnn_backward.h>
#include <functions/cnn_hessian_vec.h>
#include <utilities/print_utils.h>
#include <utilities/utils.h>
#include <utilities/dataset.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#define DATASET_SIZE 4
inline int getMaxZSizes( int *elements, int count ) {
int m = elements [ 0 ] - elements[ 1 ];
for (int i = 1; i < count; i ++)
if ( m < (elements[ i+1 ] - elements[ i ] )) m = elements[ i+1 ] - elements[ i ];
return m;
}
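//Reorders the dataset from sample-major layout [point][channel][h*w] to channel-major layout [channel][point][h*w].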
void convertDataset( real *src, real *tgt, int ch, int h, int w)
{
for (int i = 0; i < ch; i ++){
for (int p = 0; p < DATASET_SIZE; p ++) {
for (int j = 0; j < h * w; j ++) {
tgt[ i * h * w * DATASET_SIZE +
p * h * w +
j ] = src[ p * h * w * ch +
i * h * w +
j ];
}
}
}
}
void initConvCNNDataset( DEVICE_DATASET *data, SCRATCH_AREA *scratch,
int h, int w, int ch, int k, int out_ch, int numClasses )
{
int points = DATASET_SIZE;
real counter=0;
real val = 1;
real *host = scratch->hostWorkspace;
real *dev = scratch->devWorkspace;
real *batchMajorDataset = host + h * w * ch * DATASET_SIZE;
for (int p = 0; p < points; p ++) {
for (int i = 0; i < ch; i ++){
for (int c = 0; c < w; c ++) {
for (int r = 0; r < h; r ++) {
host[ i * h * w * points + h * c + r + p * h * w ] = 1;
}
}
}
}
cuda_malloc( (void **)&data->trainSetX, sizeof(real) * ch * h * w * points, 0,
ERROR_MEM_ALLOC );
getRandomVector( ch * h * w * points, NULL, data->trainSetX, RAND_UNIFORM );
writeVector( data->trainSetX, ch * h * w * points, "./cuda_dataset.txt", 0, host);
readVector( host, ch * h * w * points, "./cuda_dataset.txt", 0, NULL );
convertDataset( host, batchMajorDataset, ch, h, w );
copy_host_device( batchMajorDataset, data->trainSetX, sizeof(real) * ch * h * w * points,
hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
for (int p = 0; p < points; p ++) host[ p ] = 1;
cuda_malloc( (void **)&data->trainSetY, sizeof(real) * points, 0,
ERROR_MEM_ALLOC );
copy_host_device( host, data->trainSetY, sizeof(real) * points,
hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
data->trainSizeX = points;
data->trainSizeY = points;
data->numClasses = numClasses;
}
void initConvCNNVector( real *host, real *devPtr, int pSize )
{
for (int i = 0; i < pSize; i ++ ) host[ i ] = 0.1;
getRandomVector( pSize, NULL, devPtr, RAND_UNIFORM );
writeVector( devPtr, pSize, "./cuda_weights2.txt", 0, host);
real alpha = 0.1;
cublasCheckError( hipblasDscal( cublasHandle, pSize, &alpha, devPtr, 1 ) );
readVector( host, pSize, "./cuda_weights2.txt", 0, NULL );
copy_host_device( host, devPtr, sizeof(real) * pSize,
hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
}
void initConvCNNWeights( CNN_MODEL *model, DEVICE_DATASET *data, real *hostPtr )
{
real *weights = data->weights;
int *wOffsets = model->wOffsets;
int *bOffsets = model->bOffsets;
int index = 0;
fprintf( stderr, " Model Parameters: %d, .... %d \n", model->pSize, index );
for (int i = 0; i < model->pSize; i ++ ) hostPtr[ i ] = 0.1;
copy_host_device( hostPtr, weights, sizeof(real) * model->pSize,
hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
getRandomVector( model->pSize, NULL, data->weights, RAND_UNIFORM );
real alpha = .1;
cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &alpha, data->weights, 1 ) );
writeVector( data->weights, model->pSize, "./cuda_weights.txt", 0, hostPtr);
readVector( hostPtr, model->pSize, "./cuda_weights.txt", 0, NULL );
//readVector( hostPtr, model->pSize, "./lenet_kaiming.txt", 0, NULL );
copy_host_device( hostPtr, weights, sizeof (real) * model->pSize,
hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
}
void printConvCNNGradient( real *data, int *wOffsets, int *bOffsets, CNN_MODEL *model )
{
CONV_LAYER *c;
FC_LAYER *f;
c = & (model->convLayer[ 0 ] );
fprintf( stderr, "W0 -- \n");
print4DMatrix( data, c->outChannels, c->inChannels, c->kSize, c->kSize );
//print2DMatrix( data, c->kSize, c->kSize );
//fprintf( stderr, "b0 -- \n");
//print2DMatrix( data + bOffsets[ 0 ], 1, c->outChannels );
/*
c = & (model->convLayer[ 1 ] );
fprintf( stderr, "W1 -- \n");
print4DMatrix( data + wOffsets[1], c->outChannels, c->inChannels, c->kSize, c->kSize );
//print2DMatrix( data + wOffsets[ 1] , c->kSize, c->kSize );
fprintf( stderr, "b1 -- \n");
print2DMatrix( data + bOffsets[1], 1, c->outChannels );
*/
f = & (model->fcLayer[ 0 ] );
fprintf( stderr, "W1 -- \n");
print2DMatrix( data + wOffsets[ 1 ], f->out, f->in );
//fprintf( stderr, "b1 -- \n");
//print2DMatrix( data + bOffsets[ 2 ], 1, f->out );
/*
f = & (model->fcLayer[ 1 ] );
fprintf( stderr, "W3 -- \n");
print2DMatrix( data + wOffsets[ 3 ], f->out, f->in );
fprintf( stderr, "b3 -- \n");
print2DMatrix( data + bOffsets[ 3 ], 1, f->out );
f = & (model->fcLayer[ 2 ] );
fprintf( stderr, "W4 -- \n");
print2DMatrix( data + wOffsets[ 4 ], f->out, f->in );
fprintf( stderr, "b4 -- \n");
print2DMatrix( data + bOffsets[ 4 ], 1, f->out );
*/
}
void testConvCNN( CNN_MODEL *model, DEVICE_DATASET *data,
SCRATCH_AREA *scratch )
{
real ll = 0;
/*
int NUM_CLASSES = 4;
int HEIGHT = 8;
int WIDTH = DATASET_SIZE;
int CHANNELS = 1;
int KERNEL = 0;
int OUT_CHANNELS = 1;
readFCNN( model, DATASET_SIZE);
*/
int HEIGHT = 16;
int WIDTH = 16;
int CHANNELS = 2;
int KERNEL = 3;
int OUT_CHANNELS = 4;
int NUM_CLASSES = 10 ;
readTestCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, WIDTH, HEIGHT, DATASET_SIZE);
/*
int NUM_CLASSES = 10;
int HEIGHT = 32;
int WIDTH = 32;
int CHANNELS = 3;
int OUT_CHANNELS = 0;
int KERNEL = 3;
readLenetCNN( model, CHANNELS, HEIGHT, WIDTH, DATASET_SIZE );
*/
/*
int NUM_CLASSES = 6 * 8 * 8;
int HEIGHT = 32;
int WIDTH = 32;
int CHANNELS = 3;
int KERNEL = 5;
int OUT_CHANNELS = 6;
readConv2CNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE );
*/
/*
int NUM_CLASSES = 4 * 4;
int HEIGHT = 6;
int WIDTH = 6;
int CHANNELS = 2;
int KERNEL = 3;
int OUT_CHANNELS = 4;
readConvCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE);
*/
/*
int NUM_CLASSES = 4;
int HEIGHT = 6;
int WIDTH = 6;
int CHANNELS = 1;
int KERNEL = 3;
int OUT_CHANNELS = 1;
readConvCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE);
*/
/*
int NUM_CLASSES = 2 * 3 * 3;
int HEIGHT = 6;
int WIDTH = 6;
int CHANNELS = 1;
int KERNEL = 5;
int OUT_CHANNELS = 2;
readConvCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE);
*/
/*
int NUM_CLASSES = 10;
int HEIGHT = 4;
int WIDTH = 4;
int CHANNELS = 2;
int KERNEL = 3;
int OUT_CHANNELS = 3;
readTestVGG( model, DATASET_SIZE, HEIGHT, WIDTH, NUM_CLASSES, CHANNELS );
*/
fprintf( stderr, "Done with Network initialization... \n");
initConvCNNDataset( data, scratch, HEIGHT, WIDTH, CHANNELS, KERNEL, OUT_CHANNELS, NUM_CLASSES);
fprintf( stderr, "Dataset initialized \n");
cnnInitializations( model, data );
fprintf( stderr, "Done with weights initialization\n");
//compute the gradient here.
int maxDeltaSize = model->maxDeltaSize;
fprintf( stderr, "Max Z size is : %d \n", maxDeltaSize );
real *z = scratch->nextDevPtr;
real *dx = z + model->zSize;
real *gradient= dx + model->zSize;
real *errors = gradient + model->pSize;
real *errors_1 = errors + maxDeltaSize;
real *lossFuncErrors = errors_1 + maxDeltaSize;
real *rz = lossFuncErrors + maxDeltaSize;
real *rerror = rz + model->zSize;
real *probs = rerror + maxDeltaSize;
real *vector = probs + DATASET_SIZE * data->numClasses;
real *hv = vector + model->pSize;
real *nextDevPtr = hv + model->pSize;
real *hostPtr = scratch->nextHostPtr;
scratch->nextDevPtr = nextDevPtr;
real start, total;
initConvCNNWeights( model, data, hostPtr );
fprintf( stderr, "Done with initWeights ... \n");
copy_host_device( scratch->hostWorkspace, data->weights, sizeof(real) * (model->pSize),
hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
fprintf( stderr, "Done with printing initial values of Gradient... \n ");
__THREADS_PER_SAMPLE__ = 1;
data->currentBatch = data->trainSetX;
data->sampledTrainY = data->trainSetY;
start = Get_Time( );
ll = cnnForward( model, data, scratch, z, probs, lossFuncErrors, 0, DATASET_SIZE, MODEL_TRAIN );
fprintf( stderr, "Model Error is %f \n", ll );
//fprintf( stderr, "Error Vector is ---> \n");
//printVector( errors, 10, NULL, scratch->hostWorkspace );
fprintf( stderr, "Beginning BACKWARD PASS... \n");
copy_device( errors, lossFuncErrors, sizeof(real) * maxDeltaSize, ERROR_MEMCPY_DEVICE_DEVICE );
cnnBackward( model, data, nextDevPtr, z, gradient, dx, errors, errors_1, 0, model->batchSize, scratch->nextHostPtr );
fprintf( stderr, "Printing Gradient... \n ");
copy_host_device( scratch->hostWorkspace, gradient, sizeof(real) * (model->pSize),
hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
fprintf( stderr, "Done with Gradient......... \n\n\n\n\n\n\n\n\n\n\n\n\n\n");
fprintf( stderr, "Begin with HessianVector here... \n\n\n");
initConvCNNVector( scratch->hostWorkspace, vector, model->pSize );
fprintf( stderr, "Done with vector initialization... \n");
cuda_memset( hv, 0, model->pSize * sizeof(real), ERROR_MEMSET );
fprintf( stderr, "Done with hv initialization... \n");
fprintf( stderr, "Printing vector to be used for HV computation... \n");
copy_host_device( scratch->hostWorkspace, vector, sizeof(real) * model->pSize,
hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
fprintf( stderr, "\n\n\n\n\n\n\n");
cnnHv ( model, data, z, probs, lossFuncErrors, dx, vector, hv, 0, DATASET_SIZE, nextDevPtr, scratch->nextHostPtr, 0 );
total = Get_Timing_Info( start );
fprintf( stderr, " Time to compute one hessian vec is: %g\n\n\n",
total );
fprintf( stderr, "Printing ... the HessianVec result... \n");
copy_host_device( scratch->hostWorkspace, hv, sizeof(real) * (model->pSize),
hipMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
//revert back here.
scratch->nextDevPtr = z;
}
| f10d528219d497137b0a9ad5aa853a3c4e997462.cu |
#include <drivers/convolution_driver.h>
#include <drivers/conv_cnn_driver.h>
#include <core/errors.h>
#include <device/cuda_utils.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <device/gen_random.h>
#include <nn/read_nn.h>
#include <nn/read_vgg.h>
#include <nn/nn_decl.h>
#include <functions/dev_initializations.h>
#include <functions/eval_convolution.h>
#include <functions/dev_backprop_convolution.h>
#include <functions/cnn_forward.h>
#include <functions/cnn_backward.h>
#include <functions/cnn_hessian_vec.h>
#include <utilities/print_utils.h>
#include <utilities/utils.h>
#include <utilities/dataset.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#define DATASET_SIZE 4
inline int getMaxZSizes( int *elements, int count ) {
int m = elements [ 0 ] - elements[ 1 ];
for (int i = 1; i < count; i ++)
if ( m < (elements[ i+1 ] - elements[ i ] )) m = elements[ i+1 ] - elements[ i ];
return m;
}
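//Reorders the dataset from sample-major layout [point][channel][h*w] to channel-major layout [channel][point][h*w].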
void convertDataset( real *src, real *tgt, int ch, int h, int w)
{
for (int i = 0; i < ch; i ++){
for (int p = 0; p < DATASET_SIZE; p ++) {
for (int j = 0; j < h * w; j ++) {
tgt[ i * h * w * DATASET_SIZE +
p * h * w +
j ] = src[ p * h * w * ch +
i * h * w +
j ];
}
}
}
}
void initConvCNNDataset( DEVICE_DATASET *data, SCRATCH_AREA *scratch,
int h, int w, int ch, int k, int out_ch, int numClasses )
{
int points = DATASET_SIZE;
real counter=0;
real val = 1;
real *host = scratch->hostWorkspace;
real *dev = scratch->devWorkspace;
real *batchMajorDataset = host + h * w * ch * DATASET_SIZE;
for (int p = 0; p < points; p ++) {
for (int i = 0; i < ch; i ++){
for (int c = 0; c < w; c ++) {
for (int r = 0; r < h; r ++) {
host[ i * h * w * points + h * c + r + p * h * w ] = 1;
}
}
}
}
cuda_malloc( (void **)&data->trainSetX, sizeof(real) * ch * h * w * points, 0,
ERROR_MEM_ALLOC );
getRandomVector( ch * h * w * points, NULL, data->trainSetX, RAND_UNIFORM );
writeVector( data->trainSetX, ch * h * w * points, "./cuda_dataset.txt", 0, host);
readVector( host, ch * h * w * points, "./cuda_dataset.txt", 0, NULL );
convertDataset( host, batchMajorDataset, ch, h, w );
copy_host_device( batchMajorDataset, data->trainSetX, sizeof(real) * ch * h * w * points,
cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
for (int p = 0; p < points; p ++) host[ p ] = 1;
cuda_malloc( (void **)&data->trainSetY, sizeof(real) * points, 0,
ERROR_MEM_ALLOC );
copy_host_device( host, data->trainSetY, sizeof(real) * points,
cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
data->trainSizeX = points;
data->trainSizeY = points;
data->numClasses = numClasses;
}
void initConvCNNVector( real *host, real *devPtr, int pSize )
{
for (int i = 0; i < pSize; i ++ ) host[ i ] = 0.1;
getRandomVector( pSize, NULL, devPtr, RAND_UNIFORM );
writeVector( devPtr, pSize, "./cuda_weights2.txt", 0, host);
real alpha = 0.1;
cublasCheckError( cublasDscal( cublasHandle, pSize, &alpha, devPtr, 1 ) );
readVector( host, pSize, "./cuda_weights2.txt", 0, NULL );
copy_host_device( host, devPtr, sizeof(real) * pSize,
cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
}
void initConvCNNWeights( CNN_MODEL *model, DEVICE_DATASET *data, real *hostPtr )
{
real *weights = data->weights;
int *wOffsets = model->wOffsets;
int *bOffsets = model->bOffsets;
int index = 0;
fprintf( stderr, " Model Parameters: %d, .... %d \n", model->pSize, index );
for (int i = 0; i < model->pSize; i ++ ) hostPtr[ i ] = 0.1;
copy_host_device( hostPtr, weights, sizeof(real) * model->pSize,
cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
getRandomVector( model->pSize, NULL, data->weights, RAND_UNIFORM );
real alpha = .1;
cublasCheckError( cublasDscal( cublasHandle, model->pSize, &alpha, data->weights, 1 ) );
writeVector( data->weights, model->pSize, "./cuda_weights.txt", 0, hostPtr);
readVector( hostPtr, model->pSize, "./cuda_weights.txt", 0, NULL );
//readVector( hostPtr, model->pSize, "./lenet_kaiming.txt", 0, NULL );
copy_host_device( hostPtr, weights, sizeof (real) * model->pSize,
cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE );
}
void printConvCNNGradient( real *data, int *wOffsets, int *bOffsets, CNN_MODEL *model )
{
CONV_LAYER *c;
FC_LAYER *f;
c = & (model->convLayer[ 0 ] );
fprintf( stderr, "W0 -- \n");
print4DMatrix( data, c->outChannels, c->inChannels, c->kSize, c->kSize );
//print2DMatrix( data, c->kSize, c->kSize );
//fprintf( stderr, "b0 -- \n");
//print2DMatrix( data + bOffsets[ 0 ], 1, c->outChannels );
/*
c = & (model->convLayer[ 1 ] );
fprintf( stderr, "W1 -- \n");
print4DMatrix( data + wOffsets[1], c->outChannels, c->inChannels, c->kSize, c->kSize );
//print2DMatrix( data + wOffsets[ 1] , c->kSize, c->kSize );
fprintf( stderr, "b1 -- \n");
print2DMatrix( data + bOffsets[1], 1, c->outChannels );
*/
f = & (model->fcLayer[ 0 ] );
fprintf( stderr, "W1 -- \n");
print2DMatrix( data + wOffsets[ 1 ], f->out, f->in );
//fprintf( stderr, "b1 -- \n");
//print2DMatrix( data + bOffsets[ 2 ], 1, f->out );
/*
f = & (model->fcLayer[ 1 ] );
fprintf( stderr, "W3 -- \n");
print2DMatrix( data + wOffsets[ 3 ], f->out, f->in );
fprintf( stderr, "b3 -- \n");
print2DMatrix( data + bOffsets[ 3 ], 1, f->out );
f = & (model->fcLayer[ 2 ] );
fprintf( stderr, "W4 -- \n");
print2DMatrix( data + wOffsets[ 4 ], f->out, f->in );
fprintf( stderr, "b4 -- \n");
print2DMatrix( data + bOffsets[ 4 ], 1, f->out );
*/
}
void testConvCNN( CNN_MODEL *model, DEVICE_DATASET *data,
SCRATCH_AREA *scratch )
{
real ll = 0;
/*
int NUM_CLASSES = 4;
int HEIGHT = 8;
int WIDTH = DATASET_SIZE;
int CHANNELS = 1;
int KERNEL = 0;
int OUT_CHANNELS = 1;
readFCNN( model, DATASET_SIZE);
*/
int HEIGHT = 16;
int WIDTH = 16;
int CHANNELS = 2;
int KERNEL = 3;
int OUT_CHANNELS = 4;
int NUM_CLASSES = 10 ;
readTestCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, WIDTH, HEIGHT, DATASET_SIZE);
/*
int NUM_CLASSES = 10;
int HEIGHT = 32;
int WIDTH = 32;
int CHANNELS = 3;
int OUT_CHANNELS = 0;
int KERNEL = 3;
readLenetCNN( model, CHANNELS, HEIGHT, WIDTH, DATASET_SIZE );
*/
/*
int NUM_CLASSES = 6 * 8 * 8;
int HEIGHT = 32;
int WIDTH = 32;
int CHANNELS = 3;
int KERNEL = 5;
int OUT_CHANNELS = 6;
readConv2CNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE );
*/
/*
int NUM_CLASSES = 4 * 4;
int HEIGHT = 6;
int WIDTH = 6;
int CHANNELS = 2;
int KERNEL = 3;
int OUT_CHANNELS = 4;
readConvCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE);
*/
/*
int NUM_CLASSES = 4;
int HEIGHT = 6;
int WIDTH = 6;
int CHANNELS = 1;
int KERNEL = 3;
int OUT_CHANNELS = 1;
readConvCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE);
*/
/*
int NUM_CLASSES = 2 * 3 * 3;
int HEIGHT = 6;
int WIDTH = 6;
int CHANNELS = 1;
int KERNEL = 5;
int OUT_CHANNELS = 2;
readConvCNN( model, CHANNELS, OUT_CHANNELS, NUM_CLASSES, HEIGHT, WIDTH, DATASET_SIZE);
*/
/*
int NUM_CLASSES = 10;
int HEIGHT = 4;
int WIDTH = 4;
int CHANNELS = 2;
int KERNEL = 3;
int OUT_CHANNELS = 3;
readTestVGG( model, DATASET_SIZE, HEIGHT, WIDTH, NUM_CLASSES, CHANNELS );
*/
fprintf( stderr, "Done with Network initialization... \n");
initConvCNNDataset( data, scratch, HEIGHT, WIDTH, CHANNELS, KERNEL, OUT_CHANNELS, NUM_CLASSES);
fprintf( stderr, "Dataset initialized \n");
cnnInitializations( model, data );
fprintf( stderr, "Done with weights initialization\n");
//compute the gradient here.
int maxDeltaSize = model->maxDeltaSize;
fprintf( stderr, "Max Z size is : %d \n", maxDeltaSize );
real *z = scratch->nextDevPtr;
real *dx = z + model->zSize;
real *gradient= dx + model->zSize;
real *errors = gradient + model->pSize;
real *errors_1 = errors + maxDeltaSize;
real *lossFuncErrors = errors_1 + maxDeltaSize;
real *rz = lossFuncErrors + maxDeltaSize;
real *rerror = rz + model->zSize;
real *probs = rerror + maxDeltaSize;
real *vector = probs + DATASET_SIZE * data->numClasses;
real *hv = vector + model->pSize;
real *nextDevPtr = hv + model->pSize;
real *hostPtr = scratch->nextHostPtr;
scratch->nextDevPtr = nextDevPtr;
real start, total;
initConvCNNWeights( model, data, hostPtr );
fprintf( stderr, "Done with initWeights ... \n");
copy_host_device( scratch->hostWorkspace, data->weights, sizeof(real) * (model->pSize),
cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
fprintf( stderr, "Done with printing initial values of Gradient... \n ");
__THREADS_PER_SAMPLE__ = 1;
data->currentBatch = data->trainSetX;
data->sampledTrainY = data->trainSetY;
start = Get_Time( );
ll = cnnForward( model, data, scratch, z, probs, lossFuncErrors, 0, DATASET_SIZE, MODEL_TRAIN );
fprintf( stderr, "Model Error is %f \n", ll );
//fprintf( stderr, "Error Vector is ---> \n");
//printVector( errors, 10, NULL, scratch->hostWorkspace );
fprintf( stderr, "Beginning BACKWARD PASS... \n");
copy_device( errors, lossFuncErrors, sizeof(real) * maxDeltaSize, ERROR_MEMCPY_DEVICE_DEVICE );
cnnBackward( model, data, nextDevPtr, z, gradient, dx, errors, errors_1, 0, model->batchSize, scratch->nextHostPtr );
fprintf( stderr, "Printing Gradient... \n ");
copy_host_device( scratch->hostWorkspace, gradient, sizeof(real) * (model->pSize),
cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
fprintf( stderr, "Done with Gradient......... \n\n\n\n\n\n\n\n\n\n\n\n\n\n");
fprintf( stderr, "Begin with HessianVector here... \n\n\n");
initConvCNNVector( scratch->hostWorkspace, vector, model->pSize );
fprintf( stderr, "Done with vector initialization... \n");
cuda_memset( hv, 0, model->pSize * sizeof(real), ERROR_MEMSET );
fprintf( stderr, "Done with hv initialization... \n");
fprintf( stderr, "Printing vector to be used for HV computation... \n");
copy_host_device( scratch->hostWorkspace, vector, sizeof(real) * model->pSize,
cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
fprintf( stderr, "\n\n\n\n\n\n\n");
cnnHv ( model, data, z, probs, lossFuncErrors, dx, vector, hv, 0, DATASET_SIZE, nextDevPtr, scratch->nextHostPtr, 0 );
total = Get_Timing_Info( start );
fprintf( stderr, " Time to compute one hessian vec is: %g\n\n\n",
total );
fprintf( stderr, "Printing ... the HessianVec result... \n");
copy_host_device( scratch->hostWorkspace, hv, sizeof(real) * (model->pSize),
cudaMemcpyDeviceToHost, ERROR_MEMCPY_HOST_DEVICE );
printConvCNNGradient( scratch->hostWorkspace, model->wOffsets, model->bOffsets, model );
//revert back here.
scratch->nextDevPtr = z;
}
|
b686e21ef328ec0cb35b553a03eb32563917b2e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "matrix.h"
#include <chrono>
using namespace std;
void print_matrix(int*, int, int);
int main() {
int width = 32;
float block = 32;
unsigned ntotal = width * width;
int *h_m = new int[ntotal];
int *h_n = new int[ntotal];
int *h_p = new int[ntotal];
int *h_r = new int[ntotal];
unsigned i, j;
for (i = 0; i < width; i++) {
for (j = 0; j < width; j++) {
h_m[(i*width) + j] = j;
h_n[(i*width) + j] = j;
h_p[(i*width) + j] = 0;
h_r[(i*width) + j] = 0;
}
}
cout << "Matriz 1:" << endl;
print_matrix(h_m, width, width); cout << endl;
cout << "Matriz 2:" << endl;
print_matrix(h_n, width, width); cout << endl;
//high_resolution_clock::time_point t1 = high_resolution_clock::now();
cout << "Multiplicacion Normal" << endl;
square_matrix_mult(h_m, h_n, h_p, width, block, 'n');
//high_resolution_clock::time_point t2 = high_resolution_clock::now();
//auto duration1 = duration_cast<microseconds>(t2 - t1).count();
print_matrix(h_p, width, width); cout << endl;
cout << "Multiplicacion Tiles" << endl;
square_matrix_mult(h_m, h_n, h_r, width, block, 't');
print_matrix(h_r, width, width); cout << endl;
delete[] h_m; //arrays allocated with new[] must be released with delete[]
delete[] h_n;
delete[] h_p;
delete[] h_r;
return 0;
}
void print_matrix(int* m_x, int fila, int columna) {
for (int i = 0; i < fila; i++) {
for (int j = 0; j < columna; j++) {
cout << m_x[i*columna + j] << " ";
}
cout << endl;
}
}
| b686e21ef328ec0cb35b553a03eb32563917b2e9.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "matrix.h"
#include <chrono>
using namespace std;
void print_matrix(int*, int, int);
int main() {
int width = 32;
float block = 32;
unsigned ntotal = width * width;
int *h_m = new int[ntotal];
int *h_n = new int[ntotal];
int *h_p = new int[ntotal];
int *h_r = new int[ntotal];
unsigned i, j;
for (i = 0; i < width; i++) {
for (j = 0; j < width; j++) {
h_m[(i*width) + j] = j;
h_n[(i*width) + j] = j;
h_p[(i*width) + j] = 0;
h_r[(i*width) + j] = 0;
}
}
cout << "Matriz 1:" << endl;
print_matrix(h_m, width, width); cout << endl;
cout << "Matriz 2:" << endl;
print_matrix(h_n, width, width); cout << endl;
//high_resolution_clock::time_point t1 = high_resolution_clock::now();
cout << "Multiplicacion Normal" << endl;
square_matrix_mult(h_m, h_n, h_p, width, block, 'n');
//high_resolution_clock::time_point t2 = high_resolution_clock::now();
//auto duration1 = duration_cast<microseconds>(t2 - t1).count();
print_matrix(h_p, width, width); cout << endl;
cout << "Multiplicacion Tiles" << endl;
square_matrix_mult(h_m, h_n, h_r, width, block, 't');
print_matrix(h_r, width, width); cout << endl;
	delete[] h_m;
	delete[] h_n;
	delete[] h_p;
	delete[] h_r;
return 0;
}
void print_matrix(int* m_x, int fila, int columna) {
for (int i = 0; i < fila; i++) {
for (int j = 0; j < columna; j++) {
cout << m_x[i*columna + j] << " ";
}
cout << endl;
}
}
|
ca68de38f18d7ec3f4a149b7ebe3b89905792257.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH.h>
#include <THHGeneral.h>
#include "utils.h"
#define VEC_0(ARRAY) ((ARRAY).x)
#define VEC_1(ARRAY) ((ARRAY).y)
#define VEC_2(ARRAY) ((ARRAY).z)
#define VEC_3(ARRAY) ((ARRAY).w)
#define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))])
#define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))])
#define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))])
#define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
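// The kernel below diagonalises, per pixel, the 2x2 symmetric (structure) tensor
//     | d  e |
//     | e  g |
// Its eigenvalues are lamda = ( (d+g) +/- sqrt( (d-g)^2 + 4*e*e ) ) / 2, and an
// unnormalised eigenvector for a given lamda is (e, lamda - d) with norm L.
// The four outputs are those eigenvectors normalised and then scaled by their
// eigenvalue, with a small epsilon guarding the division by L.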
__global__ void kernel_EigenAnalysis_updateOutput(
const int n,
const float* input, const long4 input_size, const long4 input_stride,
float* output, const long4 output_size, const long4 output_stride
) {
int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (intIndex >= n) {
return;
}
int intBatch = ( intIndex / VEC_3(output_size) / VEC_2(output_size) ) % VEC_0(output_size); //b
int intY = ( intIndex / VEC_3(output_size) ) % VEC_2(output_size); //y
int intX = ( intIndex ) % VEC_3(output_size); //x
int offset = intBatch*input_stride.x + intY*input_stride.z + intX*input_stride.w;
float d = input[offset];
offset += input_stride.y;
float e = input[offset];
offset += input_stride.y;
float g = input[offset];
float delta = sqrt(4* e*e + (d-g)*(d-g));
float lamda0 = (d+g + delta)/2;
float lamda1 = lamda0 - delta;
float L0 = sqrt(e*e+(lamda0-d)*(lamda0-d));
float L1 = sqrt(e*e+(lamda1-d)*(lamda1-d));
const float epsilon = 0.00000001;
float x0= e*lamda0/(L0+epsilon);
float y0= (lamda0-d)*lamda0/(L0+epsilon);
float x1= e*lamda1/(L1+epsilon);
float y1= (lamda1-d)*lamda1/(L1+epsilon);
offset= intBatch*output_stride.x + intY*output_stride.z + intX*output_stride.w;
output[offset]=x0;offset+=output_stride.y;
output[offset]=y0;offset+=output_stride.y;
output[offset]=x1;offset+=output_stride.y;
output[offset]=y1;
}
//input: [batch, 3, h, w] : 0:x*x, 1:x*y, 2:y*y
//output:[batch, 4, h, w] : 0:x_big, 1:y_big, 2: x_small, 3:y_small
void EigenAnalysis_kernel_forward(
at::Tensor input,
at::Tensor output
) {
int n = input.size(0) * input.size(2)* input.size(3);
hipLaunchKernelGGL(( kernel_EigenAnalysis_updateOutput), dim3((n + 512 - 1) / 512), dim3(512) , 0, 0,
n,
TENSOR_INFO(input),
TENSOR_INFO(output)
// input.data(),make_long4(input.size(0),input.size(1),input.size(2),input.size(3)), make_long4(input.stride(0),input.stride(1),input.stride(2),input.stride(3)),
// output.data(),make_long4(output.size(0),output.size(1),output.size(2),output.size(3)), make_long4(output.stride(0),output.stride(1),output.stride(2),output.stride(3))
);
THCudaCheck(hipGetLastError());
}
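// Backward pass: given gradients w.r.t. the four outputs (x0, y0, x1, y1), the
// kernel below chain-rules through lamda0/lamda1 and the eigenvector norms L0/L1
// to accumulate gradients w.r.t. the three tensor entries (d, e, g). The
// d_lamdaX_dY terms are the analytic derivatives of the eigenvalues; if delta,
// L0 or L1 vanishes the gradient is simply set to zero to avoid division by zero.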
__global__ void kernel_EigenAnalysis_updateGradient(
const int n,
const float* input, const long4 input_size, const long4 input_stride,
const float* grad_output, const long4 grad_output_size, const long4 grad_output_stride,
float* grad_input, const long4 grad_input_size, const long4 grad_input_stride
) {
int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (intIndex >= n) {
return;
}
const float epsilon = 0.00000000;
int intBatch = ( intIndex / VEC_3(input_size) / VEC_2(input_size) ) % VEC_0(input_size); //b
int intY = ( intIndex / VEC_3(input_size) ) % VEC_2(input_size); //y
int intX = ( intIndex ) % VEC_3(input_size); //x
int offset = intBatch*input_stride.x + intY*input_stride.z + intX*input_stride.w;
float d = input[offset];
offset += input_stride.y;
float e = input[offset];
offset += input_stride.y;
float g = input[offset];
float delta = sqrt(4* e*e + (d-g)*(d-g))+epsilon;
float lamda0 = (d+g + delta)/2;
float lamda1 = lamda0 - delta;
float L0 = sqrt(e*e+(lamda0-d)*(lamda0-d))+epsilon;
float L1 = sqrt(e*e+(lamda1-d)*(lamda1-d))+epsilon;
if (L0 == 0.0 || L1 == 0.0 || delta == 0.0 )
{
offset= intBatch*grad_input_stride.x + intY*grad_input_stride.z + intX*grad_input_stride.w;
grad_input[offset]=0.0f;offset+=grad_input_stride.y;
grad_input[offset]=0.0f;offset+=grad_input_stride.y;
grad_input[offset]=0.0f;
return ;
}
float d_lamda0_dd = (1+ (d-g)/delta)/2;
float d_lamda1_dd = (1- (d-g)/delta)/2;
float d_lamda0_dg = d_lamda1_dd;
float d_lamda1_dg = d_lamda0_dd;
float d_lamda0_de = + 2* e/ delta;
float d_lamda1_de = - d_lamda0_de;
offset = intBatch*grad_output_stride.x + intY*grad_output_stride.z + intX*grad_output_stride.w;
float grad_x0 = grad_output[offset];offset+=grad_output_stride.y;
float grad_y0 = grad_output[offset];offset+=grad_output_stride.y;
float grad_x1 = grad_output[offset];offset+=grad_output_stride.y;
float grad_y1 = grad_output[offset];
float grad_e = (lamda0/L0+ e*d_lamda0_de/L0 - e*lamda0/L0/L0/L0*(e+(lamda0-d)*d_lamda0_de))*grad_x0;
grad_e += (lamda1/L1+ e*d_lamda1_de/L1 - e*lamda1/L1/L1/L1*(e+(lamda1-d)*d_lamda1_de))*grad_x1;
grad_e += ((2*lamda0- d)/L0* d_lamda0_de - (lamda0-d)*lamda0*(e+(lamda0-d)*d_lamda0_de)/L0/L0/L0)*grad_y0;
grad_e += ((2*lamda1- d)/L1* d_lamda1_de - (lamda1-d)*lamda1*(e+(lamda1-d)*d_lamda1_de)/L1/L1/L1)*grad_y1;
float grad_d = (e/L0*d_lamda0_dd + e*lamda0/L0/L0/L0*(lamda0-d)*(1-d_lamda0_dd))*grad_x0;
grad_d += (e/L1*d_lamda1_dd + e*lamda1/L1/L1/L1*(lamda1-d)*(1-d_lamda1_dd))*grad_x1;
grad_d += (((d_lamda0_dd -1)*lamda0 + (lamda0-d)*d_lamda0_dd)/L0 + (lamda0-d)*(lamda0-d)*lamda0*(1-d_lamda0_dd)/L0/L0/L0)
*grad_y0;
grad_d += (((d_lamda1_dd -1)*lamda1 + (lamda1-d)*d_lamda1_dd)/L1 + (lamda1-d)*(lamda1-d)*lamda1*(1-d_lamda1_dd)/L1/L1/L1)
*grad_y1;
float grad_g = (e/L0*d_lamda0_dg + e*lamda0/L0/L0/L0*(lamda0-d)*(-d_lamda0_dg))*grad_x0;
grad_g += (e/L1*d_lamda1_dg + e*lamda1/L1/L1/L1*(lamda1-d)*(-d_lamda1_dg))*grad_x1;
grad_g += ((d_lamda0_dg*lamda0 + (lamda0-d)*d_lamda0_dg)/L0 + (lamda0-d)*(lamda0-d)*lamda0*(-d_lamda0_dg)/L0/L0/L0)
*grad_y0;
grad_g += ((d_lamda1_dg*lamda1 + (lamda1-d)*d_lamda1_dg)/L1 + (lamda1-d)*(lamda1-d)*lamda1*(-d_lamda1_dg)/L1/L1/L1)
*grad_y1;
offset= intBatch*grad_input_stride.x + intY*grad_input_stride.z + intX*grad_input_stride.w;
grad_input[offset]=grad_d;offset+=grad_input_stride.y;
grad_input[offset]=grad_e;offset+=grad_input_stride.y;
grad_input[offset]=grad_g;
}
// output = input * v * h
// grad_v = input * h * grad_out
/*
void EigenAnalysis_kernel_backward(
THCudaTensor* input,
THCudaTensor* grad_output,
THCudaTensor* grad_input
) {
int n = 0;
n = THCudaTensor_size(state, grad_output,0)*THCudaTensor_size(state, grad_output,2)*THCudaTensor_size(state, grad_output,3);
//n = grad_output->size[0]* grad_output->size[2]*grad_output->size[3];
kernel_EigenAnalysis_updateGradient<<< (n + 512 - 1) / 512, 512, 0, THCState_getCurrentStream(state) >>>(
n,
THCudaTensor_data(state, input), get_size(state, input), get_stride(state, input), //make_long4(input->size[0], input->size[1], input->size[2], input->size[3]), make_long4(input->stride[0], input->stride[1], input->stride[2], input->stride[3]),
THCudaTensor_data(state, grad_output), get_size(state, grad_output), get_stride(state, grad_output), //make_long4(grad_output->size[0], grad_output->size[1], grad_output->size[2], grad_output->size[3]), make_long4(grad_output->stride[0], grad_output->stride[1], grad_output->stride[2], grad_output->stride[3]),
THCudaTensor_data(state, grad_input), get_size(state, grad_input), get_stride(state, grad_input) //make_long4(grad_input->size[0], grad_input->size[1], grad_input->size[2], grad_input->size[3]), make_long4(grad_input->stride[0], grad_input->stride[1], grad_input->stride[2], grad_input->stride[3])
);
THCudaCheck(hipGetLastError());
}
*/
void EigenAnalysis_kernel_backward(
at::Tensor input,
at::Tensor grad_output,
at::Tensor grad_input
)
{
int n = grad_output.size(0) * grad_output.size(2)* grad_output.size(3);
hipLaunchKernelGGL(( kernel_EigenAnalysis_updateGradient), dim3((n + 512 - 1) / 512), dim3(512) , 0, 0,
n,
TENSOR_INFO(input),
TENSOR_INFO(grad_output),
TENSOR_INFO(grad_input)
// input.data(),make_long4(input.size(0),input.size(1),input.size(2),input.size(3)), make_long4(input.stride(0),input.stride(1),input.stride(2),input.stride(3)),
// grad_output.data(),make_long4(grad_output.size(0),grad_output.size(1),grad_output.size(2),grad_output.size(3)), make_long4(grad_output.stride(0),grad_output.stride(1),grad_output.stride(2),grad_output.stride(3)),
// grad_input.data(),make_long4(grad_input.size(0),grad_input.size(1),grad_input.size(2),grad_input.size(3)), make_long4(grad_input.stride(0),grad_input.stride(1),grad_input.stride(2),grad_input.stride(3))
);
THCudaCheck(hipGetLastError());
}
#ifdef __cplusplus
}
#endif
| ca68de38f18d7ec3f4a149b7ebe3b89905792257.cu | #include <THC.h>
#include <THCGeneral.h>
#include "utils.h"
#define VEC_0(ARRAY) ((ARRAY).x)
#define VEC_1(ARRAY) ((ARRAY).y)
#define VEC_2(ARRAY) ((ARRAY).z)
#define VEC_3(ARRAY) ((ARRAY).w)
#define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))])
#define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))])
#define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))])
#define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
__global__ void kernel_EigenAnalysis_updateOutput(
const int n,
const float* input, const long4 input_size, const long4 input_stride,
float* output, const long4 output_size, const long4 output_stride
) {
int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (intIndex >= n) {
return;
}
int intBatch = ( intIndex / VEC_3(output_size) / VEC_2(output_size) ) % VEC_0(output_size); //b
int intY = ( intIndex / VEC_3(output_size) ) % VEC_2(output_size); //y
int intX = ( intIndex ) % VEC_3(output_size); //x
int offset = intBatch*input_stride.x + intY*input_stride.z + intX*input_stride.w;
float d = input[offset];
offset += input_stride.y;
float e = input[offset];
offset += input_stride.y;
float g = input[offset];
float delta = sqrt(4* e*e + (d-g)*(d-g));
float lamda0 = (d+g + delta)/2;
float lamda1 = lamda0 - delta;
float L0 = sqrt(e*e+(lamda0-d)*(lamda0-d));
float L1 = sqrt(e*e+(lamda1-d)*(lamda1-d));
const float epsilon = 0.00000001;
float x0= e*lamda0/(L0+epsilon);
float y0= (lamda0-d)*lamda0/(L0+epsilon);
float x1= e*lamda1/(L1+epsilon);
float y1= (lamda1-d)*lamda1/(L1+epsilon);
offset= intBatch*output_stride.x + intY*output_stride.z + intX*output_stride.w;
output[offset]=x0;offset+=output_stride.y;
output[offset]=y0;offset+=output_stride.y;
output[offset]=x1;offset+=output_stride.y;
output[offset]=y1;
}
//input: [batch, 3, h, w] : 0:x*x, 1:x*y, 2:y*y
//output:[batch, 4, h, w] : 0:x_big, 1:y_big, 2: x_small, 3:y_small
void EigenAnalysis_kernel_forward(
at::Tensor input,
at::Tensor output
) {
int n = input.size(0) * input.size(2)* input.size(3);
kernel_EigenAnalysis_updateOutput<<< (n + 512 - 1) / 512, 512 >>>
(
n,
TENSOR_INFO(input),
TENSOR_INFO(output)
// input.data(),make_long4(input.size(0),input.size(1),input.size(2),input.size(3)), make_long4(input.stride(0),input.stride(1),input.stride(2),input.stride(3)),
// output.data(),make_long4(output.size(0),output.size(1),output.size(2),output.size(3)), make_long4(output.stride(0),output.stride(1),output.stride(2),output.stride(3))
);
THCudaCheck(cudaGetLastError());
}
__global__ void kernel_EigenAnalysis_updateGradient(
const int n,
const float* input, const long4 input_size, const long4 input_stride,
const float* grad_output, const long4 grad_output_size, const long4 grad_output_stride,
float* grad_input, const long4 grad_input_size, const long4 grad_input_stride
) {
int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (intIndex >= n) {
return;
}
const float epsilon = 0.00000000;
int intBatch = ( intIndex / VEC_3(input_size) / VEC_2(input_size) ) % VEC_0(input_size); //b
int intY = ( intIndex / VEC_3(input_size) ) % VEC_2(input_size); //y
int intX = ( intIndex ) % VEC_3(input_size); //x
int offset = intBatch*input_stride.x + intY*input_stride.z + intX*input_stride.w;
float d = input[offset];
offset += input_stride.y;
float e = input[offset];
offset += input_stride.y;
float g = input[offset];
float delta = sqrt(4* e*e + (d-g)*(d-g))+epsilon;
float lamda0 = (d+g + delta)/2;
float lamda1 = lamda0 - delta;
float L0 = sqrt(e*e+(lamda0-d)*(lamda0-d))+epsilon;
float L1 = sqrt(e*e+(lamda1-d)*(lamda1-d))+epsilon;
if (L0 == 0.0 || L1 == 0.0 || delta == 0.0 )
{
offset= intBatch*grad_input_stride.x + intY*grad_input_stride.z + intX*grad_input_stride.w;
grad_input[offset]=0.0f;offset+=grad_input_stride.y;
grad_input[offset]=0.0f;offset+=grad_input_stride.y;
grad_input[offset]=0.0f;
return ;
}
float d_lamda0_dd = (1+ (d-g)/delta)/2;
float d_lamda1_dd = (1- (d-g)/delta)/2;
float d_lamda0_dg = d_lamda1_dd;
float d_lamda1_dg = d_lamda0_dd;
float d_lamda0_de = + 2* e/ delta;
float d_lamda1_de = - d_lamda0_de;
offset = intBatch*grad_output_stride.x + intY*grad_output_stride.z + intX*grad_output_stride.w;
float grad_x0 = grad_output[offset];offset+=grad_output_stride.y;
float grad_y0 = grad_output[offset];offset+=grad_output_stride.y;
float grad_x1 = grad_output[offset];offset+=grad_output_stride.y;
float grad_y1 = grad_output[offset];
float grad_e = (lamda0/L0+ e*d_lamda0_de/L0 - e*lamda0/L0/L0/L0*(e+(lamda0-d)*d_lamda0_de))*grad_x0;
grad_e += (lamda1/L1+ e*d_lamda1_de/L1 - e*lamda1/L1/L1/L1*(e+(lamda1-d)*d_lamda1_de))*grad_x1;
grad_e += ((2*lamda0- d)/L0* d_lamda0_de - (lamda0-d)*lamda0*(e+(lamda0-d)*d_lamda0_de)/L0/L0/L0)*grad_y0;
grad_e += ((2*lamda1- d)/L1* d_lamda1_de - (lamda1-d)*lamda1*(e+(lamda1-d)*d_lamda1_de)/L1/L1/L1)*grad_y1;
float grad_d = (e/L0*d_lamda0_dd + e*lamda0/L0/L0/L0*(lamda0-d)*(1-d_lamda0_dd))*grad_x0;
grad_d += (e/L1*d_lamda1_dd + e*lamda1/L1/L1/L1*(lamda1-d)*(1-d_lamda1_dd))*grad_x1;
grad_d += (((d_lamda0_dd -1)*lamda0 + (lamda0-d)*d_lamda0_dd)/L0 + (lamda0-d)*(lamda0-d)*lamda0*(1-d_lamda0_dd)/L0/L0/L0)
*grad_y0;
grad_d += (((d_lamda1_dd -1)*lamda1 + (lamda1-d)*d_lamda1_dd)/L1 + (lamda1-d)*(lamda1-d)*lamda1*(1-d_lamda1_dd)/L1/L1/L1)
*grad_y1;
float grad_g = (e/L0*d_lamda0_dg + e*lamda0/L0/L0/L0*(lamda0-d)*(-d_lamda0_dg))*grad_x0;
grad_g += (e/L1*d_lamda1_dg + e*lamda1/L1/L1/L1*(lamda1-d)*(-d_lamda1_dg))*grad_x1;
grad_g += ((d_lamda0_dg*lamda0 + (lamda0-d)*d_lamda0_dg)/L0 + (lamda0-d)*(lamda0-d)*lamda0*(-d_lamda0_dg)/L0/L0/L0)
*grad_y0;
grad_g += ((d_lamda1_dg*lamda1 + (lamda1-d)*d_lamda1_dg)/L1 + (lamda1-d)*(lamda1-d)*lamda1*(-d_lamda1_dg)/L1/L1/L1)
*grad_y1;
offset= intBatch*grad_input_stride.x + intY*grad_input_stride.z + intX*grad_input_stride.w;
grad_input[offset]=grad_d;offset+=grad_input_stride.y;
grad_input[offset]=grad_e;offset+=grad_input_stride.y;
grad_input[offset]=grad_g;
}
// output = input * v * h
// grad_v = input * h * grad_out
/*
void EigenAnalysis_kernel_backward(
THCudaTensor* input,
THCudaTensor* grad_output,
THCudaTensor* grad_input
) {
int n = 0;
n = THCudaTensor_size(state, grad_output,0)*THCudaTensor_size(state, grad_output,2)*THCudaTensor_size(state, grad_output,3);
//n = grad_output->size[0]* grad_output->size[2]*grad_output->size[3];
kernel_EigenAnalysis_updateGradient<<< (n + 512 - 1) / 512, 512, 0, THCState_getCurrentStream(state) >>>(
n,
THCudaTensor_data(state, input), get_size(state, input), get_stride(state, input), //make_long4(input->size[0], input->size[1], input->size[2], input->size[3]), make_long4(input->stride[0], input->stride[1], input->stride[2], input->stride[3]),
THCudaTensor_data(state, grad_output), get_size(state, grad_output), get_stride(state, grad_output), //make_long4(grad_output->size[0], grad_output->size[1], grad_output->size[2], grad_output->size[3]), make_long4(grad_output->stride[0], grad_output->stride[1], grad_output->stride[2], grad_output->stride[3]),
THCudaTensor_data(state, grad_input), get_size(state, grad_input), get_stride(state, grad_input) //make_long4(grad_input->size[0], grad_input->size[1], grad_input->size[2], grad_input->size[3]), make_long4(grad_input->stride[0], grad_input->stride[1], grad_input->stride[2], grad_input->stride[3])
);
THCudaCheck(cudaGetLastError());
}
*/
void EigenAnalysis_kernel_backward(
at::Tensor input,
at::Tensor grad_output,
at::Tensor grad_input
)
{
int n = grad_output.size(0) * grad_output.size(2)* grad_output.size(3);
kernel_EigenAnalysis_updateGradient<<< (n + 512 - 1) / 512, 512 >>>
(
n,
TENSOR_INFO(input),
TENSOR_INFO(grad_output),
TENSOR_INFO(grad_input)
// input.data(),make_long4(input.size(0),input.size(1),input.size(2),input.size(3)), make_long4(input.stride(0),input.stride(1),input.stride(2),input.stride(3)),
// grad_output.data(),make_long4(grad_output.size(0),grad_output.size(1),grad_output.size(2),grad_output.size(3)), make_long4(grad_output.stride(0),grad_output.stride(1),grad_output.stride(2),grad_output.stride(3)),
// grad_input.data(),make_long4(grad_input.size(0),grad_input.size(1),grad_input.size(2),grad_input.size(3)), make_long4(grad_input.stride(0),grad_input.stride(1),grad_input.stride(2),grad_input.stride(3))
);
THCudaCheck(cudaGetLastError());
}
#ifdef __cplusplus
}
#endif
|
360565c034337a82758e0ea7725ae49d7ed36924.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuGriddingConvolution.h"
#include <thrust/binary_search.h>
#include "cuNDArray_elemwise.h"
#include "cuNDArray_utils.h"
#include "cudaDeviceManager.h"
#include "ConvolverC2NC.cuh"
#include "ConvolverNC2C_atomic.cuh"
#include "ConvolverNC2C_sparse.cuh"
#include "ConvolverNC2C_standard.cuh"
#define CUDA_CONV_MAX_COILS (16)
#define CUDA_CONV_THREADS_PER_KERNEL (192)
namespace Gadgetron
{
template<class T, unsigned int D, template<class, unsigned int> class K>
cuGriddingConvolution<T, D, K>::cuGriddingConvolution(
const vector_td<size_t, D>& matrix_size,
const vector_td<size_t, D>& matrix_size_os,
const K<REAL, D>& kernel,
ConvolutionType conv_type)
: GriddingConvolutionBase<cuNDArray, T, D, K>(
matrix_size, matrix_size_os, kernel)
{
this->initialize(conv_type);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
cuGriddingConvolution<T, D, K>::cuGriddingConvolution(
const vector_td<size_t, D>& matrix_size,
const REAL os_factor,
const K<REAL, D>& kernel,
ConvolutionType conv_type)
: GriddingConvolutionBase<cuNDArray, T, D, K>(
matrix_size, os_factor, kernel)
{
this->initialize(conv_type);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
cuGriddingConvolution<T, D, K>::~cuGriddingConvolution()
{
// Release kernel on device.
hipFree(this->d_kernel_);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::initialize(ConvolutionType conv_type)
{
// Get device number.
if (hipGetDevice(&this->device_) != hipSuccess)
{
throw cuda_error("Could not get device number.");
}
// The convolution does not work properly for very small convolution
// kernel width (experimentally observed limit).
if (this->kernel_.get_width() < REAL(1.8))
throw std::runtime_error("Kernel width must be equal to or larger "
"than 1.8.");
// Matrix size must be a multiple of warp size.
vector_td<size_t, D> warp_size(
(size_t)cudaDeviceManager::Instance()->warp_size(this->device_));
if (sum(this->matrix_size_ % warp_size) ||
sum(this->matrix_size_os_ % warp_size))
{
std::stringstream ss;
ss << "Matrix size must be a multiple of " << warp_size[0] << ".";
throw std::runtime_error(ss.str());
}
// Compute matrix padding.
vector_td<REAL, D> radius(this->kernel_.get_radius());
this->matrix_padding_ = vector_td<size_t, D>(ceil(radius));
this->matrix_padding_ <<= 1;
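        // E.g. a kernel of width 5.5 has radius 2.75, so the padding becomes
        // 2 * ceil(2.75) = 6 cells per dimension (3 on each side of the grid).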
// Compute warp size power.
unsigned int warp_size_power = 0;
unsigned int tmp = cudaDeviceManager::Instance()->warp_size(
this->device_);
while (tmp != 1)
{
tmp >>= 1;
warp_size_power++;
}
this->warp_size_power_ = warp_size_power;
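        // For a warp size of 32 this yields warp_size_power_ = 5; the convolution
        // kernels presumably use it to replace divisions by the warp size with
        // bit shifts.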
// Copy kernel to device.
hipMalloc((void**)&this->d_kernel_, sizeof(K<REAL, D>));
hipMemcpy((void*)this->d_kernel_, (const void*)&this->kernel_,
sizeof(K<REAL, D>), hipMemcpyHostToDevice);
// Set up convolvers.
switch (conv_type)
{
case ConvolutionType::STANDARD:
{
this->conv_C2NC_ = std::make_unique<ConvolverC2NC<T, D, K, ConvolutionType::STANDARD>>(*this);
this->conv_NC2C_ = std::make_unique<ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>>(*this);
break;
}
case ConvolutionType::ATOMIC:
{
this->conv_C2NC_ = std::make_unique<ConvolverC2NC<T, D, K, ConvolutionType::ATOMIC>>(*this);
this->conv_NC2C_ = std::make_unique<ConvolverNC2C<T, D, K, ConvolutionType::ATOMIC>>(*this);
break;
}
case ConvolutionType::SPARSE_MATRIX:
{
this->conv_C2NC_ = std::make_unique<ConvolverC2NC<T, D, K, ConvolutionType::SPARSE_MATRIX>>(*this);
this->conv_NC2C_ = std::make_unique<ConvolverNC2C<T, D, K, ConvolutionType::SPARSE_MATRIX>>(*this);
break;
}
}
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::preprocess(
const cuNDArray<vector_td<REAL, D>>& trajectory,
GriddingConvolutionPrepMode prep_mode)
{
// Call base class method.
GriddingConvolutionBase<cuNDArray, T, D, K>::preprocess(
trajectory, prep_mode);
// Make sure that the trajectory values are within range [-1/2, 1/2].
cuNDArray<REAL> traj_view(
std::vector<size_t>{trajectory.get_number_of_elements() * D},
(REAL*) trajectory.get_data_ptr());
thrust::pair<thrust::device_ptr<REAL>, thrust::device_ptr<REAL>> mm_pair =
thrust::minmax_element(traj_view.begin(), traj_view.end());
if (*mm_pair.first < REAL(-0.5) || *mm_pair.second > REAL(0.5))
{
std::stringstream ss;
ss << "Error: cuGriddingConvolution::preprocess: trajectory [" <<
*mm_pair.first << ", " << *mm_pair.second <<
"] out of range [-1/2, 1/2].";
throw std::runtime_error(ss.str());
}
// Allocate trajectory.
this->trajectory_ = thrust::device_vector<vector_td<REAL, D>>(
trajectory.get_number_of_elements());
CHECK_FOR_CUDA_ERROR();
// Cast matrix size values to floating-point type.
vector_td<REAL, D> matrix_size_os_fp =
vector_td<REAL, D>(this->matrix_size_os_);
vector_td<REAL, D> matrix_size_os_padded_fp =
vector_td<REAL, D>((this->matrix_size_os_ + this->matrix_padding_) >> 1);
// Convert input trajectory from range [-1/2, 1/2] to
// [0, this->matrix_size_os_], and copy to class member.
thrust::transform(trajectory.begin(),
trajectory.end(),
this->trajectory_.begin(),
trajectory_scale<REAL, D>(matrix_size_os_fp,
matrix_size_os_padded_fp));
// Prepare convolution.
if (prep_mode == GriddingConvolutionPrepMode::C2NC ||
prep_mode == GriddingConvolutionPrepMode::ALL)
{
this->conv_C2NC_->prepare(this->trajectory_);
CHECK_FOR_CUDA_ERROR();
}
if (prep_mode == GriddingConvolutionPrepMode::NC2C ||
prep_mode == GriddingConvolutionPrepMode::ALL)
{
this->conv_NC2C_->prepare(this->trajectory_);
CHECK_FOR_CUDA_ERROR();
}
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::compute_C2NC(
const cuNDArray<T>& image,
cuNDArray<T>& samples,
bool accumulate)
{
this->check_inputs(samples, image);
this->conv_C2NC_->compute(image, samples, accumulate);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::compute_NC2C(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
this->check_inputs(samples, image);
this->conv_NC2C_->compute(samples, image, accumulate);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::check_inputs(
const cuNDArray<T>& samples,
const cuNDArray<T>& image)
{
if (image.get_number_of_dimensions() < D)
{
throw std::runtime_error("Number of image dimensions does not "
"match the plan.");
}
vector_td<size_t, D> image_dims =
from_std_vector<size_t, D>(*image.get_dimensions());
if (image_dims != this->matrix_size_os_)
throw std::runtime_error("Image dimensions mismatch.");
if ((samples.get_number_of_elements() == 0) ||
(samples.get_number_of_elements() % (this->num_frames_ * this->num_samples_)))
{
printf("\nConsistency check failed:\n"
"#elements in the samples array: %ld.\n"
"#samples from preprocessing: %zu.\n"
"#frames from preprocessing: %zu.\n",
samples.get_number_of_elements(), this->num_samples_, this->num_frames_);
fflush(stdout);
throw std::runtime_error(
"The number of samples is not a multiple of #samples/frame x "
"#frames as requested through preprocessing.");
}
unsigned int num_batches_in_samples_array =
samples.get_number_of_elements() / (this->num_frames_ * this->num_samples_);
unsigned int num_batches_in_image_array = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++) {
num_batches_in_image_array *= image.get_size(d);
}
num_batches_in_image_array /= this->num_frames_;
if (num_batches_in_samples_array != num_batches_in_image_array) {
printf("\nConsistency check failed:\n"
"#elements in the samples array: %ld.\n"
"#samples from preprocessing: %zu.\n"
"#frames from preprocessing: %zu.\n"
"Leading to %d batches in the samples array.\n"
"The number of batches in the image array is %u.\n",
samples.get_number_of_elements(),
this->num_samples_,
this->num_frames_,
num_batches_in_samples_array,
num_batches_in_image_array);
fflush(stdout);
throw std::runtime_error(
"Number of batches mismatch between samples and image arrays.");
}
}
template<class T, unsigned int D, template<class, unsigned int> class K, ConvolutionType C>
void ConvolverC2NC<T, D, K, C>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
// No need to do anything here.
// Defined for completeness.
}
template<class T, unsigned int D, template<class, unsigned int> class K, ConvolutionType C>
void ConvolverC2NC<T, D, K, C>::compute(
const cuNDArray<T>& image,
cuNDArray<T>& samples,
bool accumulate)
{
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set up grid and threads. We can convolve only max_coils batches per
// run due to shared memory issues.
unsigned int threads_per_block = CUDA_CONV_THREADS_PER_KERNEL;
unsigned int max_coils = CUDA_CONV_MAX_COILS;
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired / max_coils +
(((domain_size_coils_desired % max_coils) == 0) ? 0 : 1);
unsigned int domain_size_coils =
(num_repetitions == 1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail =
(num_repetitions == 1) ? domain_size_coils_desired :
domain_size_coils_desired - (num_repetitions - 1) * domain_size_coils;
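        // Worked example: with num_batches = 24 coils and CUDA_CONV_MAX_COILS = 16,
        // num_repetitions = 2, domain_size_coils = 16 and domain_size_coils_tail = 8,
        // i.e. the first launch convolves 16 coils and the final launch the remaining 8.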
// Block and grid dimensions.
dim3 dimBlock(threads_per_block);
dim3 dimGrid((this->plan_.num_samples_ + dimBlock.x - 1) / dimBlock.x,
this->plan_.num_frames_);
// Calculate how much shared memory to use per thread.
size_t bytes_per_thread = domain_size_coils * sizeof(T);
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof(T);
// Image view dimensions.
auto view_dims = to_std_vector(this->plan_.matrix_size_os_);
view_dims.push_back(this->plan_.num_frames_);
view_dims.push_back(0); // Placeholder for num_coils.
for (unsigned int repetition = 0; repetition < num_repetitions; repetition++)
{
// Number of coils in this repetition.
size_t num_coils = (repetition == num_repetitions - 1) ?
domain_size_coils_tail : domain_size_coils;
view_dims.back() = num_coils;
// Image view for this repetition.
size_t image_view_elements = std::accumulate(
view_dims.begin(), view_dims.end()-1, size_t(1), std::multiplies<>());
auto image_view = cuNDArray<T>(view_dims, const_cast<T*>(
image.get_data_ptr()) + repetition * image_view_elements*domain_size_coils);
auto permutation = std::vector<size_t>(D+2);
permutation[0] = D+1;
std::iota(permutation.begin() + 1, permutation.end(), 0);
auto image_permuted = permute(image_view, permutation);
// Size of shared memory.
size_t sharedMemSize = (repetition == num_repetitions - 1) ?
dimBlock.x * bytes_per_thread_tail :
dimBlock.x * bytes_per_thread;
// Launch CUDA kernel.
hipLaunchKernelGGL(( NFFT_convolve_kernel<T, D, K>)
, dim3(dimGrid), dim3(dimBlock), sharedMemSize, 0,
vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
this->plan_.num_samples_,
num_coils,
raw_pointer_cast(&this->plan_.trajectory_[0]),
image_permuted.get_data_ptr(),
samples.get_data_ptr() + repetition * this->plan_.num_samples_ *
this->plan_.num_frames_ * domain_size_coils,
this->plan_.warp_size_power_,
accumulate,
this->plan_.d_kernel_);
CHECK_FOR_CUDA_ERROR();
}
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
// Allocate storage for and compute temporary prefix-sum variable
// (#cells influenced per sample).
thrust::device_vector<unsigned int> c_p_s(trajectory.size());
thrust::device_vector<unsigned int> c_p_s_ps(trajectory.size());
CHECK_FOR_CUDA_ERROR();
REAL radius = this->plan_.kernel_.get_radius();
transform(trajectory.begin(), trajectory.end(),
c_p_s.begin(), compute_num_cells_per_sample<REAL, D>(radius));
inclusive_scan(c_p_s.begin(), c_p_s.end(), c_p_s_ps.begin(),
thrust::plus<unsigned int>()); // Prefix sum.
// Build the vector of (grid_idx, sample_idx) tuples. Actually kept in
// two separate vectors.
unsigned int num_pairs = c_p_s_ps.back();
c_p_s.clear();
tuples_first = thrust::device_vector<unsigned int>(num_pairs);
tuples_last = thrust::device_vector<unsigned int>(num_pairs);
CHECK_FOR_CUDA_ERROR();
// Fill tuple vector.
write_pairs<REAL, D>(vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
this->plan_.num_samples_, this->plan_.num_frames_,
this->plan_.kernel_.get_width(),
raw_pointer_cast(&trajectory[0]),
raw_pointer_cast(&c_p_s_ps[0]),
raw_pointer_cast(&tuples_first[0]),
raw_pointer_cast(&tuples_last[0]));
c_p_s_ps.clear();
// Sort by grid indices.
sort_by_key(tuples_first.begin(), tuples_first.end(), tuples_last.begin());
// Each bucket_begin[i] indexes the first element of bucket i's list of points.
// Each bucket_end[i] indexes one past the last element of bucket i's list of points.
bucket_begin = thrust::device_vector<unsigned int>(
this->plan_.num_frames_ * prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_));
bucket_end = thrust::device_vector<unsigned int>(
this->plan_.num_frames_ * prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_));
CHECK_FOR_CUDA_ERROR();
// Find the beginning of each bucket's list of points.
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(tuples_first.begin(), tuples_first.end(),
search_begin, search_begin + this->plan_.num_frames_ *
prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_),
bucket_begin.begin());
// Find the end of each bucket's list of points.
thrust::upper_bound(tuples_first.begin(), tuples_first.end(),
search_begin, search_begin + this->plan_.num_frames_ *
prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_),
bucket_end.begin());
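        // Example of the resulting layout: if the sorted grid indices in
        // tuples_first are {0,0,2,2,2,5}, then bucket 0 spans [0,2), bucket 1 is
        // empty (begin == end == 2), bucket 2 spans [2,5) and bucket 5 spans [5,6),
        // so the NC2C kernel can gather, for each grid cell, exactly the samples
        // that influence it.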
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>::compute(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
// Check if warp_size is a power of two. We do some modulus tricks in
// the kernels that depend on this.
if (!((cudaDeviceManager::Instance()->warp_size(this->plan_.device_) &
(cudaDeviceManager::Instance()->warp_size(this->plan_.device_) - 1)) == 0))
{
throw cuda_error("cuGriddingConvolution: Unsupported hardware "
"(warp size is not a power of 2).");
}
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set up grid and threads. We can convolve only max_coils batches per
// run due to shared memory issues.
unsigned int threads_per_block = CUDA_CONV_THREADS_PER_KERNEL;
unsigned int max_coils = CUDA_CONV_MAX_COILS;
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired / max_coils +
(((domain_size_coils_desired % max_coils) == 0) ? 0 : 1);
unsigned int domain_size_coils = (num_repetitions == 1) ?
domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions == 1) ?
domain_size_coils_desired :
domain_size_coils_desired - (num_repetitions - 1) * domain_size_coils;
// Block and grid dimensions.
dim3 dimBlock(threads_per_block);
dim3 dimGrid((prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_) +
dimBlock.x - 1) / dimBlock.x, this->plan_.num_frames_);
// Calculate how much shared memory to use per thread.
size_t bytes_per_thread = domain_size_coils * sizeof(T);
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof(T);
// Define temporary image that includes padding.
std::vector<size_t> padded_image_dims = to_std_vector(
this->plan_.matrix_size_os_ + this->plan_.matrix_padding_);
if (this->plan_.num_frames_ > 1)
padded_image_dims.push_back(this->plan_.num_frames_);
if (num_batches > 1)
padded_image_dims.push_back(num_batches);
cuNDArray<T> padded_image(padded_image_dims);
// Prioritise shared memory over L1 cache.
hipFuncSetCacheConfig(NFFT_H_convolve_kernel<T, D, K>,
hipFuncCachePreferShared);
// Samples view dimensions.
std::vector<size_t> view_dims = {
this->plan_.num_samples_,
this->plan_.num_frames_,
0}; // Placeholder for num_coils.
for (unsigned int repetition = 0; repetition < num_repetitions; repetition++)
{
// Number of coils in this repetition.
size_t num_coils = (repetition == num_repetitions - 1) ?
domain_size_coils_tail : domain_size_coils;
// Samples view for current repetition.
view_dims.back() = num_coils;
cuNDArray<T> samples_view(view_dims,
const_cast<T*>(samples.get_data_ptr()) +
repetition * this->plan_.num_samples_ *
this->plan_.num_frames_ * domain_size_coils);
std::vector<size_t> permute_order = {2, 0, 1};
auto samples_permuted = permute(samples_view, permute_order);
// Size of shared memory.
size_t sharedMemSize = (repetition == num_repetitions - 1) ?
dimBlock.x * bytes_per_thread_tail :
dimBlock.x * bytes_per_thread;
// Launch CUDA kernel.
hipLaunchKernelGGL(( NFFT_H_convolve_kernel<T, D, K>)
, dim3(dimGrid), dim3(dimBlock), sharedMemSize, 0,
vector_td<unsigned int, D>(this->plan_.matrix_size_os_ +
this->plan_.matrix_padding_),
this->plan_.num_samples_,
num_coils,
raw_pointer_cast(&this->plan_.trajectory_[0]),
padded_image.get_data_ptr() + repetition *
prod(this->plan_.matrix_size_os_ +
this->plan_.matrix_padding_) *
this->plan_.num_frames_ *
domain_size_coils,
samples_permuted.get_data_ptr(),
raw_pointer_cast(&tuples_last[0]),
raw_pointer_cast(&bucket_begin[0]),
raw_pointer_cast(&bucket_end[0]),
this->plan_.warp_size_power_,
this->plan_.d_kernel_);
}
CHECK_FOR_CUDA_ERROR();
this->wrap_image(padded_image, image, accumulate);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>::wrap_image(
cuNDArray<T>& source,
cuNDArray<T>& target,
bool accumulate)
{
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < source.get_number_of_dimensions(); d++)
num_batches *= source.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set dimensions of grid/blocks.
unsigned int bdim = 256;
dim3 dimBlock(bdim);
dim3 dimGrid(prod(this->plan_.matrix_size_os_) / bdim,
this->plan_.num_frames_ * num_batches);
// Safety check.
if ((prod(this->plan_.matrix_size_os_) % bdim) != 0)
{
std::stringstream ss;
ss << "Error: cuNFFT : the number of oversampled image elements must be a multiplum of the block size: "
<< bdim;
throw std::runtime_error(ss.str());
}
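        // wrap_image_kernel folds the padded border of `source` back onto the
        // opposite side of the oversampled image (periodic wrap-around), so
        // contributions written into the padding region are not lost.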
// Invoke kernel.
hipLaunchKernelGGL(( wrap_image_kernel<T, D>)
, dim3(dimGrid), dim3(dimBlock), 0, 0,
source.get_data_ptr(),
target.get_data_ptr(),
vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
accumulate);
CHECK_FOR_CUDA_ERROR();
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::ATOMIC>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
// No need to do anything here.
// Defined for completeness.
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::ATOMIC>::compute(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
// We need a device with compute capability >= 2.0 to use atomics.
if (cudaDeviceManager::Instance()->major_version(this->plan_.device_) == 1)
{
throw cuda_error("cuGriddingConvolution: Atomic gridding "
"convolution supported only on devices with "
"compute capability 2.0 or higher.");
}
// We require the warp size to be a power of 2.
if (!((cudaDeviceManager::Instance()->warp_size(this->plan_.device_) &
(cudaDeviceManager::Instance()->warp_size(this->plan_.device_) - 1)) == 0))
{
throw cuda_error("cuGriddingConvolution: Unsupported hardware "
"(warp size is not a power of 2).");
}
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set up grid and threads. We can convolve only max_coils batches per
// run due to shared memory issues.
unsigned int threads_per_block = CUDA_CONV_THREADS_PER_KERNEL;
unsigned int max_coils = CUDA_CONV_MAX_COILS;
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired / max_coils +
(((domain_size_coils_desired % max_coils) == 0) ? 0 : 1);
unsigned int domain_size_coils =
(num_repetitions == 1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail =
(num_repetitions == 1) ? domain_size_coils_desired :
domain_size_coils_desired - (num_repetitions - 1) * domain_size_coils;
// Block and grid dimensions.
dim3 dimBlock(threads_per_block);
dim3 dimGrid((this->plan_.num_samples_ + dimBlock.x - 1) / dimBlock.x,
this->plan_.num_frames_);
// Calculate how much shared memory to use per thread.
size_t bytes_per_thread =
domain_size_coils * sizeof(vector_td<REAL, D>);
size_t bytes_per_thread_tail =
domain_size_coils_tail * sizeof(vector_td<REAL, D>);
// Clear image if not accumulating.
if (!accumulate)
clear(&image);
for (unsigned int repetition = 0; repetition < num_repetitions; repetition++)
{
// Number of coils in this repetition.
size_t num_coils = (repetition == num_repetitions - 1) ?
domain_size_coils_tail : domain_size_coils;
// Size of shared memory.
size_t sharedMemSize = (repetition == num_repetitions - 1) ?
dimBlock.x * bytes_per_thread_tail :
dimBlock.x * bytes_per_thread;
// Launch CUDA kernel.
hipLaunchKernelGGL(( NFFT_H_atomic_convolve_kernel<T, D, K>)
, dim3(dimGrid), dim3(dimBlock), sharedMemSize, 0,
vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
this->plan_.num_samples_,
num_coils,
raw_pointer_cast(&this->plan_.trajectory_[0]),
samples.get_data_ptr() + repetition * this->plan_.num_samples_ *
this->plan_.num_frames_ * domain_size_coils,
image.get_data_ptr() + repetition * prod(this->plan_.matrix_size_os_) *
this->plan_.num_frames_ * domain_size_coils,
this->plan_.d_kernel_);
}
CHECK_FOR_CUDA_ERROR();
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::SPARSE_MATRIX>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
this->conv_matrix_ = std::make_unique<cuCsrMatrix<T>>(
make_conv_matrix<T, D, K>(
trajectory, this->plan_.matrix_size_os_, this->plan_.d_kernel_));
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::SPARSE_MATRIX>::compute(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
std::vector<size_t> sample_dims =
{ this->plan_.num_samples_ * this->plan_.num_frames_, num_batches};
std::vector<size_t> image_dims =
{ prod(this->plan_.matrix_size_os_), num_batches };
image_dims.push_back(this->plan_.num_frames_);
cuNDArray<T> image_view(
image_dims, image.get_data_ptr());
cuNDArray<T> samples_view(
sample_dims, const_cast<T*>(samples.get_data_ptr()));
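        // With the sparse convolution matrix prebuilt in prepare(), the NC2C pass
        // reduces to a single sparse-times-dense multiply that accumulates
        // kernel-weighted sample values into the oversampled image view.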
sparseMM(T(1.0), T(1.0), *this->conv_matrix_, samples_view, image_view, true);
}
} // namespace Gadgetron
template class Gadgetron::cuGriddingConvolution<float, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<float, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<float, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<float, 4, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 4, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 4, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 4, Gadgetron::JincKernel>;
| 360565c034337a82758e0ea7725ae49d7ed36924.cu |
#include "cuGriddingConvolution.h"
#include <thrust/binary_search.h>
#include "cuNDArray_elemwise.h"
#include "cuNDArray_utils.h"
#include "cudaDeviceManager.h"
#include "ConvolverC2NC.cuh"
#include "ConvolverNC2C_atomic.cuh"
#include "ConvolverNC2C_sparse.cuh"
#include "ConvolverNC2C_standard.cuh"
#define CUDA_CONV_MAX_COILS (16)
#define CUDA_CONV_THREADS_PER_KERNEL (192)
namespace Gadgetron
{
template<class T, unsigned int D, template<class, unsigned int> class K>
cuGriddingConvolution<T, D, K>::cuGriddingConvolution(
const vector_td<size_t, D>& matrix_size,
const vector_td<size_t, D>& matrix_size_os,
const K<REAL, D>& kernel,
ConvolutionType conv_type)
: GriddingConvolutionBase<cuNDArray, T, D, K>(
matrix_size, matrix_size_os, kernel)
{
this->initialize(conv_type);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
cuGriddingConvolution<T, D, K>::cuGriddingConvolution(
const vector_td<size_t, D>& matrix_size,
const REAL os_factor,
const K<REAL, D>& kernel,
ConvolutionType conv_type)
: GriddingConvolutionBase<cuNDArray, T, D, K>(
matrix_size, os_factor, kernel)
{
this->initialize(conv_type);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
cuGriddingConvolution<T, D, K>::~cuGriddingConvolution()
{
// Release kernel on device.
cudaFree(this->d_kernel_);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::initialize(ConvolutionType conv_type)
{
// Get device number.
if (cudaGetDevice(&this->device_) != cudaSuccess)
{
throw cuda_error("Could not get device number.");
}
// The convolution does not work properly for very small convolution
// kernel width (experimentally observed limit).
if (this->kernel_.get_width() < REAL(1.8))
throw std::runtime_error("Kernel width must be equal to or larger "
"than 1.8.");
// Matrix size must be a multiple of warp size.
vector_td<size_t, D> warp_size(
(size_t)cudaDeviceManager::Instance()->warp_size(this->device_));
if (sum(this->matrix_size_ % warp_size) ||
sum(this->matrix_size_os_ % warp_size))
{
std::stringstream ss;
ss << "Matrix size must be a multiple of " << warp_size[0] << ".";
throw std::runtime_error(ss.str());
}
// Compute matrix padding.
vector_td<REAL, D> radius(this->kernel_.get_radius());
this->matrix_padding_ = vector_td<size_t, D>(ceil(radius));
this->matrix_padding_ <<= 1;
// Compute warp size power.
unsigned int warp_size_power = 0;
unsigned int tmp = cudaDeviceManager::Instance()->warp_size(
this->device_);
while (tmp != 1)
{
tmp >>= 1;
warp_size_power++;
}
this->warp_size_power_ = warp_size_power;
// Copy kernel to device.
cudaMalloc((void**)&this->d_kernel_, sizeof(K<REAL, D>));
cudaMemcpy((void*)this->d_kernel_, (const void*)&this->kernel_,
sizeof(K<REAL, D>), cudaMemcpyHostToDevice);
// Set up convolvers.
switch (conv_type)
{
case ConvolutionType::STANDARD:
{
this->conv_C2NC_ = std::make_unique<ConvolverC2NC<T, D, K, ConvolutionType::STANDARD>>(*this);
this->conv_NC2C_ = std::make_unique<ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>>(*this);
break;
}
case ConvolutionType::ATOMIC:
{
this->conv_C2NC_ = std::make_unique<ConvolverC2NC<T, D, K, ConvolutionType::ATOMIC>>(*this);
this->conv_NC2C_ = std::make_unique<ConvolverNC2C<T, D, K, ConvolutionType::ATOMIC>>(*this);
break;
}
case ConvolutionType::SPARSE_MATRIX:
{
this->conv_C2NC_ = std::make_unique<ConvolverC2NC<T, D, K, ConvolutionType::SPARSE_MATRIX>>(*this);
this->conv_NC2C_ = std::make_unique<ConvolverNC2C<T, D, K, ConvolutionType::SPARSE_MATRIX>>(*this);
break;
}
}
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::preprocess(
const cuNDArray<vector_td<REAL, D>>& trajectory,
GriddingConvolutionPrepMode prep_mode)
{
// Call base class method.
GriddingConvolutionBase<cuNDArray, T, D, K>::preprocess(
trajectory, prep_mode);
// Make sure that the trajectory values are within range [-1/2, 1/2].
cuNDArray<REAL> traj_view(
std::vector<size_t>{trajectory.get_number_of_elements() * D},
(REAL*) trajectory.get_data_ptr());
thrust::pair<thrust::device_ptr<REAL>, thrust::device_ptr<REAL>> mm_pair =
thrust::minmax_element(traj_view.begin(), traj_view.end());
if (*mm_pair.first < REAL(-0.5) || *mm_pair.second > REAL(0.5))
{
std::stringstream ss;
ss << "Error: cuGriddingConvolution::preprocess: trajectory [" <<
*mm_pair.first << ", " << *mm_pair.second <<
"] out of range [-1/2, 1/2].";
throw std::runtime_error(ss.str());
}
// Allocate trajectory.
this->trajectory_ = thrust::device_vector<vector_td<REAL, D>>(
trajectory.get_number_of_elements());
CHECK_FOR_CUDA_ERROR();
// Cast matrix size values to floating-point type.
vector_td<REAL, D> matrix_size_os_fp =
vector_td<REAL, D>(this->matrix_size_os_);
vector_td<REAL, D> matrix_size_os_padded_fp =
vector_td<REAL, D>((this->matrix_size_os_ + this->matrix_padding_) >> 1);
// Convert input trajectory from range [-1/2, 1/2] to
// [0, this->matrix_size_os_], and copy to class member.
thrust::transform(trajectory.begin(),
trajectory.end(),
this->trajectory_.begin(),
trajectory_scale<REAL, D>(matrix_size_os_fp,
matrix_size_os_padded_fp));
// Prepare convolution.
if (prep_mode == GriddingConvolutionPrepMode::C2NC ||
prep_mode == GriddingConvolutionPrepMode::ALL)
{
this->conv_C2NC_->prepare(this->trajectory_);
CHECK_FOR_CUDA_ERROR();
}
if (prep_mode == GriddingConvolutionPrepMode::NC2C ||
prep_mode == GriddingConvolutionPrepMode::ALL)
{
this->conv_NC2C_->prepare(this->trajectory_);
CHECK_FOR_CUDA_ERROR();
}
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::compute_C2NC(
const cuNDArray<T>& image,
cuNDArray<T>& samples,
bool accumulate)
{
this->check_inputs(samples, image);
this->conv_C2NC_->compute(image, samples, accumulate);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::compute_NC2C(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
this->check_inputs(samples, image);
this->conv_NC2C_->compute(samples, image, accumulate);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void cuGriddingConvolution<T, D, K>::check_inputs(
const cuNDArray<T>& samples,
const cuNDArray<T>& image)
{
if (image.get_number_of_dimensions() < D)
{
throw std::runtime_error("Number of image dimensions does not "
"match the plan.");
}
vector_td<size_t, D> image_dims =
from_std_vector<size_t, D>(*image.get_dimensions());
if (image_dims != this->matrix_size_os_)
throw std::runtime_error("Image dimensions mismatch.");
if ((samples.get_number_of_elements() == 0) ||
(samples.get_number_of_elements() % (this->num_frames_ * this->num_samples_)))
{
printf("\nConsistency check failed:\n"
"#elements in the samples array: %ld.\n"
"#samples from preprocessing: %zu.\n"
"#frames from preprocessing: %zu.\n",
samples.get_number_of_elements(), this->num_samples_, this->num_frames_);
fflush(stdout);
throw std::runtime_error(
"The number of samples is not a multiple of #samples/frame x "
"#frames as requested through preprocessing.");
}
unsigned int num_batches_in_samples_array =
samples.get_number_of_elements() / (this->num_frames_ * this->num_samples_);
unsigned int num_batches_in_image_array = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++) {
num_batches_in_image_array *= image.get_size(d);
}
num_batches_in_image_array /= this->num_frames_;
if (num_batches_in_samples_array != num_batches_in_image_array) {
printf("\nConsistency check failed:\n"
"#elements in the samples array: %ld.\n"
"#samples from preprocessing: %zu.\n"
"#frames from preprocessing: %zu.\n"
"Leading to %d batches in the samples array.\n"
"The number of batches in the image array is %u.\n",
samples.get_number_of_elements(),
this->num_samples_,
this->num_frames_,
num_batches_in_samples_array,
num_batches_in_image_array);
fflush(stdout);
throw std::runtime_error(
"Number of batches mismatch between samples and image arrays.");
}
}
template<class T, unsigned int D, template<class, unsigned int> class K, ConvolutionType C>
void ConvolverC2NC<T, D, K, C>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
// No need to do anything here.
// Defined for completeness.
}
template<class T, unsigned int D, template<class, unsigned int> class K, ConvolutionType C>
void ConvolverC2NC<T, D, K, C>::compute(
const cuNDArray<T>& image,
cuNDArray<T>& samples,
bool accumulate)
{
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set up grid and threads. We can convolve only max_coils batches per
// run due to shared memory issues.
unsigned int threads_per_block = CUDA_CONV_THREADS_PER_KERNEL;
unsigned int max_coils = CUDA_CONV_MAX_COILS;
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired / max_coils +
(((domain_size_coils_desired % max_coils) == 0) ? 0 : 1);
unsigned int domain_size_coils =
(num_repetitions == 1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail =
(num_repetitions == 1) ? domain_size_coils_desired :
domain_size_coils_desired - (num_repetitions - 1) * domain_size_coils;
// Block and grid dimensions.
dim3 dimBlock(threads_per_block);
dim3 dimGrid((this->plan_.num_samples_ + dimBlock.x - 1) / dimBlock.x,
this->plan_.num_frames_);
// Calculate how much shared memory to use per thread.
size_t bytes_per_thread = domain_size_coils * sizeof(T);
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof(T);
// Image view dimensions.
auto view_dims = to_std_vector(this->plan_.matrix_size_os_);
view_dims.push_back(this->plan_.num_frames_);
view_dims.push_back(0); // Placeholder for num_coils.
for (unsigned int repetition = 0; repetition < num_repetitions; repetition++)
{
// Number of coils in this repetition.
size_t num_coils = (repetition == num_repetitions - 1) ?
domain_size_coils_tail : domain_size_coils;
view_dims.back() = num_coils;
// Image view for this repetition.
size_t image_view_elements = std::accumulate(
view_dims.begin(), view_dims.end()-1, size_t(1), std::multiplies<>());
auto image_view = cuNDArray<T>(view_dims, const_cast<T*>(
image.get_data_ptr()) + repetition * image_view_elements*domain_size_coils);
auto permutation = std::vector<size_t>(D+2);
permutation[0] = D+1;
std::iota(permutation.begin() + 1, permutation.end(), 0);
auto image_permuted = permute(image_view, permutation);
// Size of shared memory.
size_t sharedMemSize = (repetition == num_repetitions - 1) ?
dimBlock.x * bytes_per_thread_tail :
dimBlock.x * bytes_per_thread;
// Launch CUDA kernel.
NFFT_convolve_kernel<T, D, K>
<<<dimGrid, dimBlock, sharedMemSize>>>(
vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
this->plan_.num_samples_,
num_coils,
raw_pointer_cast(&this->plan_.trajectory_[0]),
image_permuted.get_data_ptr(),
samples.get_data_ptr() + repetition * this->plan_.num_samples_ *
this->plan_.num_frames_ * domain_size_coils,
this->plan_.warp_size_power_,
accumulate,
this->plan_.d_kernel_);
CHECK_FOR_CUDA_ERROR();
}
}
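// -----------------------------------------------------------------------------
// Illustrative sketch, not part of the original implementation: compute() above
// splits the coil dimension into several kernel launches because only
// CUDA_CONV_MAX_COILS coils fit in shared memory per launch. The hypothetical
// values below (max_coils = 8, 19 coils of data) spell out the repetition/tail
// arithmetic used by both convolvers.
// -----------------------------------------------------------------------------
static inline void example_coil_batching()
{
    const unsigned int max_coils = 8;   // stands in for CUDA_CONV_MAX_COILS
    const unsigned int desired = 19;    // total number of coils in the data
    const unsigned int num_repetitions =
        desired / max_coils + (((desired % max_coils) == 0) ? 0 : 1);        // 3
    const unsigned int domain_size_coils =
        (num_repetitions == 1) ? desired : max_coils;                        // 8
    const unsigned int tail =
        (num_repetitions == 1) ? desired
                               : desired - (num_repetitions - 1) * domain_size_coils; // 3
    // Repetitions 0 and 1 convolve 8 coils each; the last repetition convolves
    // the remaining 3 coils with a correspondingly smaller shared-memory size.
    (void)tail;
}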
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
// Allocate storage for and compute temporary prefix-sum variable
// (#cells influenced per sample).
thrust::device_vector<unsigned int> c_p_s(trajectory.size());
thrust::device_vector<unsigned int> c_p_s_ps(trajectory.size());
CHECK_FOR_CUDA_ERROR();
REAL radius = this->plan_.kernel_.get_radius();
transform(trajectory.begin(), trajectory.end(),
c_p_s.begin(), compute_num_cells_per_sample<REAL, D>(radius));
inclusive_scan(c_p_s.begin(), c_p_s.end(), c_p_s_ps.begin(),
thrust::plus<unsigned int>()); // Prefix sum.
// Build the vector of (grid_idx, sample_idx) tuples. Actually kept in
// two separate vectors.
unsigned int num_pairs = c_p_s_ps.back();
c_p_s.clear();
tuples_first = thrust::device_vector<unsigned int>(num_pairs);
tuples_last = thrust::device_vector<unsigned int>(num_pairs);
CHECK_FOR_CUDA_ERROR();
// Fill tuple vector.
write_pairs<REAL, D>(vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
this->plan_.num_samples_, this->plan_.num_frames_,
this->plan_.kernel_.get_width(),
raw_pointer_cast(&trajectory[0]),
raw_pointer_cast(&c_p_s_ps[0]),
raw_pointer_cast(&tuples_first[0]),
raw_pointer_cast(&tuples_last[0]));
c_p_s_ps.clear();
// Sort by grid indices.
sort_by_key(tuples_first.begin(), tuples_first.end(), tuples_last.begin());
// Each bucket_begin[i] indexes the first element of bucket i's list of points.
// Each bucket_end[i] indexes one past the last element of bucket i's list of points.
bucket_begin = thrust::device_vector<unsigned int>(
this->plan_.num_frames_ * prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_));
bucket_end = thrust::device_vector<unsigned int>(
this->plan_.num_frames_ * prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_));
CHECK_FOR_CUDA_ERROR();
// Find the beginning of each bucket's list of points.
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(tuples_first.begin(), tuples_first.end(),
search_begin, search_begin + this->plan_.num_frames_ *
prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_),
bucket_begin.begin());
// Find the end of each bucket's list of points.
thrust::upper_bound(tuples_first.begin(), tuples_first.end(),
search_begin, search_begin + this->plan_.num_frames_ *
prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_),
bucket_end.begin());
}
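// -----------------------------------------------------------------------------
// Illustrative sketch, not part of the original implementation: prepare() above
// sorts (grid-cell, sample) pairs by cell index and then recovers each cell's
// [bucket_begin, bucket_end) range with lower_bound/upper_bound over a counting
// iterator. The toy example below shows the same Thrust pattern for 6 samples
// spread over 3 cells; all names are hypothetical, and the sketch assumes the
// Thrust headers already pulled in by this file.
// -----------------------------------------------------------------------------
static inline void example_bucket_ranges()
{
    thrust::device_vector<unsigned int> cell(6);     // cell touched by each sample
    cell[0] = 2; cell[1] = 0; cell[2] = 2; cell[3] = 1; cell[4] = 0; cell[5] = 2;
    thrust::device_vector<unsigned int> sample(6);   // sample indices 0..5
    for (unsigned int i = 0; i < 6; ++i) sample[i] = i;
    // Group samples by the cell they touch (same role as sort_by_key above).
    thrust::sort_by_key(cell.begin(), cell.end(), sample.begin());
    // Locate each cell's run of samples in the sorted key array.
    const unsigned int num_cells = 3;
    thrust::device_vector<unsigned int> bucket_begin(num_cells), bucket_end(num_cells);
    thrust::counting_iterator<unsigned int> search(0);
    thrust::lower_bound(cell.begin(), cell.end(), search, search + num_cells,
                        bucket_begin.begin());
    thrust::upper_bound(cell.begin(), cell.end(), search, search + num_cells,
                        bucket_end.begin());
    // Result: bucket_begin = {0, 2, 3}, bucket_end = {2, 3, 6}, i.e. cell i owns
    // sample[bucket_begin[i] .. bucket_end[i]) of the sorted sample list.
}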
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>::compute(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
// Check if warp_size is a power of two. We do some modulus tricks in
// the kernels that depend on this.
if (!((cudaDeviceManager::Instance()->warp_size(this->plan_.device_) &
(cudaDeviceManager::Instance()->warp_size(this->plan_.device_) - 1)) == 0))
{
throw cuda_error("cuGriddingConvolution: Unsupported hardware "
"(warp size is not a power of 2).");
}
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set up grid and threads. We can convolve only max_coils batches per
// run due to shared memory issues.
unsigned int threads_per_block = CUDA_CONV_THREADS_PER_KERNEL;
unsigned int max_coils = CUDA_CONV_MAX_COILS;
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired / max_coils +
(((domain_size_coils_desired % max_coils) == 0) ? 0 : 1);
unsigned int domain_size_coils = (num_repetitions == 1) ?
domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions == 1) ?
domain_size_coils_desired :
domain_size_coils_desired - (num_repetitions - 1) * domain_size_coils;
// Block and grid dimensions.
dim3 dimBlock(threads_per_block);
dim3 dimGrid((prod(this->plan_.matrix_size_os_ + this->plan_.matrix_padding_) +
dimBlock.x - 1) / dimBlock.x, this->plan_.num_frames_);
// Calculate how much shared memory to use per thread.
size_t bytes_per_thread = domain_size_coils * sizeof(T);
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof(T);
// Define temporary image that includes padding.
std::vector<size_t> padded_image_dims = to_std_vector(
this->plan_.matrix_size_os_ + this->plan_.matrix_padding_);
if (this->plan_.num_frames_ > 1)
padded_image_dims.push_back(this->plan_.num_frames_);
if (num_batches > 1)
padded_image_dims.push_back(num_batches);
cuNDArray<T> padded_image(padded_image_dims);
// Prioritise shared memory over L1 cache.
cudaFuncSetCacheConfig(NFFT_H_convolve_kernel<T, D, K>,
cudaFuncCachePreferShared);
// Samples view dimensions.
std::vector<size_t> view_dims = {
this->plan_.num_samples_,
this->plan_.num_frames_,
0}; // Placeholder for num_coils.
for (unsigned int repetition = 0; repetition < num_repetitions; repetition++)
{
// Number of coils in this repetition.
size_t num_coils = (repetition == num_repetitions - 1) ?
domain_size_coils_tail : domain_size_coils;
// Samples view for current repetition.
view_dims.back() = num_coils;
cuNDArray<T> samples_view(view_dims,
const_cast<T*>(samples.get_data_ptr()) +
repetition * this->plan_.num_samples_ *
this->plan_.num_frames_ * domain_size_coils);
std::vector<size_t> permute_order = {2, 0, 1};
auto samples_permuted = permute(samples_view, permute_order);
// Size of shared memory.
size_t sharedMemSize = (repetition == num_repetitions - 1) ?
dimBlock.x * bytes_per_thread_tail :
dimBlock.x * bytes_per_thread;
// Launch CUDA kernel.
NFFT_H_convolve_kernel<T, D, K>
<<<dimGrid, dimBlock, sharedMemSize>>>(
vector_td<unsigned int, D>(this->plan_.matrix_size_os_ +
this->plan_.matrix_padding_),
this->plan_.num_samples_,
num_coils,
raw_pointer_cast(&this->plan_.trajectory_[0]),
padded_image.get_data_ptr() + repetition *
prod(this->plan_.matrix_size_os_ +
this->plan_.matrix_padding_) *
this->plan_.num_frames_ *
domain_size_coils,
samples_permuted.get_data_ptr(),
raw_pointer_cast(&tuples_last[0]),
raw_pointer_cast(&bucket_begin[0]),
raw_pointer_cast(&bucket_end[0]),
this->plan_.warp_size_power_,
this->plan_.d_kernel_);
}
CHECK_FOR_CUDA_ERROR();
this->wrap_image(padded_image, image, accumulate);
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::STANDARD>::wrap_image(
cuNDArray<T>& source,
cuNDArray<T>& target,
bool accumulate)
{
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < source.get_number_of_dimensions(); d++)
num_batches *= source.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set dimensions of grid/blocks.
unsigned int bdim = 256;
dim3 dimBlock(bdim);
dim3 dimGrid(prod(this->plan_.matrix_size_os_) / bdim,
this->plan_.num_frames_ * num_batches);
// Safety check.
if ((prod(this->plan_.matrix_size_os_) % bdim) != 0)
{
std::stringstream ss;
ss << "Error: cuNFFT : the number of oversampled image elements must be a multiplum of the block size: "
<< bdim;
throw std::runtime_error(ss.str());
}
// Invoke kernel.
wrap_image_kernel<T, D>
<<<dimGrid, dimBlock>>>(
source.get_data_ptr(),
target.get_data_ptr(),
vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
accumulate);
CHECK_FOR_CUDA_ERROR();
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::ATOMIC>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
// No need to do anything here.
// Defined for completeness.
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::ATOMIC>::compute(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
// We need a device with compute capability >= 2.0 to use atomics.
if (cudaDeviceManager::Instance()->major_version(this->plan_.device_) == 1)
{
throw cuda_error("cuGriddingConvolution: Atomic gridding "
"convolution supported only on devices with "
"compute capability 2.0 or higher.");
}
// We require the warp size to be a power of 2.
if (!((cudaDeviceManager::Instance()->warp_size(this->plan_.device_) &
(cudaDeviceManager::Instance()->warp_size(this->plan_.device_) - 1)) == 0))
{
throw cuda_error("cuGriddingConvolution: Unsupported hardware "
"(warp size is not a power of 2).");
}
// Compute number of batches.
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
// Set up grid and threads. We can convolve only max_coils batches per
// run due to shared memory issues.
unsigned int threads_per_block = CUDA_CONV_THREADS_PER_KERNEL;
unsigned int max_coils = CUDA_CONV_MAX_COILS;
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired / max_coils +
(((domain_size_coils_desired % max_coils) == 0) ? 0 : 1);
unsigned int domain_size_coils =
(num_repetitions == 1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail =
(num_repetitions == 1) ? domain_size_coils_desired :
domain_size_coils_desired - (num_repetitions - 1) * domain_size_coils;
// Block and grid dimensions.
dim3 dimBlock(threads_per_block);
dim3 dimGrid((this->plan_.num_samples_ + dimBlock.x - 1) / dimBlock.x,
this->plan_.num_frames_);
// Calculate how much shared memory to use per thread.
size_t bytes_per_thread =
domain_size_coils * sizeof(vector_td<REAL, D>);
size_t bytes_per_thread_tail =
domain_size_coils_tail * sizeof(vector_td<REAL, D>);
// Clear image if not accumulating.
if (!accumulate)
clear(&image);
for (unsigned int repetition = 0; repetition < num_repetitions; repetition++)
{
// Number of coils in this repetition.
size_t num_coils = (repetition == num_repetitions - 1) ?
domain_size_coils_tail : domain_size_coils;
// Size of shared memory.
size_t sharedMemSize = (repetition == num_repetitions - 1) ?
dimBlock.x * bytes_per_thread_tail :
dimBlock.x * bytes_per_thread;
// Launch CUDA kernel.
NFFT_H_atomic_convolve_kernel<T, D, K>
<<<dimGrid, dimBlock, sharedMemSize>>>(
vector_td<unsigned int, D>(this->plan_.matrix_size_os_),
vector_td<unsigned int, D>(this->plan_.matrix_padding_),
this->plan_.num_samples_,
num_coils,
raw_pointer_cast(&this->plan_.trajectory_[0]),
samples.get_data_ptr() + repetition * this->plan_.num_samples_ *
this->plan_.num_frames_ * domain_size_coils,
image.get_data_ptr() + repetition * prod(this->plan_.matrix_size_os_) *
this->plan_.num_frames_ * domain_size_coils,
this->plan_.d_kernel_);
}
CHECK_FOR_CUDA_ERROR();
}
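// -----------------------------------------------------------------------------
// Illustrative sketch, not part of the original implementation: both NC2C
// compute() paths insist that the warp size is a power of two because the
// kernels replace division and modulus by the warp size with a shift and a
// mask (warp_size_power_ is log2 of the warp size). The hypothetical helpers
// below spell out that trick on the host side.
// -----------------------------------------------------------------------------
static inline bool example_is_power_of_two(unsigned int x)
{
    return x != 0 && (x & (x - 1)) == 0;   // same test as the checks above
}
static inline void example_warp_shift_trick(unsigned int idx,
                                            unsigned int warp_size,
                                            unsigned int warp_size_power,
                                            unsigned int* lane,
                                            unsigned int* warp)
{
    // Valid only when warp_size == (1u << warp_size_power).
    *lane = idx & (warp_size - 1);   // equivalent to idx % warp_size
    *warp = idx >> warp_size_power;  // equivalent to idx / warp_size
}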
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::SPARSE_MATRIX>::prepare(
const thrust::device_vector<vector_td<REAL, D>>& trajectory)
{
this->conv_matrix_ = std::make_unique<cuCsrMatrix<T>>(
make_conv_matrix<T, D, K>(
trajectory, this->plan_.matrix_size_os_, this->plan_.d_kernel_));
}
template<class T, unsigned int D, template<class, unsigned int> class K>
void ConvolverNC2C<T, D, K, ConvolutionType::SPARSE_MATRIX>::compute(
const cuNDArray<T>& samples,
cuNDArray<T>& image,
bool accumulate)
{
unsigned int num_batches = 1;
for (unsigned int d = D; d < image.get_number_of_dimensions(); d++)
num_batches *= image.get_size(d);
num_batches /= this->plan_.num_frames_;
std::vector<size_t> sample_dims =
{ this->plan_.num_samples_ * this->plan_.num_frames_, num_batches};
std::vector<size_t> image_dims =
{ prod(this->plan_.matrix_size_os_), num_batches };
image_dims.push_back(this->plan_.num_frames_);
cuNDArray<T> image_view(
image_dims, image.get_data_ptr());
cuNDArray<T> samples_view(
sample_dims, const_cast<T*>(samples.get_data_ptr()));
sparseMM(T(1.0), T(1.0), *this->conv_matrix_, samples_view, image_view, true);
}
} // namespace Gadgetron
template class Gadgetron::cuGriddingConvolution<float, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<double, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::cuGriddingConvolution<float, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<float, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<float, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<float, 4, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<double, 4, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<float>, 4, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 1, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 2, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 3, Gadgetron::JincKernel>;
template class Gadgetron::cuGriddingConvolution<Gadgetron::complext<double>, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 1, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 2, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 3, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 4, Gadgetron::KaiserKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, float, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, double, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<float>, 4, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 1, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 2, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 3, Gadgetron::JincKernel>;
template class Gadgetron::GriddingConvolution<Gadgetron::cuNDArray, Gadgetron::complext<double>, 4, Gadgetron::JincKernel>;
|
3252036dc3f921c35597c6cef59eb3e0e584ce35.hip | // !!! This is a file automatically generated by hipify!!!
#include "cupy_cub.h" // need to make atomicAdd visible to CUB templates early
#include <cupy/type_dispatcher.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/device/device_spmv.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_histogram.cuh>
using namespace cub;
/* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */
// - This works only because all data fields in the *Traits struct are not
// used in <hipcub/hipcub.hpp>.
// - The Max() and Lowest() below are chosen to comply with NumPy's lexical
// ordering; note that std::numeric_limits<T> does not support complex
// numbers as in general the comparison is ill defined.
// - DO NOT USE THIS STUB for supporting CUB sorting!!!!!!
template <>
struct FpLimits<complex<float>>
{
static __host__ __device__ __forceinline__ complex<float> Max() {
return (complex<float>(FLT_MAX, FLT_MAX));
}
static __host__ __device__ __forceinline__ complex<float> Lowest() {
return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1)));
}
};
template <>
struct FpLimits<complex<double>>
{
static __host__ __device__ __forceinline__ complex<double> Max() {
return (complex<double>(DBL_MAX, DBL_MAX));
}
static __host__ __device__ __forceinline__ complex<double> Lowest() {
return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1)));
}
};
template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {};
template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {};
/* ------------------------------------ end of boilerplate ------------------------------------ */
/* ------------------------------------ "Patches" to CUB ------------------------------------
This stub is needed because CUB does not have a built-in "prod" operator
*/
//
// product functor
//
struct _multiply
{
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return a * b;
}
};
/*
These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain
behaviors with which we must comply.
*/
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#ifdef __CUDA_ARCH__
return __hisnan(x);
#else
// TODO: avoid cast to float
return isnan(__half2float(x));
#endif
}
__host__ __device__ __forceinline__ bool half_less(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l < r;
#else
// TODO: avoid cast to float
return __half2float(l) < __half2float(r);
#endif
}
__host__ __device__ __forceinline__ bool half_equal(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l == r;
#else
// TODO: avoid cast to float
return __half2float(l) == __half2float(r);
#endif
}
#endif
//
// Max()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Max::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return b;}
else {return a;}
}
#endif
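// -----------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the Max() specializations
// above implement NumPy's reduction semantics, in which any NaN operand poisons
// the result. This differs from fmaxf(), which returns the non-NaN operand.
// The hypothetical helper below restates the rule for plain float on the host.
// -----------------------------------------------------------------------------
static inline float example_numpy_nan_max(float a, float b)
{
    // Same rule as the specialization above: any NaN operand wins.
    if (isnan(a)) {return a;}
    if (isnan(b)) {return b;}
    return CUB_MAX(a, b);
    // Contrast: fmaxf(NAN, 1.0f) returns 1.0f, i.e. the NaN is dropped.
}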
//
// Min()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Min::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return a;}
else {return b;}
}
#endif
//
// ArgMax()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMax::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMax::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(a.value, b.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
//
// ArgMin()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMin::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(b.value, a.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
/* ------------------------------------ End of "patches" ------------------------------------ */
//
// **** CUB Sum ****
//
struct _cub_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
DeviceSegmentedReduce::Sum(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Prod ****
//
struct _cub_reduce_prod {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
_multiply product_op;
// the init value is cast from 1.0f because on host __half can only be
// initialized by float or double; static_cast<__half>(1) = 0 on host.
DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, product_op, static_cast<T>(1.0f), s);
}
};
struct _cub_segmented_reduce_prod {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
_multiply product_op;
// the init value is cast from 1.0f because on host __half can only be
// initialized by float or double; static_cast<__half>(1) = 0 on host.
DeviceSegmentedReduce::Reduce(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end),
product_op, static_cast<T>(1.0f), s);
}
};
//
// **** CUB Min ****
//
struct _cub_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
DeviceSegmentedReduce::Min(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Max ****
//
struct _cub_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, hipStream_t s)
{
DeviceSegmentedReduce::Max(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB ArgMin ****
//
struct _cub_reduce_argmin {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmin
//
// **** CUB ArgMax ****
//
struct _cub_reduce_argmax {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t s)
{
DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmax
//
// **** CUB SpMV ****
//
struct _cub_device_spmv {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y,
int num_rows, int num_cols, int num_nonzeros, hipStream_t stream)
{
DeviceSpmv::CsrMV(workspace, workspace_size, static_cast<T*>(values),
static_cast<int*>(row_offsets), static_cast<int*>(column_indices),
static_cast<T*>(x), static_cast<T*>(y), num_rows, num_cols,
num_nonzeros, stream);
}
};
//
// **** CUB InclusiveSum ****
//
struct _cub_inclusive_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, hipStream_t s)
{
DeviceScan::InclusiveSum(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), num_items, s);
}
};
//
// **** CUB inclusive product ****
//
struct _cub_inclusive_product {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, hipStream_t s)
{
_multiply product_op;
DeviceScan::InclusiveScan(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), product_op, num_items, s);
}
};
//
// **** CUB histogram range ****
//
struct _cub_histogram_range {
template <typename sampleT,
typename binT = typename If<std::is_integral<sampleT>::value, double, sampleT>::Type>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int n_bins, void* bins, size_t n_samples, hipStream_t s) const
{
// Ugly hack to avoid specializing complex types, which cub::DeviceHistogram does not support.
// The If and Equals templates are from cub/util_type.cuh.
// TODO(leofang): revisit this part when complex support is added to cupy.histogram()
typedef typename If<(Equals<sampleT, complex<float>>::VALUE || Equals<sampleT, complex<double>>::VALUE),
double,
sampleT>::Type h_sampleT;
typedef typename If<(Equals<binT, complex<float>>::VALUE || Equals<binT, complex<double>>::VALUE),
double,
binT>::Type h_binT;
// TODO(leofang): CUB has a bug that when specializing n_samples with type size_t,
// it would error out. Before the fix (thrust/cub#38) is merged we disable the code
// path splitting for now. A type/range check must be done in the caller.
// if (n_samples < (1ULL << 31)) {
int num_samples = n_samples;
DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), num_samples, s);
// } else {
// DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
// static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), n_samples, s);
// }
}
};
//
// APIs exposed to CuPy
//
/* -------- device reduce -------- */
void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_PROD: return dtype_dispatcher(dtype_id, _cub_reduce_prod(),
workspace, workspace_size, x, y, num_items, stream);
default: throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items,
hipStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_reduce(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
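// -----------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the pair of functions
// above follows the usual CUB two-phase pattern -- a first call with a NULL
// workspace only reports the required temporary-storage size, and a second
// call with an allocated workspace performs the reduction. The hypothetical
// caller below (the name example_device_sum and the idea of passing dtype_id
// straight through are assumptions made for this sketch) shows that flow.
// -----------------------------------------------------------------------------
static void example_device_sum(void* d_in, void* d_out, int num_items,
                               hipStream_t stream, int dtype_id)
{
    // Phase 1: query the workspace size (no reduction work is done).
    size_t ws_size = cub_device_reduce_get_workspace_size(
        d_in, d_out, num_items, stream, CUPY_CUB_SUM, dtype_id);
    // Phase 2: allocate the workspace and run the actual reduction.
    void* workspace = NULL;
    hipMalloc(&workspace, ws_size);
    cub_device_reduce(workspace, ws_size, d_in, d_out, num_items, stream,
                      CUPY_CUB_SUM, dtype_id);
    hipFree(workspace);
}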
/* -------- device segmented reduce -------- */
void cub_device_segmented_reduce(void* workspace, size_t& workspace_size,
void* x, void* y, int num_segments, void* offset_start, void* offset_end,
hipStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MIN:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MAX:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_PROD:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_prod(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y,
int num_segments, void* offset_start, void* offset_end,
hipStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments,
offset_start, offset_end, stream,
op, dtype_id);
return workspace_size;
}
/*--------- device spmv (sparse-matrix dense-vector multiply) ---------*/
void cub_device_spmv(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y, int num_rows,
int num_cols, int num_nonzeros, hipStream_t stream,
int dtype_id)
{
return dtype_dispatcher(dtype_id, _cub_device_spmv(),
workspace, workspace_size, values, row_offsets,
column_indices, x, y, num_rows, num_cols,
num_nonzeros, stream);
}
size_t cub_device_spmv_get_workspace_size(void* values, void* row_offsets,
void* column_indices, void* x, void* y, int num_rows, int num_cols,
int num_nonzeros, hipStream_t stream, int dtype_id)
{
size_t workspace_size = 0;
cub_device_spmv(NULL, workspace_size, values, row_offsets, column_indices,
x, y, num_rows, num_cols, num_nonzeros, stream, dtype_id);
return workspace_size;
}
/* -------- device scan -------- */
void cub_device_scan(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, hipStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_CUMSUM:
return dtype_dispatcher(dtype_id, _cub_inclusive_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_CUMPROD:
return dtype_dispatcher(dtype_id, _cub_inclusive_product(),
workspace, workspace_size, x, y, num_items, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_scan_get_workspace_size(void* x, void* y, int num_items,
hipStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_scan(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
/* -------- device histogram -------- */
void cub_device_histogram_range(void* workspace, size_t& workspace_size, void* x, void* y,
int n_bins, void* bins, size_t n_samples, hipStream_t stream, int dtype_id)
{
// TODO(leofang): support complex
if (dtype_id == CUPY_TYPE_COMPLEX64 || dtype_id == CUPY_TYPE_COMPLEX128) {
throw std::runtime_error("complex dtype is not yet supported");
}
// TODO(leofang): n_samples is of type size_t, but if it's < 2^31 we cast it to int later
return dtype_dispatcher(dtype_id, _cub_histogram_range(),
workspace, workspace_size, x, y, n_bins, bins, n_samples, stream);
}
size_t cub_device_histogram_range_get_workspace_size(void* x, void* y, int n_bins,
void* bins, size_t n_samples, hipStream_t stream, int dtype_id)
{
size_t workspace_size = 0;
cub_device_histogram_range(NULL, workspace_size, x, y, n_bins, bins, n_samples,
stream, dtype_id);
return workspace_size;
}
| 3252036dc3f921c35597c6cef59eb3e0e584ce35.cu | #include "cupy_cub.h" // need to make atomicAdd visible to CUB templates early
#include <cupy/type_dispatcher.cuh>
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/device/device_spmv.cuh>
#include <cub/device/device_scan.cuh>
#include <cub/device/device_histogram.cuh>
using namespace cub;
/* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */
// - This works only because all data fields in the *Traits struct are not
// used in <cub/device/device_reduce.cuh>.
// - The Max() and Lowest() below are chosen to comply with NumPy's lexical
// ordering; note that std::numeric_limits<T> does not support complex
// numbers as in general the comparison is ill defined.
// - DO NOT USE THIS STUB for supporting CUB sorting!!!!!!
template <>
struct FpLimits<complex<float>>
{
static __host__ __device__ __forceinline__ complex<float> Max() {
return (complex<float>(FLT_MAX, FLT_MAX));
}
static __host__ __device__ __forceinline__ complex<float> Lowest() {
return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1)));
}
};
template <>
struct FpLimits<complex<double>>
{
static __host__ __device__ __forceinline__ complex<double> Max() {
return (complex<double>(DBL_MAX, DBL_MAX));
}
static __host__ __device__ __forceinline__ complex<double> Lowest() {
return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1)));
}
};
template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {};
template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {};
/* ------------------------------------ end of boilerplate ------------------------------------ */
/* ------------------------------------ "Patches" to CUB ------------------------------------
This stub is needed because CUB does not have a built-in "prod" operator
*/
//
// product functor
//
struct _multiply
{
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return a * b;
}
};
/*
These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain
behaviors with which we must comply.
*/
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#ifdef __CUDA_ARCH__
return __hisnan(x);
#else
// TODO: avoid cast to float
return isnan(__half2float(x));
#endif
}
__host__ __device__ __forceinline__ bool half_less(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l < r;
#else
// TODO: avoid cast to float
return __half2float(l) < __half2float(r);
#endif
}
__host__ __device__ __forceinline__ bool half_equal(const __half& l, const __half& r) {
#ifdef __CUDA_ARCH__
return l == r;
#else
// TODO: avoid cast to float
return __half2float(l) == __half2float(r);
#endif
}
#endif
//
// Max()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MAX(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and max() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return max(a, b);}
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Max::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return b;}
else {return a;}
}
#endif
//
// Min()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const
{
// NumPy behavior: NaN is always chosen!
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return CUB_MIN(a, b);}
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const
{
// - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed
// - NumPy behavior: If both a and b contain NaN, the first argument is chosen
// - isnan() and min() are defined in cupy/complex.cuh
if (isnan(a)) {return a;}
else if (isnan(b)) {return b;}
else {return min(a, b);}
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ __half Min::operator()(const __half &a, const __half &b) const
{
// NumPy behavior: NaN is always chosen!
if (half_isnan(a)) {return a;}
else if (half_isnan(b)) {return b;}
else if (half_less(a, b)) {return a;}
else {return b;}
}
#endif
//
// ArgMax()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMax::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMax::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(a.value, b.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
//
// ArgMin()
//
// specialization for float for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()(
const KeyValuePair<int, float> &a,
const KeyValuePair<int, float> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for double for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()(
const KeyValuePair<int, double> &a,
const KeyValuePair<int, double> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<float> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()(
const KeyValuePair<int, complex<float>> &a,
const KeyValuePair<int, complex<float>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
// specialization for complex<double> for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()(
const KeyValuePair<int, complex<double>> &a,
const KeyValuePair<int, complex<double>> &b) const
{
if (isnan(a.value))
return a;
else if (isnan(b.value))
return b;
else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
return b;
else
return a;
}
#if (__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))
// specialization for half for handling NaNs
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMin::operator()(
const KeyValuePair<int, __half> &a,
const KeyValuePair<int, __half> &b) const
{
if (half_isnan(a.value))
return a;
else if (half_isnan(b.value))
return b;
else if ((half_less(b.value, a.value)) ||
(half_equal(a.value, b.value) && (b.key < a.key)))
return b;
else
return a;
}
#endif
/* ------------------------------------ End of "patches" ------------------------------------ */
//
// **** CUB Sum ****
//
struct _cub_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
DeviceSegmentedReduce::Sum(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Prod ****
//
struct _cub_reduce_prod {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
_multiply product_op;
// the init value is cast from 1.0f because on host __half can only be
// initialized by float or double; static_cast<__half>(1) = 0 on host.
DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, product_op, static_cast<T>(1.0f), s);
}
};
struct _cub_segmented_reduce_prod {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
_multiply product_op;
// the init value is cast from 1.0f because on host __half can only be
// initialized by float or double; static_cast<__half>(1) = 0 on host.
DeviceSegmentedReduce::Reduce(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end),
product_op, static_cast<T>(1.0f), s);
}
};
//
// **** CUB Min ****
//
struct _cub_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_min {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
DeviceSegmentedReduce::Min(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB Max ****
//
struct _cub_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x),
static_cast<T*>(y), num_items, s);
}
};
struct _cub_segmented_reduce_max {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_segments, void* offset_start, void* offset_end, cudaStream_t s)
{
DeviceSegmentedReduce::Max(workspace, workspace_size,
static_cast<T*>(x), static_cast<T*>(y), num_segments,
static_cast<int*>(offset_start),
static_cast<int*>(offset_end), s);
}
};
//
// **** CUB ArgMin ****
//
struct _cub_reduce_argmin {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmin
//
// **** CUB ArgMax ****
//
struct _cub_reduce_argmax {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t s)
{
DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x),
static_cast<KeyValuePair<int, T>*>(y), num_items, s);
}
};
// TODO(leofang): add _cub_segmented_reduce_argmax
//
// **** CUB SpMV ****
//
struct _cub_device_spmv {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y,
int num_rows, int num_cols, int num_nonzeros, cudaStream_t stream)
{
DeviceSpmv::CsrMV(workspace, workspace_size, static_cast<T*>(values),
static_cast<int*>(row_offsets), static_cast<int*>(column_indices),
static_cast<T*>(x), static_cast<T*>(y), num_rows, num_cols,
num_nonzeros, stream);
}
};
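// Note: the SpMV wrapper assumes the matrix is supplied in CSR form -- `values` holds the
// num_nonzeros coefficients, `row_offsets` has num_rows + 1 int entries, and `column_indices`
// has one int per nonzero -- which is the layout cub::DeviceSpmv::CsrMV operates on.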
//
// **** CUB InclusiveSum ****
//
struct _cub_inclusive_sum {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, cudaStream_t s)
{
DeviceScan::InclusiveSum(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), num_items, s);
}
};
//
// **** CUB inclusive product ****
//
struct _cub_inclusive_product {
template <typename T>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int num_items, cudaStream_t s)
{
_multiply product_op;
DeviceScan::InclusiveScan(workspace, workspace_size, static_cast<T*>(input),
static_cast<T*>(output), product_op, num_items, s);
}
};
//
// **** CUB histogram range ****
//
struct _cub_histogram_range {
template <typename sampleT,
typename binT = typename If<std::is_integral<sampleT>::value, double, sampleT>::Type>
void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
int n_bins, void* bins, size_t n_samples, cudaStream_t s) const
{
// Ugly hack to avoid specializing complex types, which cub::DeviceHistogram does not support.
// The If and Equals templates are from cub/util_type.cuh.
// TODO(leofang): revisit this part when complex support is added to cupy.histogram()
typedef typename If<(Equals<sampleT, complex<float>>::VALUE || Equals<sampleT, complex<double>>::VALUE),
double,
sampleT>::Type h_sampleT;
typedef typename If<(Equals<binT, complex<float>>::VALUE || Equals<binT, complex<double>>::VALUE),
double,
binT>::Type h_binT;
        // TODO(leofang): CUB has a bug where specializing n_samples with type size_t
        // errors out. Until the fix (thrust/cub#38) is merged, the code-path splitting
        // below stays disabled; a type/range check must be done in the caller.
// if (n_samples < (1ULL << 31)) {
int num_samples = n_samples;
DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), num_samples, s);
// } else {
// DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
// static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), n_samples, s);
// }
}
};
//
// APIs exposed to CuPy
//
/* -------- device reduce -------- */
void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_PROD: return dtype_dispatcher(dtype_id, _cub_reduce_prod(),
workspace, workspace_size, x, y, num_items, stream);
default: throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items,
cudaStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_reduce(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
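// Illustrative two-phase usage of the reduction entry points above (a sketch only, not part of
// the exposed API; d_in, d_out, n, stream and dtype_id are assumed to be set up by the caller):
//
//     size_t ws_size = cub_device_reduce_get_workspace_size(d_in, d_out, n, stream,
//                                                           CUPY_CUB_SUM, dtype_id);
//     void* d_ws = nullptr;
//     cudaMalloc(&d_ws, ws_size);                    // allocate temporary storage
//     cub_device_reduce(d_ws, ws_size, d_in, d_out,  // second call performs the reduction
//                       n, stream, CUPY_CUB_SUM, dtype_id);
//     cudaFree(d_ws);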
/* -------- device segmented reduce -------- */
void cub_device_segmented_reduce(void* workspace, size_t& workspace_size,
void* x, void* y, int num_segments, void* offset_start, void* offset_end,
cudaStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_SUM:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MIN:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_MAX:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
case CUPY_CUB_PROD:
return dtype_dispatcher(dtype_id, _cub_segmented_reduce_prod(),
workspace, workspace_size, x, y, num_segments, offset_start,
offset_end, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y,
int num_segments, void* offset_start, void* offset_end,
cudaStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments,
offset_start, offset_end, stream,
op, dtype_id);
return workspace_size;
}
/*--------- device spmv (sparse-matrix dense-vector multiply) ---------*/
void cub_device_spmv(void* workspace, size_t& workspace_size, void* values,
void* row_offsets, void* column_indices, void* x, void* y, int num_rows,
int num_cols, int num_nonzeros, cudaStream_t stream,
int dtype_id)
{
return dtype_dispatcher(dtype_id, _cub_device_spmv(),
workspace, workspace_size, values, row_offsets,
column_indices, x, y, num_rows, num_cols,
num_nonzeros, stream);
}
size_t cub_device_spmv_get_workspace_size(void* values, void* row_offsets,
void* column_indices, void* x, void* y, int num_rows, int num_cols,
int num_nonzeros, cudaStream_t stream, int dtype_id)
{
size_t workspace_size = 0;
cub_device_spmv(NULL, workspace_size, values, row_offsets, column_indices,
x, y, num_rows, num_cols, num_nonzeros, stream, dtype_id);
return workspace_size;
}
/* -------- device scan -------- */
void cub_device_scan(void* workspace, size_t& workspace_size, void* x, void* y,
int num_items, cudaStream_t stream, int op, int dtype_id)
{
switch(op) {
case CUPY_CUB_CUMSUM:
return dtype_dispatcher(dtype_id, _cub_inclusive_sum(),
workspace, workspace_size, x, y, num_items, stream);
case CUPY_CUB_CUMPROD:
return dtype_dispatcher(dtype_id, _cub_inclusive_product(),
workspace, workspace_size, x, y, num_items, stream);
default:
throw std::runtime_error("Unsupported operation");
}
}
size_t cub_device_scan_get_workspace_size(void* x, void* y, int num_items,
cudaStream_t stream, int op, int dtype_id)
{
size_t workspace_size = 0;
cub_device_scan(NULL, workspace_size, x, y, num_items, stream,
op, dtype_id);
return workspace_size;
}
/* -------- device histogram -------- */
void cub_device_histogram_range(void* workspace, size_t& workspace_size, void* x, void* y,
int n_bins, void* bins, size_t n_samples, cudaStream_t stream, int dtype_id)
{
// TODO(leofang): support complex
if (dtype_id == CUPY_TYPE_COMPLEX64 || dtype_id == CUPY_TYPE_COMPLEX128) {
throw std::runtime_error("complex dtype is not yet supported");
}
// TODO(leofang): n_samples is of type size_t, but if it's < 2^31 we cast it to int later
return dtype_dispatcher(dtype_id, _cub_histogram_range(),
workspace, workspace_size, x, y, n_bins, bins, n_samples, stream);
}
size_t cub_device_histogram_range_get_workspace_size(void* x, void* y, int n_bins,
void* bins, size_t n_samples, cudaStream_t stream, int dtype_id)
{
size_t workspace_size = 0;
cub_device_histogram_range(NULL, workspace_size, x, y, n_bins, bins, n_samples,
stream, dtype_id);
return workspace_size;
}
|
416b4faf1c22b077cb6eff25e020d4b511a733c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 1024
namespace ohem_space {
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t *sdata, int blocksize, int tid) {
__syncthreads();
    // NOTE: block size should be a power of two
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
}
template<typename scalar_t>
__forceinline__ __device__ void reduce_max(scalar_t* sdata, int blocksize, int tid) {
__syncthreads();
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) {
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
}
}
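// The two helpers above are standard shared-memory tree reductions: each round halves the number
// of active threads while accumulating (or max-combining) pairs, so `blocksize` must be a power
// of two and every thread of the block must reach the __syncthreads() barriers.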
// kernel functions
template<typename scalar_t>
__global__ void OHEMGetScores(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
scalar_t *scores,
const int64_t *labels,
int *indices,
const int64_t ignore_index) {
// shared memory
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
int sample_offset = gridDim.x * blockDim.y;
sdata = sdata + blockDim.x * threadIdx.y;
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
indices[i] = i;
int n_idx = i / m_size;
int m_idx = i % m_size;
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) scores[i] = scalar_t(1.);
continue;
}
// obtain max
sdata[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[tid]) sdata[tid] = val;
}
__syncthreads();
ohem_space::reduce_max<scalar_t>(sdata, blockDim.x, tid);
scalar_t max_val = sdata[0];
// obtain exp sum
sdata[tid] = 0.;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
sdata[tid] += expf(logits[idx] - max_val);
}
__syncthreads();
ohem_space::reduce_sum<scalar_t>(sdata, blockDim.x, tid);
if (tid == 0) {
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scores[i] = expf(logits[idx] - max_val) / sdata[0];
}
}
}
template<typename scalar_t>
__global__ void OHEMGetScoresSpatial(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
scalar_t *scores,
const int64_t *labels,
int *indices,
const int64_t ignore_index) {
int sample_offset = gridDim.x * blockDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int samplesize = n_size * m_size;
for (int i{tid}; i < samplesize; i += sample_offset) {
indices[i] = i;
int n_idx = i / m_size;
int m_idx = i % m_size;
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
scores[i] = scalar_t(1.);
continue;
}
// obtain max
scalar_t max_val = scalar_t(-10000.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > max_val) max_val = val;
}
// obtain sum exp
scalar_t sum_exp = scalar_t(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
sum_exp += expf(logits[idx] - max_val);
}
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scores[i] = expf(logits[idx] - max_val) / sum_exp;
}
}
template<typename scalar_t>
__global__ void OHEMSetLabels(const int samplesize,
const int *idx,
const scalar_t *scores,
int64_t *ohem_label,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
int sample_offset = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i{static_cast<int>(n_min) + tid}; i < samplesize; i += sample_offset) {
if (scores[i] > score_thresh) ohem_label[idx[i]] = ignore_index;
}
}
// cuda functions
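// Overall flow: (1) compute, per pixel, the softmax probability of its ground-truth class
// (pixels labelled ignore_index get a score of 1 so they sort last), (2) sort the scores in
// ascending order together with the pixel indices, and (3) always keep the n_min hardest pixels
// while re-labelling any remaining pixel whose score exceeds score_thresh with ignore_index.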
at::Tensor Score_ohem_label_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
if (n_min >= samplesize) return labels;
// allocate memory and cuda grid/block
auto ohem_label = labels.clone();
auto scores = torch::empty_like(labels, logits.options());
thrust::device_vector<int> idx(samplesize);
if (ohem_label.numel() == 0) {
THCudaCheck(hipGetLastError());
return ohem_label;
}
// call kernel
if (dimsize < 32 && samplesize > (4 * 1024)) {
int gridx = ::min((int)4096, int(samplesize / BLOCKSIZE));
gridx = ::max((int)1, gridx);
dim3 block1(BLOCKSIZE);
dim3 grid1(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
hipLaunchKernelGGL(( OHEMGetScoresSpatial<scalar_t>), dim3(grid1), dim3(block1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
thrust::raw_pointer_cast(&idx[0]),
ignore_index
);
});
} else {
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = ::max(::min((int)BLOCKSIZE, blockx / 2), (int)32);
int blocky = ::min(samplesize, (int)(BLOCKSIZE / blockx));
blocky = ::max((int)1, blocky);
int gridx = ::min(4096, (int)(samplesize / blocky));
gridx = ::max((int)1, gridx);
int n_shm = blockx * blocky;
dim3 block1(blockx, blocky);
dim3 grid1(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
int shm_size = n_shm * sizeof(scalar_t);
hipLaunchKernelGGL(( OHEMGetScores<scalar_t>), dim3(grid1), dim3(block1), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
thrust::raw_pointer_cast(&idx[0]),
ignore_index
);
});
}
int grid2_num = ::min(4096, (int)(samplesize / BLOCKSIZE));
grid2_num = ::max((int)1, grid2_num);
dim3 block2(BLOCKSIZE);
dim3 grid2(grid2_num);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
thrust::sort_by_key(
thrust::device,
scores.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>() + samplesize,
&idx[0]
);
hipLaunchKernelGGL(( OHEMSetLabels<scalar_t>), dim3(grid2), dim3(block2), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
samplesize, thrust::raw_pointer_cast(&idx[0]),
scores.contiguous().data_ptr<scalar_t>(),
ohem_label.contiguous().data_ptr<int64_t>(),
ignore_index, score_thresh, n_min
);
});
THCudaCheck(hipGetLastError());
return ohem_label;
}
// python interface
at::Tensor Score_ohem_label(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this ohem method only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return Score_ohem_label_cuda(logits, labels, ignore_index, score_thresh, n_min);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("score_ohem_label", &Score_ohem_label, "ohem by score on label");
}
| 416b4faf1c22b077cb6eff25e020d4b511a733c4.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 1024
namespace ohem_space {
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t *sdata, int blocksize, int tid) {
__syncthreads();
    // NOTE: block size should be a power of two
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
}
template<typename scalar_t>
__forceinline__ __device__ void reduce_max(scalar_t* sdata, int blocksize, int tid) {
__syncthreads();
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) {
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
}
}
// kernel functions
template<typename scalar_t>
__global__ void OHEMGetScores(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
scalar_t *scores,
const int64_t *labels,
int *indices,
const int64_t ignore_index) {
// shared memory
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
int sample_offset = gridDim.x * blockDim.y;
sdata = sdata + blockDim.x * threadIdx.y;
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
indices[i] = i;
int n_idx = i / m_size;
int m_idx = i % m_size;
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) scores[i] = scalar_t(1.);
continue;
}
// obtain max
sdata[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[tid]) sdata[tid] = val;
}
__syncthreads();
ohem_space::reduce_max<scalar_t>(sdata, blockDim.x, tid);
scalar_t max_val = sdata[0];
// obtain exp sum
sdata[tid] = 0.;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
sdata[tid] += expf(logits[idx] - max_val);
}
__syncthreads();
ohem_space::reduce_sum<scalar_t>(sdata, blockDim.x, tid);
if (tid == 0) {
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scores[i] = expf(logits[idx] - max_val) / sdata[0];
}
}
}
template<typename scalar_t>
__global__ void OHEMGetScoresSpatial(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
scalar_t *scores,
const int64_t *labels,
int *indices,
const int64_t ignore_index) {
int sample_offset = gridDim.x * blockDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int samplesize = n_size * m_size;
for (int i{tid}; i < samplesize; i += sample_offset) {
indices[i] = i;
int n_idx = i / m_size;
int m_idx = i % m_size;
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
scores[i] = scalar_t(1.);
continue;
}
// obtain max
scalar_t max_val = scalar_t(-10000.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > max_val) max_val = val;
}
// obtain sum exp
scalar_t sum_exp = scalar_t(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
sum_exp += expf(logits[idx] - max_val);
}
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scores[i] = expf(logits[idx] - max_val) / sum_exp;
}
}
template<typename scalar_t>
__global__ void OHEMSetLabels(const int samplesize,
const int *idx,
const scalar_t *scores,
int64_t *ohem_label,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
int sample_offset = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i{static_cast<int>(n_min) + tid}; i < samplesize; i += sample_offset) {
if (scores[i] > score_thresh) ohem_label[idx[i]] = ignore_index;
}
}
// cuda functions
at::Tensor Score_ohem_label_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
if (n_min >= samplesize) return labels;
// allocate memory and cuda grid/block
auto ohem_label = labels.clone();
auto scores = torch::empty_like(labels, logits.options());
thrust::device_vector<int> idx(samplesize);
if (ohem_label.numel() == 0) {
THCudaCheck(cudaGetLastError());
return ohem_label;
}
// call kernel
if (dimsize < 32 && samplesize > (4 * 1024)) {
int gridx = std::min((int)4096, int(samplesize / BLOCKSIZE));
gridx = std::max((int)1, gridx);
dim3 block1(BLOCKSIZE);
dim3 grid1(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
OHEMGetScoresSpatial<scalar_t><<<grid1, block1, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
thrust::raw_pointer_cast(&idx[0]),
ignore_index
);
});
} else {
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = std::max(std::min((int)BLOCKSIZE, blockx / 2), (int)32);
int blocky = std::min(samplesize, (int)(BLOCKSIZE / blockx));
blocky = std::max((int)1, blocky);
int gridx = std::min(4096, (int)(samplesize / blocky));
gridx = std::max((int)1, gridx);
int n_shm = blockx * blocky;
dim3 block1(blockx, blocky);
dim3 grid1(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
int shm_size = n_shm * sizeof(scalar_t);
OHEMGetScores<scalar_t><<<grid1, block1, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
thrust::raw_pointer_cast(&idx[0]),
ignore_index
);
});
}
int grid2_num = std::min(4096, (int)(samplesize / BLOCKSIZE));
grid2_num = std::max((int)1, grid2_num);
dim3 block2(BLOCKSIZE);
dim3 grid2(grid2_num);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "ohem score label", [&] {
thrust::sort_by_key(
thrust::device,
scores.contiguous().data_ptr<scalar_t>(),
scores.contiguous().data_ptr<scalar_t>() + samplesize,
&idx[0]
);
OHEMSetLabels<scalar_t><<<grid2, block2, 0, at::cuda::getCurrentCUDAStream()>>>(
samplesize, thrust::raw_pointer_cast(&idx[0]),
scores.contiguous().data_ptr<scalar_t>(),
ohem_label.contiguous().data_ptr<int64_t>(),
ignore_index, score_thresh, n_min
);
});
THCudaCheck(cudaGetLastError());
return ohem_label;
}
// python interface
at::Tensor Score_ohem_label(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float score_thresh,
const int64_t n_min) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this ohem method only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return Score_ohem_label_cuda(logits, labels, ignore_index, score_thresh, n_min);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("score_ohem_label", &Score_ohem_label, "ohem by score on label");
}
|
c4d5770666b7afa49814712f03bf3cac8fc8d6d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/selected_rows/adamw_kernel.h"
#include <math.h> // for sqrt in CPU and CUDA
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace sr {
template <typename T>
__global__ void UpdateAdamWBetaPow(T beta1,
T beta2,
const T* beta1_pow_,
const T* beta2_pow_,
T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
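// The kernel below applies the decoupled-weight-decay AdamW update per element: the parameter is
// first scaled by (1 - lr * coeff), then the bias-corrected Adam step
//   p -= lr / (1 - beta1^t) * m1 / (sqrt(m2) / sqrt(1 - beta2^t) + epsilon)
// is added, with the gradient gathered from the sparse rows (zero where the row is absent,
// unless lazy_mode skips the element entirely).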
template <typename T, typename MT>
__global__ void SparseAdamWCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
MT coeff,
MT lr_ratio,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_ * lr_ratio;
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = static_cast<MT>(mom1_[id]);
MT mom2 = static_cast<MT>(mom2_[id]);
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
p *= (static_cast<MT>(1.0) - lr * coeff);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T, typename Context>
void AdamwDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const SelectedRows& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
float lr_ratio,
float coeff,
bool with_decay,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
MPDType coeff_ = static_cast<MPDType>(coeff);
MPDType lr_ratio_ = static_cast<MPDType>(lr_ratio);
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
phi::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adamw skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
if (!use_global_beta_pow) {
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
}
return;
}
// if with_decay = false, coeff = 0
if (!with_decay) {
coeff_ = static_cast<MPDType>(0.0);
}
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
if (grad.rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
phi::SelectedRows tmp_grad_merge;
const phi::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = &grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
phi::funcs::scatter::MergeAdd<Context, T> merge_func;
merge_func(dev_ctx, grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
auto* grad_merge_rows = &grad_merge.rows();
phi::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
int threads = 512;
int ndim = param.numel();
int blocks = (ndim + threads - 1) / threads;
hipLaunchKernelGGL(( SparseAdamWCUDAKernelREG<T, MPDType>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
coeff_,
lr_ratio_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode,
ndim);
if (!use_global_beta_pow) {
// Update with cpu
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
funcs::SparseAdamWFunctor<T, funcs::GPUAdamW, MPDType> functor(
beta1_,
beta2_,
epsilon_,
coeff_,
lr_ratio_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
funcs::ForRange<Context> for_range(dev_ctx, param.numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
hipLaunchKernelGGL(( UpdateAdamWBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
} // namespace sr
} // namespace phi
PD_REGISTER_KERNEL(adamw_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::sr::AdamwDenseParamSparseGradKernel,
float,
double,
phi::dtype::float16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
}
kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
| c4d5770666b7afa49814712f03bf3cac8fc8d6d7.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/selected_rows/adamw_kernel.h"
#include <math.h> // for sqrt in CPU and CUDA
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace sr {
template <typename T>
__global__ void UpdateAdamWBetaPow(T beta1,
T beta2,
const T* beta1_pow_,
const T* beta2_pow_,
T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
template <typename T, typename MT>
__global__ void SparseAdamWCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
MT coeff,
MT lr_ratio,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_ * lr_ratio;
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = static_cast<MT>(mom1_[id]);
MT mom2 = static_cast<MT>(mom2_[id]);
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
p *= (static_cast<MT>(1.0) - lr * coeff);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T, typename Context>
void AdamwDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const SelectedRows& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
float lr_ratio,
float coeff,
bool with_decay,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
MPDType coeff_ = static_cast<MPDType>(coeff);
MPDType lr_ratio_ = static_cast<MPDType>(lr_ratio);
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
phi::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adamw skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
if (!use_global_beta_pow) {
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
}
return;
}
// if with_decay = false, coeff = 0
if (!with_decay) {
coeff_ = static_cast<MPDType>(0.0);
}
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
if (grad.rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
phi::SelectedRows tmp_grad_merge;
const phi::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = &grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
phi::funcs::scatter::MergeAdd<Context, T> merge_func;
merge_func(dev_ctx, grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
auto* grad_merge_rows = &grad_merge.rows();
phi::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
int threads = 512;
int ndim = param.numel();
int blocks = (ndim + threads - 1) / threads;
SparseAdamWCUDAKernelREG<T, MPDType>
<<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
coeff_,
lr_ratio_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode,
ndim);
if (!use_global_beta_pow) {
// Update with cpu
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
funcs::SparseAdamWFunctor<T, funcs::GPUAdamW, MPDType> functor(
beta1_,
beta2_,
epsilon_,
coeff_,
lr_ratio_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
funcs::ForRange<Context> for_range(dev_ctx, param.numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
UpdateAdamWBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
} // namespace sr
} // namespace phi
PD_REGISTER_KERNEL(adamw_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::sr::AdamwDenseParamSparseGradKernel,
float,
double,
phi::dtype::float16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
}
kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
|
05dae437a370eddc4c039afc854e60823303fd91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// We assume that NUM_ELEMENTS is divisible by BLOCK_SIZE
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
// CUDA API error checking macro
static void handleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define cudaCheck( err ) (handleError( err, __FILE__, __LINE__ ))
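// Each block stages BLOCK_SIZE + 2*RADIUS inputs in shared memory: every thread copies its own
// element, and the first RADIUS threads additionally fetch the left and right halo cells, so the
// stencil sum over 2*RADIUS + 1 neighbours reads each input from global memory only once.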
__global__ void stencil_1d(int *in, int *out)
{
    // The __shared__ keyword declares variables in per-block shared memory
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex-RADIUS] = result;
}
int main()
{
unsigned int i;
int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
cudaCheck( hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
cudaCheck( hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( stencil_1d), dim3((NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out);
// Check errors from launching the kernel
cudaCheck(hipPeekAtLastError());
cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost) );
// Verify every out value is 7
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != 7)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("SUCCESS!\n");
  // Free device memory
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 05dae437a370eddc4c039afc854e60823303fd91.cu | #include <stdio.h>
// We assume that NUM_ELEMENTS is divisible by BLOCK_SIZE
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
// CUDA API error checking macro
static void handleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define cudaCheck( err ) (handleError( err, __FILE__, __LINE__ ))
__global__ void stencil_1d(int *in, int *out)
{
    // The __shared__ keyword declares variables in per-block shared memory
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
temp[lindex] = in[gindex];
if (threadIdx.x < RADIUS)
{
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex-RADIUS] = result;
}
int main()
{
unsigned int i;
int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );
stencil_1d<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
// Check errors from launching the kernel
cudaCheck(cudaPeekAtLastError());
cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );
// Verify every out value is 7
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != 7)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("SUCCESS!\n");
  // Free device memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
382d81d665d187de328f698dc506aec7a07b29ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "chrono_multicore/physics/ChMPM.cuh"
#include "chrono_multicore/physics/MPMUtils.h"
#include "chrono_multicore/ChCudaHelper.cuh"
#include "chrono_multicore/ChGPUVector.cuh"
#include "chrono_thirdparty/hipcub/hipcub.hpp"
#include "chrono_multicore/math/matrixf.cuh"
//#define BOX_YIELD
#define SPHERE_YIELD
//#define DRUCKER_PRAGER
namespace chrono {
struct Bounds {
float minimum[3];
float maximum[3];
};
float3 min_bounding_point;
float3 max_bounding_point;
MPM_Settings host_settings;
std::vector<int> particle_node_mapping;
std::vector<int> node_particle_mapping;
std::vector<int> node_start_index;
std::vector<int> particle_number;
unsigned int num_mpm_nodes_active;
// GPU Things
float3* lower_bound;
float3* upper_bound;
gpu_vector<float> pos, vel, JE_JP;
gpu_vector<float> node_mass;
gpu_vector<float> marker_volume;
gpu_vector<float> grid_vel, delta_v;
gpu_vector<float> rhs;
gpu_vector<float> marker_Fe, marker_Fe_hat, marker_Fp;
gpu_vector<float> PolarS, PolarR;
gpu_vector<float> old_vel_node_mpm;
gpu_vector<float> ml, mg, mg_p, ml_p;
gpu_vector<float> dot_g_proj_norm;
gpu_vector<float> marker_plasticity;
CUDA_CONSTANT MPM_Settings device_settings;
CUDA_CONSTANT Bounds system_bounds;
hipEvent_t start;
hipEvent_t stop;
float time_measured = 0;
/////// BB Constants
__device__ float alpha = 0.0001;
__device__ float dot_ms_ms = 0;
__device__ float dot_ms_my = 0;
__device__ float dot_my_my = 0;
#define a_min 1e-13
#define a_max 1e13
#define neg_BB1_fallback 0.11
#define neg_BB2_fallback 0.12
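// Solver constants above: a_min / a_max bound the Barzilai-Borwein step size used by the MPM
// solver, and the neg_BB*_fallback values appear to be the fixed fallback steps taken when a BB
// step estimate comes out non-positive.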
#define LOOP_TWO_RING_GPUSP(X) \
cx = GridCoord(xix, inv_bin_edge, system_bounds.minimum[0]); \
cy = GridCoord(xiy, inv_bin_edge, system_bounds.minimum[1]); \
cz = GridCoord(xiz, inv_bin_edge, system_bounds.minimum[2]); \
for (int i = cx - 2; i <= cx + 2; ++i) { \
for (int j = cy - 2; j <= cy + 2; ++j) { \
for (int k = cz - 2; k <= cz + 2; ++k) { \
int current_node = GridHash(i, j, k, device_settings.bins_per_axis_x, device_settings.bins_per_axis_y, \
device_settings.bins_per_axis_z); \
float current_node_locationx = i * bin_edge + system_bounds.minimum[0]; \
float current_node_locationy = j * bin_edge + system_bounds.minimum[1]; \
float current_node_locationz = k * bin_edge + system_bounds.minimum[2]; \
X \
} \
} \
}
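// LOOP_TWO_RING_GPUSP visits the 5x5x5 block of grid nodes surrounding the particle's cell (the
// "two-ring" support of the interpolation weights); inside the body X, `current_node` is the
// hashed node index and current_node_location{x,y,z} is the node position in world coordinates.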
//////========================================================================================================================================================================
////
void CUDA_HOST_DEVICE WeakEqual(const float& x, const float& y, float COMPARE_EPS = FLT_EPSILON) {
if (fabsf(x - y) > COMPARE_EPS) {
printf("%f does not equal %f %.20e\n", x, y, fabsf(x - y));
// exit(1);
}
}
void CUDA_HOST_DEVICE WeakEqual(const Mat33f& a, const Mat33f& b, float COMPARE_EPS = FLT_EPSILON) {
WeakEqual(a[0], b[0], COMPARE_EPS);
WeakEqual(a[1], b[1], COMPARE_EPS);
WeakEqual(a[2], b[2], COMPARE_EPS);
WeakEqual(a[3], b[3], COMPARE_EPS);
WeakEqual(a[4], b[4], COMPARE_EPS);
WeakEqual(a[5], b[5], COMPARE_EPS);
WeakEqual(a[6], b[6], COMPARE_EPS);
WeakEqual(a[7], b[7], COMPARE_EPS);
WeakEqual(a[8], b[8], COMPARE_EPS);
}
CUDA_GLOBAL void kComputeBounds(const float* pos, // input
float3* lower, // output
float3* upper // output
) {
typedef hipcub::BlockReduce<float3, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(device_settings.num_mpm_markers - block_start, blockDim.x);
const int index = block_start + threadIdx.x;
if (index < device_settings.num_mpm_markers) {
float3 data = make_float3(pos[index * 3 + 0], pos[index * 3 + 1], pos[index * 3 + 2]);
float3 blockUpper = BlockReduce(temp_storage).Reduce(data, float3Max(), num_valid);
__syncthreads();
float3 blockLower = BlockReduce(temp_storage).Reduce(data, float3Min(), num_valid);
if (threadIdx.x == 0) {
AtomicMax(upper, blockUpper);
AtomicMin(lower, blockLower);
}
}
}
////========================================================================================================================================================================
CUDA_GLOBAL void kRasterize(const float* sorted_pos, // input
const float* sorted_vel, // input
float* grid_mass, // output
float* grid_vel) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
const float vix = sorted_vel[p * 3 + 0];
const float viy = sorted_vel[p * 3 + 1];
const float viz = sorted_vel[p * 3 + 2];
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge) * device_settings.mass;
atomicAdd(&grid_mass[current_node], weight); //
atomicAdd(&grid_vel[current_node * 3 + 0], weight * vix);
atomicAdd(&grid_vel[current_node * 3 + 1], weight * viy);
atomicAdd(&grid_vel[current_node * 3 + 2], weight * viz);)
}
}
CUDA_GLOBAL void kRasterize(const float* sorted_pos, // input
float* grid_mass) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge) * device_settings.mass;
atomicAdd(&grid_mass[current_node], weight); //
)
}
}
//
////========================================================================================================================================================================
//
CUDA_GLOBAL void kNormalizeWeights(float* grid_mass, // input
float* grid_vel) { // output
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
float n_mass = grid_mass[i];
if (n_mass > FLT_EPSILON) {
grid_vel[i * 3 + 0] /= n_mass;
grid_vel[i * 3 + 1] /= n_mass;
grid_vel[i * 3 + 2] /= n_mass;
}
}
}
//////========================================================================================================================================================================
////
CUDA_GLOBAL void kComputeParticleVolumes(const float* sorted_pos, // input
float* grid_mass, // output
float* volume) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
float particle_density = 0;
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge);
particle_density += grid_mass[current_node] * weight; //
)
// Inverse density to remove division
particle_density = (bin_edge * bin_edge * bin_edge) / particle_density;
volume[p] = device_settings.mass * particle_density;
}
}
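// kFeHat builds the trial ("hat") elastic deformation gradient used by the implicit solve:
// Fe_hat = (I + dt * grad(v)) * Fe, where grad(v) is assembled from the grid velocities with the
// spline gradient weights dN()/N() over the two-ring neighborhood.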
CUDA_GLOBAL void kFeHat(const float* sorted_pos, // input
const float* marker_Fe, // input
const float* grid_vel, // input
float* marker_Fe_hat) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
Mat33f Fe_hat_t(0.0);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP(
float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
Fe_hat_t[0] += vnx * valx; Fe_hat_t[1] += vny * valx; Fe_hat_t[2] += vnz * valx; //
Fe_hat_t[3] += vnx * valy; Fe_hat_t[4] += vny * valy; Fe_hat_t[5] += vnz * valy; //
Fe_hat_t[6] += vnx * valz; Fe_hat_t[7] += vny * valz; Fe_hat_t[8] += vnz * valz;
// float3 vel(grid_vel[current_node * 3 + 0], grid_vel[current_node * 3 + 1],
// grid_vel[current_node * 3 + 2]); //
// float3 kern = dN(xi - current_node_location, inv_bin_edge); //
// Fe_hat_t += OuterProduct(device_settings.dt * vel, kern);
)
Mat33f m_Fe(marker_Fe, p, device_settings.num_mpm_markers);
Mat33f m_Fe_hat = (Mat33f(1.0) + device_settings.dt * Fe_hat_t) * m_Fe;
m_Fe_hat.Store(marker_Fe_hat, p, device_settings.num_mpm_markers);
}
}
// CUDA_GLOBAL void kSVD(Mat33f* marker_Fe_hat, Mat33f* PolarR, Mat33f* PolarS) {
// const int p = blockIdx.x * blockDim.x + threadIdx.x;
// if (p < device_settings.num_mpm_markers) {
// Mat33f U, V, R, S, W;
// float3 E;
// SVD(marker_Fe_hat[p], U, E, V);
// // Perform polar decomposition F = R*S
// R = MultTranspose(U, V);
// S = V * MultTranspose(Mat33f(E), V);
//
// PolarR[p] = R;
// PolarS[p] = S;
// }
//}
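// kApplyForces evaluates the elastic stress of each particle from the volume-preserving part of Fe
// (Ja * Fe with Ja = J^(-1/3)), using an SVD-based polar decomposition whose R and S factors are cached
// in PolarR/PolarS for reuse by kMultiplyA. The resulting force impulse is scattered to the grid and
// divided by the nodal mass, i.e. it is applied directly as a velocity change on grid_vel.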
CUDA_GLOBAL void kApplyForces(const float* sorted_pos, // input
const float* marker_Fe_hat, // input
const float* marker_Fe, // input
const float* marker_volume, // input
const float* node_mass, // input
const float* plasticity, // input
                                  float* PolarR,             // output
                                  float* PolarS,             // output
float* grid_vel) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const Mat33f FE(marker_Fe, p, device_settings.num_mpm_markers);
const Mat33f FE_hat(marker_Fe_hat, p, device_settings.num_mpm_markers);
const float a = -one_third;
const float J = Determinant(FE_hat);
const float Ja = powf(J, a);
#if defined(BOX_YIELD) || defined(SPHERE_YIELD)
const float current_mu = device_settings.mu * expf(device_settings.hardening_coefficient * (plasticity[p]));
#else
const float current_mu = device_settings.mu;
#endif
#if 1
Mat33f JaFE = Ja * FE;
Mat33f UE, VE;
float3 EE;
SVD(JaFE, UE, EE, VE); /* Perform a polar decomposition, FE=RE*SE, RE is the Unitary part*/
Mat33f RE = MultTranspose(UE, VE);
Mat33f SE = VE * MultTranspose(EE, VE);
RE.Store(PolarR, p, device_settings.num_mpm_markers);
PolarS[p + 0 * device_settings.num_mpm_markers] = SE[0];
PolarS[p + 1 * device_settings.num_mpm_markers] = SE[1];
PolarS[p + 2 * device_settings.num_mpm_markers] = SE[2];
PolarS[p + 3 * device_settings.num_mpm_markers] = SE[4];
PolarS[p + 4 * device_settings.num_mpm_markers] = SE[5];
PolarS[p + 5 * device_settings.num_mpm_markers] = SE[8];
#else
const Mat33f A = Potential_Energy_Derivative_Deviatoric(Ja * FE_hat, current_mu, PolarR[p], PolarS[p]);
#endif
const Mat33f H = AdjointTranspose(FE_hat) * (1.0f / J);
const Mat33f A = 2.f * current_mu * (JaFE - RE);
const Mat33f Z_B = Z__B(A, FE_hat, Ja, a, H);
const Mat33f vPEDFepT = device_settings.dt * marker_volume[p] * MultTranspose(Z_B, FE);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
LOOP_TWO_RING_GPUSP( //
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
float fx = vPEDFepT[0] * valx + vPEDFepT[3] * valy + vPEDFepT[6] * valz;
float fy = vPEDFepT[1] * valx + vPEDFepT[4] * valy + vPEDFepT[7] * valz;
float fz = vPEDFepT[2] * valx + vPEDFepT[5] * valy + vPEDFepT[8] * valz;
float mass = node_mass[current_node]; //
if (mass > 0) {
atomicAdd(&grid_vel[current_node * 3 + 0], -fx / mass); //
atomicAdd(&grid_vel[current_node * 3 + 1], -fy / mass); //
atomicAdd(&grid_vel[current_node * 3 + 2], -fz / mass); //
})
}
}
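// kRhs builds the right-hand side of the implicit velocity system, b = M * v*, where v* is the grid
// velocity after the explicit force application in kApplyForces; nodes with no mass get a zero RHS.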
CUDA_GLOBAL void kRhs(const float* node_mass, // input
const float* grid_vel,
float* rhs) {
const int current_node = blockIdx.x * blockDim.x + threadIdx.x;
if (current_node < device_settings.num_mpm_nodes) {
float mass = node_mass[current_node]; //
if (mass > 0) {
rhs[current_node * 3 + 0] = mass * grid_vel[current_node * 3 + 0]; //
rhs[current_node * 3 + 1] = mass * grid_vel[current_node * 3 + 1]; //
rhs[current_node * 3 + 2] = mass * grid_vel[current_node * 3 + 2]; //
} else {
rhs[current_node * 3 + 0] = 0;
rhs[current_node * 3 + 1] = 0;
rhs[current_node * 3 + 2] = 0;
}
}
}
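// kMultiplyA and kMultiplyB together apply the implicit system matrix without assembling it:
// kMultiplyA gathers a velocity-gradient increment per particle, pushes it through the second
// derivative of the elastic energy (reusing the cached polar factors R and S), and scatters the result
// back to the grid, while kMultiplyB adds the lumped-mass diagonal. Multiply() below chains the two.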
CUDA_GLOBAL void kMultiplyA(const float* sorted_pos, // input
const float* v_array,
const float* old_vel_node_mpm,
const float* PolarR, // input
const float* PolarS, // input
const float* marker_Fe, // input
const float* marker_Fe_hat, // input
const float* marker_volume, // input
const float* plasticity, // input
float* result_array) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
// float VAP[7];
// float delta_F[7] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
Mat33f delta_F(0.0f);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float vnx = v_array[current_node * 3 + 0]; //
float vny = v_array[current_node * 3 + 1]; //
float vnz = v_array[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
delta_F[0] += vnx * valx; delta_F[1] += vny * valx; delta_F[2] += vnz * valx; //
delta_F[3] += vnx * valy; delta_F[4] += vny * valy; delta_F[5] += vnz * valy; //
delta_F[6] += vnx * valz; delta_F[7] += vny * valz; delta_F[8] += vnz * valz;)
const Mat33f m_FE(marker_Fe, p, device_settings.num_mpm_markers);
delta_F = delta_F * m_FE;
#if defined(BOX_YIELD) || defined(SPHERE_YIELD)
const float current_mu =
2.0f * device_settings.mu * expf(device_settings.hardening_coefficient * (plasticity[p]));
#else
const float current_mu = 2.0f * device_settings.mu;
#endif
Mat33f RE(PolarR, p, device_settings.num_mpm_markers);
const Mat33f F(marker_Fe_hat, p, device_settings.num_mpm_markers);
const float a = -one_third;
const float J = Determinant(F);
const float Ja = powf(J, a);
const Mat33f H = AdjointTranspose(F) * (1.0f / J);
const Mat33f B_Z = B__Z(delta_F, F, Ja, a, H);
const Mat33f WE = TransposeMult(RE, B_Z);
// C is the original second derivative
SymMat33f SE;
SE[0] = PolarS[p + device_settings.num_mpm_markers * 0];
SE[1] = PolarS[p + device_settings.num_mpm_markers * 1];
SE[2] = PolarS[p + device_settings.num_mpm_markers * 2];
SE[3] = PolarS[p + device_settings.num_mpm_markers * 3];
SE[4] = PolarS[p + device_settings.num_mpm_markers * 4];
SE[5] = PolarS[p + device_settings.num_mpm_markers * 5];
const Mat33f C_B_Z = current_mu * (B_Z - Solve_dR(RE, SE, WE));
const Mat33f FE = Ja * F;
const Mat33f A = current_mu * (FE - RE);
const Mat33f P1 = Z__B(C_B_Z, F, Ja, a, H);
const Mat33f P2 = (a * DoubleDot(H, delta_F)) * Z__B(A, F, Ja, a, H);
const Mat33f P3 = (a * Ja * DoubleDot(A, delta_F)) * H;
const Mat33f P4 = (-a * Ja * DoubleDot(A, F)) * H * TransposeMult(delta_F, H);
const Mat33f VAP = marker_volume[p] * MultTranspose(P1 + P2 + P3 + P4, m_FE);
// Mat33f VAP = d2PsidFdFO(delta_F, m_FE_hat, PolarR[p], PolarS[p], current_mu);
// WeakEqual(VAP, VAP2);
LOOP_TWO_RING_GPUSP( //
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
float resx = VAP[0] * valx + VAP[3] * valy + VAP[6] * valz;
float resy = VAP[1] * valx + VAP[4] * valy + VAP[7] * valz;
float resz = VAP[2] * valx + VAP[5] * valy + VAP[8] * valz;
atomicAdd(&result_array[current_node * 3 + 0], resx); atomicAdd(&result_array[current_node * 3 + 1], resy);
atomicAdd(&result_array[current_node * 3 + 2], resz););
}
}
CUDA_GLOBAL void kMultiplyB(const float* v_array,
const float* old_vel_node_mpm,
const float* node_mass,
float* result_array) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
float mass = node_mass[i];
if (mass > 0) {
result_array[i * 3 + 0] += mass * (v_array[i * 3 + 0]);
result_array[i * 3 + 1] += mass * (v_array[i * 3 + 1]);
result_array[i * 3 + 2] += mass * (v_array[i * 3 + 2]);
}
}
}
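// MPM_ComputeBounds rebuilds the active grid around the current particle cloud: it reduces the particle
// AABB on the device, snaps the bounds to multiples of the kernel radius, pads them by several cells,
// and derives bins_per_axis_* / num_mpm_nodes, which are then pushed to device_settings.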
void MPM_ComputeBounds() {
max_bounding_point = make_float3(-FLT_MAX, -FLT_MAX, -FLT_MAX);
min_bounding_point = make_float3(FLT_MAX, FLT_MAX, FLT_MAX);
hipMemcpyAsync(lower_bound, &min_bounding_point, sizeof(float3), hipMemcpyHostToDevice);
hipMemcpyAsync(upper_bound, &max_bounding_point, sizeof(float3), hipMemcpyHostToDevice);
kComputeBounds<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, //
lower_bound, //
upper_bound); //
hipMemcpy(&min_bounding_point, lower_bound, sizeof(float3), hipMemcpyDeviceToHost);
hipMemcpy(&max_bounding_point, upper_bound, sizeof(float3), hipMemcpyDeviceToHost);
min_bounding_point.x = host_settings.kernel_radius * roundf(min_bounding_point.x / host_settings.kernel_radius);
min_bounding_point.y = host_settings.kernel_radius * roundf(min_bounding_point.y / host_settings.kernel_radius);
min_bounding_point.z = host_settings.kernel_radius * roundf(min_bounding_point.z / host_settings.kernel_radius);
max_bounding_point.x = host_settings.kernel_radius * roundf(max_bounding_point.x / host_settings.kernel_radius);
max_bounding_point.y = host_settings.kernel_radius * roundf(max_bounding_point.y / host_settings.kernel_radius);
max_bounding_point.z = host_settings.kernel_radius * roundf(max_bounding_point.z / host_settings.kernel_radius);
max_bounding_point = max_bounding_point + host_settings.kernel_radius * 8;
min_bounding_point = min_bounding_point - host_settings.kernel_radius * 6;
hipMemcpyToSymbolAsync(system_bounds, &min_bounding_point, sizeof(float3), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbolAsync(system_bounds, &max_bounding_point, sizeof(float3), sizeof(float3), hipMemcpyHostToDevice);
host_settings.bin_edge = host_settings.kernel_radius * 2;
host_settings.bins_per_axis_x = int(max_bounding_point.x - min_bounding_point.x) / (int)host_settings.bin_edge;
host_settings.bins_per_axis_y = int(max_bounding_point.y - min_bounding_point.y) / (int)host_settings.bin_edge;
host_settings.bins_per_axis_z = int(max_bounding_point.z - min_bounding_point.z) / (int)host_settings.bin_edge;
host_settings.inv_bin_edge = float(1.) / host_settings.bin_edge;
host_settings.num_mpm_nodes =
host_settings.bins_per_axis_x * host_settings.bins_per_axis_y * host_settings.bins_per_axis_z;
cudaCheck(hipMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
printf("max_bounding_point [%f %f %f]\n", max_bounding_point.x, max_bounding_point.y, max_bounding_point.z);
printf("min_bounding_point [%f %f %f]\n", min_bounding_point.x, min_bounding_point.y, min_bounding_point.z);
printf("Compute DOF [%d %d %d] [%f] %d %d\n", host_settings.bins_per_axis_x, host_settings.bins_per_axis_y,
host_settings.bins_per_axis_z, host_settings.bin_edge, host_settings.num_mpm_nodes,
host_settings.num_mpm_markers);
}
//
void Multiply(gpu_vector<float>& input, gpu_vector<float>& output) {
int size = (int)input.size();
kMultiplyA<<<CONFIG(size)>>>(pos.data_d, // input
input.data_d, //
old_vel_node_mpm.data_d,
PolarR.data_d, // input
PolarS.data_d, // input
marker_Fe.data_d, // input
marker_Fe_hat.data_d, // input
marker_volume.data_d, // input
marker_plasticity.data_d, // input
output.data_d);
kMultiplyB<<<CONFIG(size)>>>(input.data_d, old_vel_node_mpm.data_d, node_mass.data_d, output.data_d);
}
CUDA_GLOBAL void kSubtract(int size, float* x, float* y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = y[i] - x[i];
}
}
template <bool inner>
CUDA_GLOBAL void kResetGlobals() {
if (inner) {
dot_ms_ms = 0;
dot_ms_my = 0;
dot_my_my = 0;
} else {
alpha = 0.0001;
}
}
template <bool even>
CUDA_GLOBAL void kUpdateAlpha(int num_items, float* ml_p, float* ml, float* mg_p, float* mg) {
typedef hipcub::BlockReduce<float, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(num_items - block_start, blockDim.x);
const int tid = block_start + threadIdx.x;
if (tid < num_items) {
float data, block_sum;
float ms = ml_p[tid] - ml[tid];
float my = mg_p[tid] - mg[tid];
if (even) {
data = ms * ms;
block_sum = BlockReduce(temp_storage).Reduce(data, hipcub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_ms_ms, block_sum);
}
} else {
data = my * my;
block_sum = BlockReduce(temp_storage).Reduce(data, hipcub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_my_my, block_sum);
}
}
__syncthreads();
data = ms * my;
block_sum = BlockReduce(temp_storage).Reduce(data, hipcub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_ms_my, block_sum);
}
}
}
template <bool even>
CUDA_GLOBAL void kAlpha() {
if (even) {
if (dot_ms_my <= 0) {
alpha = neg_BB1_fallback;
} else {
alpha = fminf(a_max, fmaxf(a_min, dot_ms_ms / dot_ms_my));
}
} else {
if (dot_ms_my <= 0) {
alpha = neg_BB2_fallback;
} else {
alpha = fminf(a_max, fmaxf(a_min, dot_ms_my / dot_my_my));
}
}
// printf("alpha: %f %f %f %f \n", alpha, dot_ms_ms, dot_ms_my, dot_my_my);
}
CUDA_GLOBAL void kCompute_ml_p(int num_items, float* ml, float* mg, float* ml_p) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_items) {
ml_p[i] = ml[i] - alpha * mg[i];
// printf("mlps : [%f %f %f]\n", ml_p[i], ml[i], mg[i]);
}
}
CUDA_GLOBAL void kResidual(int num_items, float* mg, float* dot_g_proj_norm) {
typedef hipcub::BlockReduce<float, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(num_items - block_start, blockDim.x);
float data, block_sum;
const int tid = block_start + threadIdx.x;
if (tid < num_items) {
data = mg[tid] * mg[tid];
block_sum = BlockReduce(temp_storage).Reduce(data, hipcub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_g_proj_norm[0], block_sum);
}
// printf("resid [%f %f]\n", mg[tid], dot_g_proj_norm[0]);
}
}
float time_no_shur = 0;
float time_shur = 0;
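// MPM_BBSolver solves the linear system with a Barzilai-Borwein (spectral) gradient method: each
// iteration steps v <- v - alpha * g and alternates the BB1 and BB2 step-size formulas computed in
// kUpdateAlpha/kAlpha from s = v_new - v and y = g_new - g. The iterate with the smallest gradient
// norm seen so far is kept as the solution in delta_v.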
void MPM_BBSolver(gpu_vector<float>& r, gpu_vector<float>& delta_v) {
time_shur = 0;
time_no_shur = 0;
const unsigned int size = (unsigned int)r.size();
float lastgoodres = 10e30f;
{
CudaEventTimer timer(start, stop, true, time_no_shur);
dot_g_proj_norm.resize(1);
ml.resize(size);
mg.resize(size);
mg_p.resize(size);
ml_p.resize(size);
ml = delta_v;
mg = 0;
}
{
CudaEventTimer timer(start, stop, true, time_shur);
Multiply(ml, mg);
}
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kSubtract<<<CONFIG(size)>>>(size, r.data_d, mg.data_d);
mg_p = mg;
}
hipLaunchKernelGGL(( kResetGlobals<false>), dim3(1), dim3(1), 0, 0, );
for (int current_iteration = 0; current_iteration < host_settings.num_iterations; current_iteration++) {
{
CudaEventTimer timer(start, stop, true, time_no_shur);
hipLaunchKernelGGL(( kResetGlobals<true>), dim3(1), dim3(1), 0, 0, );
kCompute_ml_p<<<CONFIG(size)>>>(size, ml.data_d, mg.data_d, ml_p.data_d);
mg_p = 0;
}
{
CudaEventTimer timer(start, stop, true, time_shur);
Multiply(ml_p, mg_p);
}
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kSubtract<<<CONFIG(size)>>>(size, r.data_d, mg_p.data_d);
if (current_iteration % 2 == 0) {
kUpdateAlpha<true><<<CONFIG(size)>>>(size, ml_p.data_d, ml.data_d, mg_p.data_d, mg.data_d);
hipLaunchKernelGGL(( kAlpha<true>), dim3(1), dim3(1), 0, 0, );
} else {
kUpdateAlpha<false><<<CONFIG(size)>>>(size, ml_p.data_d, ml.data_d, mg_p.data_d, mg.data_d);
hipLaunchKernelGGL(( kAlpha<false>), dim3(1), dim3(1), 0, 0, );
}
ml = ml_p;
mg = mg_p;
dot_g_proj_norm = 0;
kResidual<<<CONFIG(size)>>>(size, mg.data_d, dot_g_proj_norm.data_d);
dot_g_proj_norm.copyDeviceToHost();
float g_proj_norm = sqrtf(dot_g_proj_norm.data_h[0]);
if (g_proj_norm < lastgoodres) {
lastgoodres = g_proj_norm;
delta_v = ml;
}
// printf("[%f]\n", lastgoodres);
}
}
cudaCheck(hipPeekAtLastError());
cudaCheck(hipDeviceSynchronize());
printf("MPM Solver: [%f, %f %f] \n", time_no_shur, time_shur, lastgoodres);
}
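// kIncrementVelocity adds the solved velocity change (delta_v - old grid velocity) on top of the
// explicitly updated grid velocity, completing the implicit update of grid_vel.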
CUDA_GLOBAL void kIncrementVelocity(float* delta_v, float* old_vel_node_mpm, float* grid_vel) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
grid_vel[i * 3 + 0] += delta_v[i * 3 + 0] - old_vel_node_mpm[i * 3 + 0];
grid_vel[i * 3 + 1] += delta_v[i * 3 + 1] - old_vel_node_mpm[i * 3 + 1];
grid_vel[i * 3 + 2] += delta_v[i * 3 + 2] - old_vel_node_mpm[i * 3 + 2];
}
}
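// kUpdateParticleVelocity transfers the grid solution back to the particles as a PIC/FLIP blend:
// V_pic is the directly interpolated grid velocity, V_flip adds only the change in grid velocity to
// the old particle velocity, and the two are mixed with the device-global `alpha` before the result is
// clamped to max_velocity. Note that `alpha` is the same global the BB solver writes each iteration.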
CUDA_GLOBAL void kUpdateParticleVelocity(float* grid_vel,
float* old_vel_node_mpm,
float* pos_marker,
float* vel_marker) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = pos_marker[p * 3 + 0];
const float xiy = pos_marker[p * 3 + 1];
const float xiz = pos_marker[p * 3 + 2];
float3 V_flip;
V_flip.x = vel_marker[p * 3 + 0];
V_flip.y = vel_marker[p * 3 + 1];
V_flip.z = vel_marker[p * 3 + 2];
float3 V_pic = make_float3(0.0, 0.0, 0.0);
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
int cx, cy, cz;
LOOP_TWO_RING_GPUSP(
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge);
float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
V_pic.x += vnx * weight; //
V_pic.y += vny * weight; //
V_pic.z += vnz * weight; //
V_flip.x += (vnx - old_vel_node_mpm[current_node * 3 + 0]) * weight; //
V_flip.y += (vny - old_vel_node_mpm[current_node * 3 + 1]) * weight; //
V_flip.z += (vnz - old_vel_node_mpm[current_node * 3 + 2]) * weight; //
)
float3 new_vel = (1.0 - alpha) * V_pic + alpha * V_flip;
float speed = Length(new_vel);
if (speed > device_settings.max_velocity) {
new_vel = new_vel * device_settings.max_velocity / speed;
}
vel_marker[p * 3 + 0] = new_vel.x;
vel_marker[p * 3 + 1] = new_vel.y;
vel_marker[p * 3 + 2] = new_vel.z;
}
}
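// kUpdateDeformationGradient advances the elastic/plastic split of the deformation gradient:
// Fe_trial = (I + dt * grad(v)) * Fe is decomposed with an SVD, its singular values are clamped
// according to the compiled-in yield model (BOX_YIELD, SPHERE_YIELD or DRUCKER_PRAGER), the clamped
// part remains elastic and the remainder is absorbed into Fp. The final store keeps Fp purely
// deviatoric by moving its volume change JP^(1/3) into Fe, and the JE/JP determinants are exported.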
CUDA_GLOBAL void kUpdateDeformationGradient(float* grid_vel,
float* pos_marker,
float* marker_Fe,
float* marker_Fp,
float* plasticity,
float* JE_JP) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = pos_marker[p * 3 + 0];
const float xiy = pos_marker[p * 3 + 1];
const float xiz = pos_marker[p * 3 + 2];
Mat33f vel_grad(0.0);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP(float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
vel_grad[0] += vnx * valx; vel_grad[1] += vny * valx; vel_grad[2] += vnz * valx; //
vel_grad[3] += vnx * valy; vel_grad[4] += vny * valy; vel_grad[5] += vnz * valy; //
vel_grad[6] += vnx * valz; vel_grad[7] += vny * valz; vel_grad[8] += vnz * valz;
)
Mat33f delta_F = (Mat33f(1.0) + device_settings.dt * vel_grad);
Mat33f m_FE(marker_Fe, p, device_settings.num_mpm_markers);
Mat33f m_FPpre(marker_Fp, p, device_settings.num_mpm_markers);
Mat33f Fe_tmp = delta_F * m_FE;
Mat33f F_tmp = Fe_tmp * m_FPpre;
Mat33f U, V;
float3 E;
SVD(Fe_tmp, U, E, V);
float3 E_clamped = E;
#if defined(BOX_YIELD)
// Simple box clamp
E_clamped.x = Clamp(E.x, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
E_clamped.y = Clamp(E.y, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
E_clamped.z = Clamp(E.z, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
plasticity[p] = fabsf(E.x * E.y * E.z - E_clamped.x * E_clamped.y * E_clamped.z);
#elif defined(SPHERE_YIELD)
// Clamp to sphere (better)
float center = 1.0 + (device_settings.theta_s - device_settings.theta_c) * .5;
float radius = (device_settings.theta_s + device_settings.theta_c) * .5;
float3 offset = E - center;
float lent = Length(offset);
if (lent > radius) {
offset = offset * radius / lent;
}
E_clamped = offset + center;
plasticity[p] = fabsf(E.x * E.y * E.z - E_clamped.x * E_clamped.y * E_clamped.z);
#elif defined(DRUCKER_PRAGER)
float3 eps = make_float3(logf(E.x), logf(E.y), logf(E.z));
float tr_eps = (eps.x + eps.y + eps.z);
float3 eps_hat = make_float3(logf(E.x), logf(E.y), logf(E.z));
float f_norm_eps_hat = Length(eps_hat);
float delta_gp = f_norm_eps_hat +
(3.0f * device_settings.lambda + 2.0f * device_settings.mu) / (2.0f * device_settings.mu) *
tr_eps * 0;//plasticity[p + device_settings.num_mpm_markers];
float delta_qp = 0;
if (delta_gp <= 0) {
// CASE 1
delta_qp = 0;
} else if (f_norm_eps_hat == 0 || tr_eps > 0) {
// CASE 2
delta_qp = f_norm_eps_hat;
E_clamped = make_float3(1.0f, 1.0f, 1.0f);
} else {
// CASE 3
delta_qp = delta_gp;
E_clamped.x = expf(eps.x - delta_gp * eps_hat.x / f_norm_eps_hat);
E_clamped.y = expf(eps.y - delta_gp * eps_hat.y / f_norm_eps_hat);
E_clamped.z = expf(eps.z - delta_gp * eps_hat.z / f_norm_eps_hat);
}
// Holds the plasticity
float qp_new = plasticity[p] + delta_qp;
float theta_Fp = 0.00110865;
// device_settings.h0 + (device_settings.h1 * qp_new - device_settings.h3) * exp(-device_settings.h2 *
// qp_new);
// 35.0f + (9.0f * qp_new - 10.0f) * exp(-.2f * qp_new);
plasticity[p] = qp_new;
plasticity[p + device_settings.num_mpm_markers] =
sqrtf(2.0 / 3.0) * (2.0f * sinf(theta_Fp)) / (3.0f - sinf(theta_Fp));
printf("YLD: [%f %f %f] %f [%f %f]\n", delta_gp, f_norm_eps_hat, tr_eps, eps_hat.x + eps_hat.y + eps_hat.z,
qp_new, plasticity[p + device_settings.num_mpm_markers]);
#endif
// printf("E %d %f %f %f\n", p, E_clamped.x * E_clamped.y * E_clamped.z, E.x * E.y * E.z, plasticity[p]);
// Inverse of Diagonal E_clamped matrix is 1/E_clamped
Mat33f m_FP = V * MultTranspose(Mat33f(1.0 / E_clamped), U) * F_tmp;
float JP_new = Determinant(m_FP);
// Ensure that F_p is purely deviatoric
Mat33f T1 = powf(JP_new, 1.0 / 3.0) * U * MultTranspose(Mat33f(E_clamped), V);
Mat33f T2 = powf(JP_new, -1.0 / 3.0) * m_FP;
JE_JP[p * 2 + 0] = Determinant(T1);
JE_JP[p * 2 + 1] = Determinant(T2);
T1.Store(marker_Fe, p, device_settings.num_mpm_markers);
T2.Store(marker_Fp, p, device_settings.num_mpm_markers);
// printf("JP: %f JE: %f\n", Determinant(marker_Fe[p]), Determinant(marker_Fp[p]));
}
}
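// Host entry points. MPM_UpdateDeformationGradient uploads the particle state, rebuilds the grid,
// rasterizes mass/velocity and updates Fe/Fp (returning the JE/JP determinants); MPM_Solve is then
// expected to run on the same grid state to perform the implicit velocity update and copy the new
// particle velocities back to the host.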
void MPM_UpdateDeformationGradient(MPM_Settings& settings,
std::vector<float>& positions,
std::vector<float>& velocities,
std::vector<float>& jejp) {
hipEventCreate(&start);
hipEventCreate(&stop);
host_settings = settings;
printf("Solving MPM: %d\n", host_settings.num_iterations);
pos.data_h = positions;
pos.copyHostToDevice();
vel.data_h = velocities;
vel.copyHostToDevice();
cudaCheck(hipMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
MPM_ComputeBounds();
node_mass.resize(host_settings.num_mpm_nodes);
node_mass = 0;
grid_vel.resize(host_settings.num_mpm_nodes * 3);
grid_vel = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
// ========================================================================================
kRasterize<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
vel.data_d, // input
node_mass.data_d, // output
grid_vel.data_d // output
);
}
printf("kRasterize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kNormalizeWeights<<<CONFIG(host_settings.num_mpm_nodes)>>>(node_mass.data_d, // output
grid_vel.data_d);
}
printf("kNormalizeWeights: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kUpdateDeformationGradient<<<CONFIG(host_settings.num_mpm_markers)>>>(
grid_vel.data_d, pos.data_d, marker_Fe.data_d, marker_Fp.data_d, marker_plasticity.data_d, JE_JP.data_d);
JE_JP.copyDeviceToHost();
}
jejp = JE_JP.data_h;
printf("kUpdateDeformationGradient: %f\n", time_measured);
time_measured = 0;
}
void MPM_Solve(MPM_Settings& settings, std::vector<float>& positions, std::vector<float>& velocities) {
old_vel_node_mpm.resize(host_settings.num_mpm_nodes * 3);
rhs.resize(host_settings.num_mpm_nodes * 3);
old_vel_node_mpm = grid_vel;
// cudaCheck(hipPeekAtLastError());
// cudaCheck(hipDeviceSynchronize());
{
CudaEventTimer timer(start, stop, true, time_measured);
kFeHat<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, marker_Fe.data_d, grid_vel.data_d,
marker_Fe_hat.data_d);
}
printf("kFeHat: %f\n", time_measured);
time_measured = 0;
// kSVD<<<CONFIG(host_settings.num_mpm_markers)>>>(marker_Fe_hat.data_d, PolarR.data_d, PolarS.data_d);
{
CudaEventTimer timer(start, stop, true, time_measured);
kApplyForces<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
marker_Fe_hat.data_d, // input
marker_Fe.data_d, // input
marker_volume.data_d, // input
node_mass.data_d, // input
marker_plasticity.data_d, // input
PolarR.data_d, // output
PolarS.data_d, // output
grid_vel.data_d); // output
}
printf("kApplyForces: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kRhs<<<CONFIG(host_settings.num_mpm_nodes)>>>(node_mass.data_d, grid_vel.data_d, rhs.data_d);
}
printf("kRhs: %f\n", time_measured);
time_measured = 0;
delta_v.resize(host_settings.num_mpm_nodes * 3);
delta_v = old_vel_node_mpm;
MPM_BBSolver(rhs, delta_v);
{
CudaEventTimer timer(start, stop, true, time_measured);
kIncrementVelocity<<<CONFIG(host_settings.num_mpm_nodes)>>>(delta_v.data_d, old_vel_node_mpm.data_d,
grid_vel.data_d);
}
printf("kIncrementVelocity: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kUpdateParticleVelocity<<<CONFIG(host_settings.num_mpm_markers)>>>(grid_vel.data_d, old_vel_node_mpm.data_d,
pos.data_d, vel.data_d);
}
printf("kUpdateParticleVelocity: %f\n", time_measured);
time_measured = 0;
vel.copyDeviceToHost();
velocities = vel.data_h;
hipEventDestroy(start);
hipEventDestroy(stop);
}
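// kInitFeFp seeds every marker with an undeformed state: Fe, Fp and the polar rotation R start as the
// identity, and the symmetric stretch S is stored as the identity in its packed 6-component layout.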
CUDA_GLOBAL void kInitFeFp(float* marker_Fe, float* marker_Fp, float* marker_RE, float* marker_SE) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_markers) {
Mat33f T(1.0f);
T.Store(marker_Fe, i, device_settings.num_mpm_markers);
T.Store(marker_Fp, i, device_settings.num_mpm_markers);
T.Store(marker_RE, i, device_settings.num_mpm_markers);
marker_SE[i + device_settings.num_mpm_markers * 0] = 1.0f;
marker_SE[i + device_settings.num_mpm_markers * 1] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 2] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 3] = 1.0f;
marker_SE[i + device_settings.num_mpm_markers * 4] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 5] = 1.0f;
}
}
void MPM_Initialize(MPM_Settings& settings, std::vector<float>& positions) {
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
hipEventCreate(&start);
hipEventCreate(&stop);
host_settings = settings;
cudaCheck(hipMalloc(&lower_bound, sizeof(float3)));
cudaCheck(hipMalloc(&upper_bound, sizeof(float3)));
pos.data_h = positions;
pos.copyHostToDevice();
cudaCheck(hipMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
MPM_ComputeBounds();
marker_volume.resize(host_settings.num_mpm_markers);
node_mass.resize(host_settings.num_mpm_nodes);
node_mass = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kRasterize<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
node_mass.data_d); // output
}
printf("kRasterize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kComputeParticleVolumes<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
node_mass.data_d, // input
marker_volume.data_d); // output
}
printf("kComputeParticleVolumes: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
marker_Fe.resize(host_settings.num_mpm_markers * 9);
marker_Fe_hat.resize(host_settings.num_mpm_markers * 9);
marker_Fp.resize(host_settings.num_mpm_markers * 9);
PolarR.resize(host_settings.num_mpm_markers * 9);
PolarS.resize(host_settings.num_mpm_markers * 6);
JE_JP.resize(host_settings.num_mpm_markers * 2);
marker_plasticity.resize(host_settings.num_mpm_markers * 2);
marker_plasticity = 0;
}
printf("Resize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kInitFeFp<<<CONFIG(host_settings.num_mpm_markers)>>>(marker_Fe.data_d, // output
marker_Fp.data_d, // output
PolarR.data_d, // output
PolarS.data_d); // output
}
printf("kInitFeFp: %f\n", time_measured);
time_measured = 0;
// cudaCheck(hipPeekAtLastError());
// cudaCheck(hipDeviceSynchronize());
hipEventDestroy(start);
hipEventDestroy(stop);
}
}
////========================================================================================================================================================================
//// 382d81d665d187de328f698dc506aec7a07b29ea.cu
////========================================================================================================================================================================
#include "chrono_multicore/physics/ChMPM.cuh"
#include "chrono_multicore/physics/MPMUtils.h"
#include "chrono_multicore/ChCudaHelper.cuh"
#include "chrono_multicore/ChGPUVector.cuh"
#include "chrono_thirdparty/cub/cub.cuh"
#include "chrono_multicore/math/matrixf.cuh"
//#define BOX_YIELD
#define SPHERE_YIELD
//#define DRUCKER_PRAGER
namespace chrono {
struct Bounds {
float minimum[3];
float maximum[3];
};
float3 min_bounding_point;
float3 max_bounding_point;
MPM_Settings host_settings;
std::vector<int> particle_node_mapping;
std::vector<int> node_particle_mapping;
std::vector<int> node_start_index;
std::vector<int> particle_number;
unsigned int num_mpm_nodes_active;
// GPU Things
float3* lower_bound;
float3* upper_bound;
gpu_vector<float> pos, vel, JE_JP;
gpu_vector<float> node_mass;
gpu_vector<float> marker_volume;
gpu_vector<float> grid_vel, delta_v;
gpu_vector<float> rhs;
gpu_vector<float> marker_Fe, marker_Fe_hat, marker_Fp;
gpu_vector<float> PolarS, PolarR;
gpu_vector<float> old_vel_node_mpm;
gpu_vector<float> ml, mg, mg_p, ml_p;
gpu_vector<float> dot_g_proj_norm;
gpu_vector<float> marker_plasticity;
CUDA_CONSTANT MPM_Settings device_settings;
CUDA_CONSTANT Bounds system_bounds;
cudaEvent_t start;
cudaEvent_t stop;
float time_measured = 0;
/////// BB Constants
__device__ float alpha = 0.0001;
__device__ float dot_ms_ms = 0;
__device__ float dot_ms_my = 0;
__device__ float dot_my_my = 0;
#define a_min 1e-13
#define a_max 1e13
#define neg_BB1_fallback 0.11
#define neg_BB2_fallback 0.12
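// Step-size bounds and fallbacks for the Barzilai-Borwein solver: alpha is clamped to [a_min, a_max],
// and the neg_BB*_fallback values are used whenever the corresponding BB dot product is non-positive
// (see kAlpha).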
#define LOOP_TWO_RING_GPUSP(X) \
cx = GridCoord(xix, inv_bin_edge, system_bounds.minimum[0]); \
cy = GridCoord(xiy, inv_bin_edge, system_bounds.minimum[1]); \
cz = GridCoord(xiz, inv_bin_edge, system_bounds.minimum[2]); \
for (int i = cx - 2; i <= cx + 2; ++i) { \
for (int j = cy - 2; j <= cy + 2; ++j) { \
for (int k = cz - 2; k <= cz + 2; ++k) { \
int current_node = GridHash(i, j, k, device_settings.bins_per_axis_x, device_settings.bins_per_axis_y, \
device_settings.bins_per_axis_z); \
float current_node_locationx = i * bin_edge + system_bounds.minimum[0]; \
float current_node_locationy = j * bin_edge + system_bounds.minimum[1]; \
float current_node_locationz = k * bin_edge + system_bounds.minimum[2]; \
X \
} \
} \
}
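// LOOP_TWO_RING_GPUSP expands to a loop over the 5x5x5 block of grid nodes surrounding a particle
// (two rings of cells around its own). Inside the body X, `current_node` is the flattened node index
// and current_node_location{x,y,z} is the node position, so kernels can gather from or scatter to the
// grid with the spline weights.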
//////========================================================================================================================================================================
////
void CUDA_HOST_DEVICE WeakEqual(const float& x, const float& y, float COMPARE_EPS = FLT_EPSILON) {
if (fabsf(x - y) > COMPARE_EPS) {
printf("%f does not equal %f %.20e\n", x, y, fabsf(x - y));
// exit(1);
}
}
void CUDA_HOST_DEVICE WeakEqual(const Mat33f& a, const Mat33f& b, float COMPARE_EPS = FLT_EPSILON) {
WeakEqual(a[0], b[0], COMPARE_EPS);
WeakEqual(a[1], b[1], COMPARE_EPS);
WeakEqual(a[2], b[2], COMPARE_EPS);
WeakEqual(a[3], b[3], COMPARE_EPS);
WeakEqual(a[4], b[4], COMPARE_EPS);
WeakEqual(a[5], b[5], COMPARE_EPS);
WeakEqual(a[6], b[6], COMPARE_EPS);
WeakEqual(a[7], b[7], COMPARE_EPS);
WeakEqual(a[8], b[8], COMPARE_EPS);
}
CUDA_GLOBAL void kComputeBounds(const float* pos, // input
float3* lower, // output
float3* upper // output
) {
typedef cub::BlockReduce<float3, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(device_settings.num_mpm_markers - block_start, blockDim.x);
const int index = block_start + threadIdx.x;
if (index < device_settings.num_mpm_markers) {
float3 data = make_float3(pos[index * 3 + 0], pos[index * 3 + 1], pos[index * 3 + 2]);
float3 blockUpper = BlockReduce(temp_storage).Reduce(data, float3Max(), num_valid);
__syncthreads();
float3 blockLower = BlockReduce(temp_storage).Reduce(data, float3Min(), num_valid);
if (threadIdx.x == 0) {
AtomicMax(upper, blockUpper);
AtomicMin(lower, blockLower);
}
}
}
////========================================================================================================================================================================
CUDA_GLOBAL void kRasterize(const float* sorted_pos, // input
const float* sorted_vel, // input
float* grid_mass, // output
float* grid_vel) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
const float vix = sorted_vel[p * 3 + 0];
const float viy = sorted_vel[p * 3 + 1];
const float viz = sorted_vel[p * 3 + 2];
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge) * device_settings.mass;
atomicAdd(&grid_mass[current_node], weight); //
atomicAdd(&grid_vel[current_node * 3 + 0], weight * vix);
atomicAdd(&grid_vel[current_node * 3 + 1], weight * viy);
atomicAdd(&grid_vel[current_node * 3 + 2], weight * viz);)
}
}
CUDA_GLOBAL void kRasterize(const float* sorted_pos, // input
float* grid_mass) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge) * device_settings.mass;
atomicAdd(&grid_mass[current_node], weight); //
)
}
}
//
////========================================================================================================================================================================
//
CUDA_GLOBAL void kNormalizeWeights(float* grid_mass, // input
float* grid_vel) { // output
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
float n_mass = grid_mass[i];
if (n_mass > FLT_EPSILON) {
grid_vel[i * 3 + 0] /= n_mass;
grid_vel[i * 3 + 1] /= n_mass;
grid_vel[i * 3 + 2] /= n_mass;
}
}
}
//////========================================================================================================================================================================
////
CUDA_GLOBAL void kComputeParticleVolumes(const float* sorted_pos, // input
                                             float* grid_mass, // input
                                             float* volume) {  // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
float particle_density = 0;
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge);
particle_density += grid_mass[current_node] * weight; //
)
// Inverse density to remove division
particle_density = (bin_edge * bin_edge * bin_edge) / particle_density;
volume[p] = device_settings.mass * particle_density;
}
}
CUDA_GLOBAL void kFeHat(const float* sorted_pos, // input
const float* marker_Fe, // input
const float* grid_vel, // input
float* marker_Fe_hat) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
Mat33f Fe_hat_t(0.0);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP(
float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
Fe_hat_t[0] += vnx * valx; Fe_hat_t[1] += vny * valx; Fe_hat_t[2] += vnz * valx; //
Fe_hat_t[3] += vnx * valy; Fe_hat_t[4] += vny * valy; Fe_hat_t[5] += vnz * valy; //
Fe_hat_t[6] += vnx * valz; Fe_hat_t[7] += vny * valz; Fe_hat_t[8] += vnz * valz;
// float3 vel(grid_vel[current_node * 3 + 0], grid_vel[current_node * 3 + 1],
// grid_vel[current_node * 3 + 2]); //
// float3 kern = dN(xi - current_node_location, inv_bin_edge); //
// Fe_hat_t += OuterProduct(device_settings.dt * vel, kern);
)
Mat33f m_Fe(marker_Fe, p, device_settings.num_mpm_markers);
Mat33f m_Fe_hat = (Mat33f(1.0) + device_settings.dt * Fe_hat_t) * m_Fe;
m_Fe_hat.Store(marker_Fe_hat, p, device_settings.num_mpm_markers);
}
}
// CUDA_GLOBAL void kSVD(Mat33f* marker_Fe_hat, Mat33f* PolarR, Mat33f* PolarS) {
// const int p = blockIdx.x * blockDim.x + threadIdx.x;
// if (p < device_settings.num_mpm_markers) {
// Mat33f U, V, R, S, W;
// float3 E;
// SVD(marker_Fe_hat[p], U, E, V);
// // Perform polar decomposition F = R*S
// R = MultTranspose(U, V);
// S = V * MultTranspose(Mat33f(E), V);
//
// PolarR[p] = R;
// PolarS[p] = S;
// }
//}
CUDA_GLOBAL void kApplyForces(const float* sorted_pos, // input
const float* marker_Fe_hat, // input
const float* marker_Fe, // input
const float* marker_volume, // input
const float* node_mass, // input
const float* plasticity, // input
                                  float* PolarR, // output
                                  float* PolarS, // output
float* grid_vel) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const Mat33f FE(marker_Fe, p, device_settings.num_mpm_markers);
const Mat33f FE_hat(marker_Fe_hat, p, device_settings.num_mpm_markers);
const float a = -one_third;
const float J = Determinant(FE_hat);
const float Ja = powf(J, a);
#if defined(BOX_YIELD) || defined(SPHERE_YIELD)
const float current_mu = device_settings.mu * expf(device_settings.hardening_coefficient * (plasticity[p]));
#else
const float current_mu = device_settings.mu;
#endif
#if 1
Mat33f JaFE = Ja * FE;
Mat33f UE, VE;
float3 EE;
SVD(JaFE, UE, EE, VE); /* Perform a polar decomposition, FE=RE*SE, RE is the Unitary part*/
Mat33f RE = MultTranspose(UE, VE);
Mat33f SE = VE * MultTranspose(EE, VE);
RE.Store(PolarR, p, device_settings.num_mpm_markers);
PolarS[p + 0 * device_settings.num_mpm_markers] = SE[0];
PolarS[p + 1 * device_settings.num_mpm_markers] = SE[1];
PolarS[p + 2 * device_settings.num_mpm_markers] = SE[2];
PolarS[p + 3 * device_settings.num_mpm_markers] = SE[4];
PolarS[p + 4 * device_settings.num_mpm_markers] = SE[5];
PolarS[p + 5 * device_settings.num_mpm_markers] = SE[8];
#else
const Mat33f A = Potential_Energy_Derivative_Deviatoric(Ja * FE_hat, current_mu, PolarR[p], PolarS[p]);
#endif
const Mat33f H = AdjointTranspose(FE_hat) * (1.0f / J);
const Mat33f A = 2.f * current_mu * (JaFE - RE);
const Mat33f Z_B = Z__B(A, FE_hat, Ja, a, H);
const Mat33f vPEDFepT = device_settings.dt * marker_volume[p] * MultTranspose(Z_B, FE);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
LOOP_TWO_RING_GPUSP( //
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
float fx = vPEDFepT[0] * valx + vPEDFepT[3] * valy + vPEDFepT[6] * valz;
float fy = vPEDFepT[1] * valx + vPEDFepT[4] * valy + vPEDFepT[7] * valz;
float fz = vPEDFepT[2] * valx + vPEDFepT[5] * valy + vPEDFepT[8] * valz;
float mass = node_mass[current_node]; //
if (mass > 0) {
atomicAdd(&grid_vel[current_node * 3 + 0], -fx / mass); //
atomicAdd(&grid_vel[current_node * 3 + 1], -fy / mass); //
atomicAdd(&grid_vel[current_node * 3 + 2], -fz / mass); //
})
}
}
CUDA_GLOBAL void kRhs(const float* node_mass, // input
const float* grid_vel,
float* rhs) {
const int current_node = blockIdx.x * blockDim.x + threadIdx.x;
if (current_node < device_settings.num_mpm_nodes) {
float mass = node_mass[current_node]; //
if (mass > 0) {
rhs[current_node * 3 + 0] = mass * grid_vel[current_node * 3 + 0]; //
rhs[current_node * 3 + 1] = mass * grid_vel[current_node * 3 + 1]; //
rhs[current_node * 3 + 2] = mass * grid_vel[current_node * 3 + 2]; //
} else {
rhs[current_node * 3 + 0] = 0;
rhs[current_node * 3 + 1] = 0;
rhs[current_node * 3 + 2] = 0;
}
}
}
CUDA_GLOBAL void kMultiplyA(const float* sorted_pos, // input
const float* v_array,
const float* old_vel_node_mpm,
const float* PolarR, // input
const float* PolarS, // input
const float* marker_Fe, // input
const float* marker_Fe_hat, // input
const float* marker_volume, // input
const float* plasticity, // input
float* result_array) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
// float VAP[7];
// float delta_F[7] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
Mat33f delta_F(0.0f);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float vnx = v_array[current_node * 3 + 0]; //
float vny = v_array[current_node * 3 + 1]; //
float vnz = v_array[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
delta_F[0] += vnx * valx; delta_F[1] += vny * valx; delta_F[2] += vnz * valx; //
delta_F[3] += vnx * valy; delta_F[4] += vny * valy; delta_F[5] += vnz * valy; //
delta_F[6] += vnx * valz; delta_F[7] += vny * valz; delta_F[8] += vnz * valz;)
const Mat33f m_FE(marker_Fe, p, device_settings.num_mpm_markers);
delta_F = delta_F * m_FE;
#if defined(BOX_YIELD) || defined(SPHERE_YIELD)
const float current_mu =
2.0f * device_settings.mu * expf(device_settings.hardening_coefficient * (plasticity[p]));
#else
const float current_mu = 2.0f * device_settings.mu;
#endif
Mat33f RE(PolarR, p, device_settings.num_mpm_markers);
const Mat33f F(marker_Fe_hat, p, device_settings.num_mpm_markers);
const float a = -one_third;
const float J = Determinant(F);
const float Ja = powf(J, a);
const Mat33f H = AdjointTranspose(F) * (1.0f / J);
const Mat33f B_Z = B__Z(delta_F, F, Ja, a, H);
const Mat33f WE = TransposeMult(RE, B_Z);
// C is the original second derivative
SymMat33f SE;
SE[0] = PolarS[p + device_settings.num_mpm_markers * 0];
SE[1] = PolarS[p + device_settings.num_mpm_markers * 1];
SE[2] = PolarS[p + device_settings.num_mpm_markers * 2];
SE[3] = PolarS[p + device_settings.num_mpm_markers * 3];
SE[4] = PolarS[p + device_settings.num_mpm_markers * 4];
SE[5] = PolarS[p + device_settings.num_mpm_markers * 5];
const Mat33f C_B_Z = current_mu * (B_Z - Solve_dR(RE, SE, WE));
const Mat33f FE = Ja * F;
const Mat33f A = current_mu * (FE - RE);
const Mat33f P1 = Z__B(C_B_Z, F, Ja, a, H);
const Mat33f P2 = (a * DoubleDot(H, delta_F)) * Z__B(A, F, Ja, a, H);
const Mat33f P3 = (a * Ja * DoubleDot(A, delta_F)) * H;
const Mat33f P4 = (-a * Ja * DoubleDot(A, F)) * H * TransposeMult(delta_F, H);
const Mat33f VAP = marker_volume[p] * MultTranspose(P1 + P2 + P3 + P4, m_FE);
// Mat33f VAP = d2PsidFdFO(delta_F, m_FE_hat, PolarR[p], PolarS[p], current_mu);
// WeakEqual(VAP, VAP2);
LOOP_TWO_RING_GPUSP( //
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
float resx = VAP[0] * valx + VAP[3] * valy + VAP[6] * valz;
float resy = VAP[1] * valx + VAP[4] * valy + VAP[7] * valz;
float resz = VAP[2] * valx + VAP[5] * valy + VAP[8] * valz;
atomicAdd(&result_array[current_node * 3 + 0], resx); atomicAdd(&result_array[current_node * 3 + 1], resy);
atomicAdd(&result_array[current_node * 3 + 2], resz););
}
}
CUDA_GLOBAL void kMultiplyB(const float* v_array,
const float* old_vel_node_mpm,
const float* node_mass,
float* result_array) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
float mass = node_mass[i];
if (mass > 0) {
result_array[i * 3 + 0] += mass * (v_array[i * 3 + 0]);
result_array[i * 3 + 1] += mass * (v_array[i * 3 + 1]);
result_array[i * 3 + 2] += mass * (v_array[i * 3 + 2]);
}
}
}
void MPM_ComputeBounds() {
max_bounding_point = make_float3(-FLT_MAX, -FLT_MAX, -FLT_MAX);
min_bounding_point = make_float3(FLT_MAX, FLT_MAX, FLT_MAX);
cudaMemcpyAsync(lower_bound, &min_bounding_point, sizeof(float3), cudaMemcpyHostToDevice);
cudaMemcpyAsync(upper_bound, &max_bounding_point, sizeof(float3), cudaMemcpyHostToDevice);
kComputeBounds<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, //
lower_bound, //
upper_bound); //
cudaMemcpy(&min_bounding_point, lower_bound, sizeof(float3), cudaMemcpyDeviceToHost);
cudaMemcpy(&max_bounding_point, upper_bound, sizeof(float3), cudaMemcpyDeviceToHost);
min_bounding_point.x = host_settings.kernel_radius * roundf(min_bounding_point.x / host_settings.kernel_radius);
min_bounding_point.y = host_settings.kernel_radius * roundf(min_bounding_point.y / host_settings.kernel_radius);
min_bounding_point.z = host_settings.kernel_radius * roundf(min_bounding_point.z / host_settings.kernel_radius);
max_bounding_point.x = host_settings.kernel_radius * roundf(max_bounding_point.x / host_settings.kernel_radius);
max_bounding_point.y = host_settings.kernel_radius * roundf(max_bounding_point.y / host_settings.kernel_radius);
max_bounding_point.z = host_settings.kernel_radius * roundf(max_bounding_point.z / host_settings.kernel_radius);
max_bounding_point = max_bounding_point + host_settings.kernel_radius * 8;
min_bounding_point = min_bounding_point - host_settings.kernel_radius * 6;
cudaMemcpyToSymbolAsync(system_bounds, &min_bounding_point, sizeof(float3), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbolAsync(system_bounds, &max_bounding_point, sizeof(float3), sizeof(float3), cudaMemcpyHostToDevice);
host_settings.bin_edge = host_settings.kernel_radius * 2;
host_settings.bins_per_axis_x = int(max_bounding_point.x - min_bounding_point.x) / (int)host_settings.bin_edge;
host_settings.bins_per_axis_y = int(max_bounding_point.y - min_bounding_point.y) / (int)host_settings.bin_edge;
host_settings.bins_per_axis_z = int(max_bounding_point.z - min_bounding_point.z) / (int)host_settings.bin_edge;
host_settings.inv_bin_edge = float(1.) / host_settings.bin_edge;
host_settings.num_mpm_nodes =
host_settings.bins_per_axis_x * host_settings.bins_per_axis_y * host_settings.bins_per_axis_z;
cudaCheck(cudaMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
printf("max_bounding_point [%f %f %f]\n", max_bounding_point.x, max_bounding_point.y, max_bounding_point.z);
printf("min_bounding_point [%f %f %f]\n", min_bounding_point.x, min_bounding_point.y, min_bounding_point.z);
printf("Compute DOF [%d %d %d] [%f] %d %d\n", host_settings.bins_per_axis_x, host_settings.bins_per_axis_y,
host_settings.bins_per_axis_z, host_settings.bin_edge, host_settings.num_mpm_nodes,
host_settings.num_mpm_markers);
}
//
void Multiply(gpu_vector<float>& input, gpu_vector<float>& output) {
int size = (int)input.size();
kMultiplyA<<<CONFIG(size)>>>(pos.data_d, // input
input.data_d, //
old_vel_node_mpm.data_d,
PolarR.data_d, // input
PolarS.data_d, // input
marker_Fe.data_d, // input
marker_Fe_hat.data_d, // input
marker_volume.data_d, // input
marker_plasticity.data_d, // input
output.data_d);
kMultiplyB<<<CONFIG(size)>>>(input.data_d, old_vel_node_mpm.data_d, node_mass.data_d, output.data_d);
}
CUDA_GLOBAL void kSubtract(int size, float* x, float* y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = y[i] - x[i];
}
}
template <bool inner>
CUDA_GLOBAL void kResetGlobals() {
if (inner) {
dot_ms_ms = 0;
dot_ms_my = 0;
dot_my_my = 0;
} else {
alpha = 0.0001;
}
}
template <bool even>
CUDA_GLOBAL void kUpdateAlpha(int num_items, float* ml_p, float* ml, float* mg_p, float* mg) {
typedef cub::BlockReduce<float, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(num_items - block_start, blockDim.x);
const int tid = block_start + threadIdx.x;
if (tid < num_items) {
float data, block_sum;
float ms = ml_p[tid] - ml[tid];
float my = mg_p[tid] - mg[tid];
if (even) {
data = ms * ms;
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_ms_ms, block_sum);
}
} else {
data = my * my;
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_my_my, block_sum);
}
}
__syncthreads();
data = ms * my;
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_ms_my, block_sum);
}
}
}
template <bool even>
CUDA_GLOBAL void kAlpha() {
if (even) {
if (dot_ms_my <= 0) {
alpha = neg_BB1_fallback;
} else {
alpha = fminf(a_max, fmaxf(a_min, dot_ms_ms / dot_ms_my));
}
} else {
if (dot_ms_my <= 0) {
alpha = neg_BB2_fallback;
} else {
alpha = fminf(a_max, fmaxf(a_min, dot_ms_my / dot_my_my));
}
}
// printf("alpha: %f %f %f %f \n", alpha, dot_ms_ms, dot_ms_my, dot_my_my);
}
CUDA_GLOBAL void kCompute_ml_p(int num_items, float* ml, float* mg, float* ml_p) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_items) {
ml_p[i] = ml[i] - alpha * mg[i];
// printf("mlps : [%f %f %f]\n", ml_p[i], ml[i], mg[i]);
}
}
CUDA_GLOBAL void kResidual(int num_items, float* mg, float* dot_g_proj_norm) {
typedef cub::BlockReduce<float, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(num_items - block_start, blockDim.x);
float data, block_sum;
const int tid = block_start + threadIdx.x;
if (tid < num_items) {
data = mg[tid] * mg[tid];
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_g_proj_norm[0], block_sum);
}
// printf("resid [%f %f]\n", mg[tid], dot_g_proj_norm[0]);
}
}
float time_no_shur = 0;
float time_shur = 0;
void MPM_BBSolver(gpu_vector<float>& r, gpu_vector<float>& delta_v) {
time_shur = 0;
time_no_shur = 0;
const unsigned int size = (unsigned int)r.size();
float lastgoodres = 10e30f;
{
CudaEventTimer timer(start, stop, true, time_no_shur);
dot_g_proj_norm.resize(1);
ml.resize(size);
mg.resize(size);
mg_p.resize(size);
ml_p.resize(size);
ml = delta_v;
mg = 0;
}
{
CudaEventTimer timer(start, stop, true, time_shur);
Multiply(ml, mg);
}
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kSubtract<<<CONFIG(size)>>>(size, r.data_d, mg.data_d);
mg_p = mg;
}
kResetGlobals<false><<<1, 1>>>();
for (int current_iteration = 0; current_iteration < host_settings.num_iterations; current_iteration++) {
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kResetGlobals<true><<<1, 1>>>();
kCompute_ml_p<<<CONFIG(size)>>>(size, ml.data_d, mg.data_d, ml_p.data_d);
mg_p = 0;
}
{
CudaEventTimer timer(start, stop, true, time_shur);
Multiply(ml_p, mg_p);
}
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kSubtract<<<CONFIG(size)>>>(size, r.data_d, mg_p.data_d);
if (current_iteration % 2 == 0) {
kUpdateAlpha<true><<<CONFIG(size)>>>(size, ml_p.data_d, ml.data_d, mg_p.data_d, mg.data_d);
kAlpha<true><<<1, 1>>>();
} else {
kUpdateAlpha<false><<<CONFIG(size)>>>(size, ml_p.data_d, ml.data_d, mg_p.data_d, mg.data_d);
kAlpha<false><<<1, 1>>>();
}
ml = ml_p;
mg = mg_p;
dot_g_proj_norm = 0;
kResidual<<<CONFIG(size)>>>(size, mg.data_d, dot_g_proj_norm.data_d);
dot_g_proj_norm.copyDeviceToHost();
float g_proj_norm = sqrtf(dot_g_proj_norm.data_h[0]);
if (g_proj_norm < lastgoodres) {
lastgoodres = g_proj_norm;
delta_v = ml;
}
// printf("[%f]\n", lastgoodres);
}
}
cudaCheck(cudaPeekAtLastError());
cudaCheck(cudaDeviceSynchronize());
printf("MPM Solver: [%f, %f %f] \n", time_no_shur, time_shur, lastgoodres);
}
CUDA_GLOBAL void kIncrementVelocity(float* delta_v, float* old_vel_node_mpm, float* grid_vel) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
grid_vel[i * 3 + 0] += delta_v[i * 3 + 0] - old_vel_node_mpm[i * 3 + 0];
grid_vel[i * 3 + 1] += delta_v[i * 3 + 1] - old_vel_node_mpm[i * 3 + 1];
grid_vel[i * 3 + 2] += delta_v[i * 3 + 2] - old_vel_node_mpm[i * 3 + 2];
}
}
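// Grid-to-particle transfer: gather PIC and FLIP velocity contributions over the two-ring node stencil with weights N(),
// blend them using alpha, and clamp the resulting speed to max_velocity.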
CUDA_GLOBAL void kUpdateParticleVelocity(float* grid_vel,
float* old_vel_node_mpm,
float* pos_marker,
float* vel_marker) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = pos_marker[p * 3 + 0];
const float xiy = pos_marker[p * 3 + 1];
const float xiz = pos_marker[p * 3 + 2];
float3 V_flip;
V_flip.x = vel_marker[p * 3 + 0];
V_flip.y = vel_marker[p * 3 + 1];
V_flip.z = vel_marker[p * 3 + 2];
float3 V_pic = make_float3(0.0, 0.0, 0.0);
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
int cx, cy, cz;
LOOP_TWO_RING_GPUSP(
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge);
float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
V_pic.x += vnx * weight; //
V_pic.y += vny * weight; //
V_pic.z += vnz * weight; //
V_flip.x += (vnx - old_vel_node_mpm[current_node * 3 + 0]) * weight; //
V_flip.y += (vny - old_vel_node_mpm[current_node * 3 + 1]) * weight; //
V_flip.z += (vnz - old_vel_node_mpm[current_node * 3 + 2]) * weight; //
)
float3 new_vel = (1.0 - alpha) * V_pic + alpha * V_flip;
float speed = Length(new_vel);
if (speed > device_settings.max_velocity) {
new_vel = new_vel * device_settings.max_velocity / speed;
}
vel_marker[p * 3 + 0] = new_vel.x;
vel_marker[p * 3 + 1] = new_vel.y;
vel_marker[p * 3 + 2] = new_vel.z;
}
}
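// Deformation-gradient update: assemble the velocity gradient from the grid, form the trial elastic gradient
// (I + dt * vel_grad) * F_e, clamp its singular values according to the selected yield model (BOX_YIELD, SPHERE_YIELD
// or DRUCKER_PRAGER), and push the clamped-out part into the plastic gradient F_p.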
CUDA_GLOBAL void kUpdateDeformationGradient(float* grid_vel,
float* pos_marker,
float* marker_Fe,
float* marker_Fp,
float* plasticity,
float* JE_JP) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = pos_marker[p * 3 + 0];
const float xiy = pos_marker[p * 3 + 1];
const float xiz = pos_marker[p * 3 + 2];
Mat33f vel_grad(0.0);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP(float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
vel_grad[0] += vnx * valx; vel_grad[1] += vny * valx; vel_grad[2] += vnz * valx; //
vel_grad[3] += vnx * valy; vel_grad[4] += vny * valy; vel_grad[5] += vnz * valy; //
vel_grad[6] += vnx * valz; vel_grad[7] += vny * valz; vel_grad[8] += vnz * valz;
)
Mat33f delta_F = (Mat33f(1.0) + device_settings.dt * vel_grad);
Mat33f m_FE(marker_Fe, p, device_settings.num_mpm_markers);
Mat33f m_FPpre(marker_Fp, p, device_settings.num_mpm_markers);
Mat33f Fe_tmp = delta_F * m_FE;
Mat33f F_tmp = Fe_tmp * m_FPpre;
Mat33f U, V;
float3 E;
SVD(Fe_tmp, U, E, V);
float3 E_clamped = E;
#if defined(BOX_YIELD)
// Simple box clamp
E_clamped.x = Clamp(E.x, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
E_clamped.y = Clamp(E.y, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
E_clamped.z = Clamp(E.z, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
plasticity[p] = fabsf(E.x * E.y * E.z - E_clamped.x * E_clamped.y * E_clamped.z);
#elif defined(SPHERE_YIELD)
// Clamp to sphere (better)
float center = 1.0 + (device_settings.theta_s - device_settings.theta_c) * .5;
float radius = (device_settings.theta_s + device_settings.theta_c) * .5;
float3 offset = E - center;
float lent = Length(offset);
if (lent > radius) {
offset = offset * radius / lent;
}
E_clamped = offset + center;
plasticity[p] = fabsf(E.x * E.y * E.z - E_clamped.x * E_clamped.y * E_clamped.z);
#elif defined(DRUCKER_PRAGER)
float3 eps = make_float3(logf(E.x), logf(E.y), logf(E.z));
float tr_eps = (eps.x + eps.y + eps.z);
float3 eps_hat = make_float3(logf(E.x), logf(E.y), logf(E.z));
float f_norm_eps_hat = Length(eps_hat);
float delta_gp = f_norm_eps_hat +
(3.0f * device_settings.lambda + 2.0f * device_settings.mu) / (2.0f * device_settings.mu) *
tr_eps * 0;//plasticity[p + device_settings.num_mpm_markers];
float delta_qp = 0;
if (delta_gp <= 0) {
// CASE 1
delta_qp = 0;
} else if (f_norm_eps_hat == 0 || tr_eps > 0) {
// CASE 2
delta_qp = f_norm_eps_hat;
E_clamped = make_float3(1.0f, 1.0f, 1.0f);
} else {
// CASE 3
delta_qp = delta_gp;
E_clamped.x = expf(eps.x - delta_gp * eps_hat.x / f_norm_eps_hat);
E_clamped.y = expf(eps.y - delta_gp * eps_hat.y / f_norm_eps_hat);
E_clamped.z = expf(eps.z - delta_gp * eps_hat.z / f_norm_eps_hat);
}
// Holds the plasticity
float qp_new = plasticity[p] + delta_qp;
float theta_Fp = 0.00110865;
// device_settings.h0 + (device_settings.h1 * qp_new - device_settings.h3) * exp(-device_settings.h2 *
// qp_new);
// 35.0f + (9.0f * qp_new - 10.0f) * exp(-.2f * qp_new);
plasticity[p] = qp_new;
plasticity[p + device_settings.num_mpm_markers] =
sqrtf(2.0 / 3.0) * (2.0f * sinf(theta_Fp)) / (3.0f - sinf(theta_Fp));
printf("YLD: [%f %f %f] %f [%f %f]\n", delta_gp, f_norm_eps_hat, tr_eps, eps_hat.x + eps_hat.y + eps_hat.z,
qp_new, plasticity[p + device_settings.num_mpm_markers]);
#endif
// printf("E %d %f %f %f\n", p, E_clamped.x * E_clamped.y * E_clamped.z, E.x * E.y * E.z, plasticity[p]);
// Inverse of Diagonal E_clamped matrix is 1/E_clamped
Mat33f m_FP = V * MultTranspose(Mat33f(1.0 / E_clamped), U) * F_tmp;
float JP_new = Determinant(m_FP);
// Ensure that F_p is purely deviatoric
Mat33f T1 = powf(JP_new, 1.0 / 3.0) * U * MultTranspose(Mat33f(E_clamped), V);
Mat33f T2 = powf(JP_new, -1.0 / 3.0) * m_FP;
JE_JP[p * 2 + 0] = Determinant(T1);
JE_JP[p * 2 + 1] = Determinant(T2);
T1.Store(marker_Fe, p, device_settings.num_mpm_markers);
T2.Store(marker_Fp, p, device_settings.num_mpm_markers);
// printf("JP: %f JE: %f\n", Determinant(marker_Fe[p]), Determinant(marker_Fp[p]));
}
}
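// Host step 1: rasterize particle mass and momentum to the grid, normalize the grid velocities, update the
// per-particle deformation gradients, and return J_E / J_P to the caller.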
void MPM_UpdateDeformationGradient(MPM_Settings& settings,
std::vector<float>& positions,
std::vector<float>& velocities,
std::vector<float>& jejp) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
host_settings = settings;
printf("Solving MPM: %d\n", host_settings.num_iterations);
pos.data_h = positions;
pos.copyHostToDevice();
vel.data_h = velocities;
vel.copyHostToDevice();
cudaCheck(cudaMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
MPM_ComputeBounds();
node_mass.resize(host_settings.num_mpm_nodes);
node_mass = 0;
grid_vel.resize(host_settings.num_mpm_nodes * 3);
grid_vel = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
// ========================================================================================
kRasterize<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
vel.data_d, // input
node_mass.data_d, // output
grid_vel.data_d // output
);
}
printf("kRasterize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kNormalizeWeights<<<CONFIG(host_settings.num_mpm_nodes)>>>(node_mass.data_d, // output
grid_vel.data_d);
}
printf("kNormalizeWeights: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kUpdateDeformationGradient<<<CONFIG(host_settings.num_mpm_markers)>>>(
grid_vel.data_d, pos.data_d, marker_Fe.data_d, marker_Fp.data_d, marker_plasticity.data_d, JE_JP.data_d);
JE_JP.copyDeviceToHost();
}
jejp = JE_JP.data_h;
printf("kUpdateDeformationGradient: %f\n", time_measured);
time_measured = 0;
}
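// Host step 2: compute elastic forces and the right-hand side on the grid, solve for delta_v with the BB solver,
// then increment the grid velocities and transfer them back to the particles.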
void MPM_Solve(MPM_Settings& settings, std::vector<float>& positions, std::vector<float>& velocities) {
old_vel_node_mpm.resize(host_settings.num_mpm_nodes * 3);
rhs.resize(host_settings.num_mpm_nodes * 3);
old_vel_node_mpm = grid_vel;
// cudaCheck(cudaPeekAtLastError());
// cudaCheck(cudaDeviceSynchronize());
{
CudaEventTimer timer(start, stop, true, time_measured);
kFeHat<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, marker_Fe.data_d, grid_vel.data_d,
marker_Fe_hat.data_d);
}
printf("kFeHat: %f\n", time_measured);
time_measured = 0;
// kSVD<<<CONFIG(host_settings.num_mpm_markers)>>>(marker_Fe_hat.data_d, PolarR.data_d, PolarS.data_d);
{
CudaEventTimer timer(start, stop, true, time_measured);
kApplyForces<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
marker_Fe_hat.data_d, // input
marker_Fe.data_d, // input
marker_volume.data_d, // input
node_mass.data_d, // input
marker_plasticity.data_d, // input
PolarR.data_d, // output
PolarS.data_d, // output
grid_vel.data_d); // output
}
printf("kApplyForces: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kRhs<<<CONFIG(host_settings.num_mpm_nodes)>>>(node_mass.data_d, grid_vel.data_d, rhs.data_d);
}
printf("kRhs: %f\n", time_measured);
time_measured = 0;
delta_v.resize(host_settings.num_mpm_nodes * 3);
delta_v = old_vel_node_mpm;
MPM_BBSolver(rhs, delta_v);
{
CudaEventTimer timer(start, stop, true, time_measured);
kIncrementVelocity<<<CONFIG(host_settings.num_mpm_nodes)>>>(delta_v.data_d, old_vel_node_mpm.data_d,
grid_vel.data_d);
}
printf("kIncrementVelocity: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kUpdateParticleVelocity<<<CONFIG(host_settings.num_mpm_markers)>>>(grid_vel.data_d, old_vel_node_mpm.data_d,
pos.data_d, vel.data_d);
}
printf("kUpdateParticleVelocity: %f\n", time_measured);
time_measured = 0;
vel.copyDeviceToHost();
velocities = vel.data_h;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
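// Initialize F_e, F_p and the polar-decomposition R to identity; S is stored as six packed symmetric components,
// here also the identity.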
CUDA_GLOBAL void kInitFeFp(float* marker_Fe, float* marker_Fp, float* marker_RE, float* marker_SE) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_markers) {
Mat33f T(1.0f);
T.Store(marker_Fe, i, device_settings.num_mpm_markers);
T.Store(marker_Fp, i, device_settings.num_mpm_markers);
T.Store(marker_RE, i, device_settings.num_mpm_markers);
marker_SE[i + device_settings.num_mpm_markers * 0] = 1.0f;
marker_SE[i + device_settings.num_mpm_markers * 1] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 2] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 3] = 1.0f;
marker_SE[i + device_settings.num_mpm_markers * 4] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 5] = 1.0f;
}
}
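// One-time setup: compute the domain bounds, rasterize mass to estimate per-particle volumes, allocate the
// per-particle state, and initialize F_e / F_p.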
void MPM_Initialize(MPM_Settings& settings, std::vector<float>& positions) {
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
cudaEventCreate(&start);
cudaEventCreate(&stop);
host_settings = settings;
cudaCheck(cudaMalloc(&lower_bound, sizeof(float3)));
cudaCheck(cudaMalloc(&upper_bound, sizeof(float3)));
pos.data_h = positions;
pos.copyHostToDevice();
cudaCheck(cudaMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
MPM_ComputeBounds();
marker_volume.resize(host_settings.num_mpm_markers);
node_mass.resize(host_settings.num_mpm_nodes);
node_mass = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kRasterize<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
node_mass.data_d); // output
}
printf("kRasterize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kComputeParticleVolumes<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
node_mass.data_d, // input
marker_volume.data_d); // output
}
printf("kComputeParticleVolumes: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
marker_Fe.resize(host_settings.num_mpm_markers * 9);
marker_Fe_hat.resize(host_settings.num_mpm_markers * 9);
marker_Fp.resize(host_settings.num_mpm_markers * 9);
PolarR.resize(host_settings.num_mpm_markers * 9);
PolarS.resize(host_settings.num_mpm_markers * 6);
JE_JP.resize(host_settings.num_mpm_markers * 2);
marker_plasticity.resize(host_settings.num_mpm_markers * 2);
marker_plasticity = 0;
}
printf("Resize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kInitFeFp<<<CONFIG(host_settings.num_mpm_markers)>>>(marker_Fe.data_d, // output
marker_Fp.data_d, // output
PolarR.data_d, // output
PolarS.data_d); // output
}
printf("kInitFeFp: %f\n", time_measured);
time_measured = 0;
// cudaCheck(cudaPeekAtLastError());
// cudaCheck(cudaDeviceSynchronize());
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
}
|
04eaccaa947086437e888ed824edcc50ebf0f697.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <fstream>
#include <algorithm>
using namespace std;
//Constants
const float PI = 3.14159265;
const float kB = 1.38E-23;
const int Atom_type_N = 2;
const int Pair_type_N = 3;
const int Pt_I = 6, Pt_J = 6, Pt_K = 3;
const int Pt_N = 4 * Pt_I * Pt_J * Pt_K;
const int Ar_N = 1;
//Customize Structures
struct Wall_Molecule {
int Sid;
int Lid;
float x;
float y;
float z;
float Bx;
float By;
float Bz;
float vx;
float vy;
float vz;
float ax;
float ay;
float az;
float atx;
float aty;
float atz;
};
struct Gas_Molecule {
int Sid;
int Lid;
float x;
float y;
float z;
float vx;
float vy;
float vz;
float ax;
float ay;
float az;
float atx;
float aty;
float atz;
};
struct Parameters_Wall {
int Type;
float Mass;
float T;
float mpVelocity;
float CurrentT;
float ArgVelX;
float ArgVelY;
float ArgVelZ;
float Sigma;
float Epselon;
float Lattice;
};
struct Parameters_Gas {
int Type;
float Mass;
float T;
float mpVelocity;
float Sigma;
float Epselon;
};
struct Parameters_MD {
float ArPt_Sigma;
float ArPt_Epselon;
float CutOff;
float SprK;
float dt;
int Rt;
int Tt;
int DumpStep;
int TimeStep;
float BoxXLow;
float BoxXHigh;
float BoxYLow;
float BoxYHigh;
float BoxZLow;
float BoxZHigh;
bool State;
};
struct Dimensionless {
float Mass;
float Energy;
float Length;
float Velocity;
float Time;
float Acceleration;
};
//MD Definition
class MD {
Parameters_Wall Pars_Pt;
Parameters_Gas Pars_Ar;
Parameters_MD Pars_MD;
Dimensionless Pars_Dim;
Wall_Molecule Pt[Pt_N];
Gas_Molecule Ar[Ar_N];
public:
void Pars_Init();
void Models_Init();
void Init_Kernels();
void Boundary_XY();
void RescaleT1();
void RescaleT2();
void RescaleT3();
void RescaleT4();
void AccelerationCal();
void MainMD();
void Dump();
void Exit();
float random();
};
/******************************************************************************/
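// Physical parameters of the Ar gas / Pt wall system, followed by non-dimensionalization using the Pt mass,
// LJ energy and LJ length as reference scales.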
void MD::Pars_Init() {
//
Pars_Ar.Type = 1;
Pars_Pt.Type = 2;
Pars_Ar.Mass = 39.95 / 6.02 * 1E-26;//kg
Pars_Pt.Mass = 195.08 / 6.02 * 1E-26;
Pars_Ar.T = 300.0;//K
Pars_Pt.T = 300.0;
Pars_Ar.mpVelocity = sqrt(2 * kB * Pars_Ar.T / Pars_Ar.Mass);//
Pars_Pt.mpVelocity = sqrt(3 * kB * Pars_Pt.T / Pars_Pt.Mass);//
Pars_Pt.CurrentT = 0.0;
Pars_Pt.ArgVelX = 0.0;
Pars_Pt.ArgVelY = 0.0;
Pars_Pt.ArgVelZ = 0.0;
Pars_Ar.Sigma = 3.40 * 1E-10;//m
Pars_Pt.Sigma = 2.47 * 1E-10;
Pars_Ar.Epselon = 1.654E-21;//J
Pars_Pt.Epselon = 5.207E-20;
Pars_Pt.Lattice = 3.93E-10;
Pars_MD.ArPt_Sigma = 2.94 * 1E-10;
Pars_MD.ArPt_Epselon = 1.093E-21;
Pars_MD.CutOff = 10 * 1E-10;
//
Pars_Dim.Mass = Pars_Pt.Mass;
Pars_Dim.Energy = Pars_Pt.Epselon;
Pars_Dim.Length = Pars_Pt.Sigma;
Pars_Dim.Velocity = sqrt(Pars_Dim.Energy / Pars_Dim.Mass);
Pars_Dim.Time = Pars_Dim.Length / Pars_Dim.Velocity;
Pars_Dim.Acceleration = Pars_Dim.Energy / (Pars_Dim.Mass * Pars_Dim.Length);
//
Pars_Ar.Mass /= Pars_Dim.Mass;
Pars_Pt.Mass /= Pars_Dim.Mass;
Pars_Ar.Epselon /= Pars_Dim.Energy;
Pars_Pt.Epselon /= Pars_Dim.Energy;
Pars_MD.ArPt_Epselon /= Pars_Dim.Energy;
Pars_Ar.Sigma /= Pars_Dim.Length;
Pars_Pt.Sigma /= Pars_Dim.Length;
Pars_MD.ArPt_Sigma /= Pars_Dim.Length;
Pars_Pt.Lattice /= Pars_Dim.Length;
Pars_MD.CutOff /= Pars_Dim.Length;
Pars_Ar.mpVelocity /= Pars_Dim.Velocity;
Pars_Pt.mpVelocity /= Pars_Dim.Velocity;
//
Pars_MD.SprK = 5000.0;
Pars_MD.dt = 0.001;
Pars_MD.Rt = 100;
Pars_MD.Tt = 3500000;
Pars_MD.DumpStep = 100;
Pars_MD.TimeStep = 0;
Pars_MD.BoxXLow = 0.0;
Pars_MD.BoxXHigh = Pt_I * Pars_Pt.Lattice;
Pars_MD.BoxYLow = 0.0;
Pars_MD.BoxYHigh = Pt_J * Pars_Pt.Lattice;
Pars_MD.BoxZLow = -(Pt_K - 0.5) * Pars_Pt.Lattice;
Pars_MD.BoxZHigh = 5.0;
Pars_MD.State = true;
//
cout << "*******[Pars_Init]: Parameters Initialized!*******\n";
cout << "Box_ZoneX: " << Pars_MD.BoxXLow << ", " << Pars_MD.BoxXHigh << "\n";
cout << "Box_ZoneY: " << Pars_MD.BoxYLow << ", " << Pars_MD.BoxYHigh << "\n";
cout << "Box_ZoneZ: " << Pars_MD.BoxZLow << ", " << Pars_MD.BoxZHigh << "\n";
}
/******************************************************************************/
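// Build the Pt wall as an FCC lattice (4 atoms per unit cell) with Box-Muller sampled thermal velocities, and place a
// single Ar atom at the top of the box with a Maxwell-distributed incident velocity pointing downward.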
void MD::Models_Init() {
int count, i, j, k;
float R1, R2, Rx, Ry;
//
count = 0;
srand((unsigned)time(NULL));
for (i = 0; i < 2 * Pt_I; i++) {
for (j = 0; j < 2 * Pt_J; j++) {
for (k = 0; k < 2 * Pt_K; k++) {
if (i / 2. + j / 2. + k / 2. == int(i / 2. + j / 2. + k / 2.)) {
Pt[count].Sid = count + 1;
Pt[count].Lid = 1;
Pt[count].x = i / 2.*Pars_Pt.Lattice;
Pt[count].Bx = Pt[count].x;
Pt[count].y = j / 2.*Pars_Pt.Lattice;
Pt[count].By = Pt[count].y;
Pt[count].z = (k / 2. - 2.5)*Pars_Pt.Lattice;
Pt[count].Bz = Pt[count].z;
R1 = random();
R2 = random();
Pt[count].vx = Pars_Pt.mpVelocity / sqrt(3) * sqrt(-2 * log(R1)) * cos(2 * PI * R2);
R1 = random();
R2 = random();
Pt[count].vy = Pars_Pt.mpVelocity / sqrt(3) * sqrt(-2 * log(R1)) * cos(2 * PI * R2);
R1 = random();
R2 = random();
Pt[count].vz = Pars_Pt.mpVelocity / sqrt(3) * sqrt(-2 * log(R1)) * cos(2 * PI * R2);
Pt[count].ax = 0.0;
Pt[count].ay = 0.0;
Pt[count].az = 0.0;
Pt[count].atx = 0.0;
Pt[count].aty = 0.0;
Pt[count].atz = 0.0;
count += 1;
}
}
}
}
//
Ar[0].Sid = 1;
Ar[0].Lid = 1;
Rx = random();
Ry = random();
Ar[0].x = Pars_MD.BoxXLow + (Pars_MD.BoxXHigh - Pars_MD.BoxXLow) * Rx;
Ar[0].y = Pars_MD.BoxYLow + (Pars_MD.BoxYHigh - Pars_MD.BoxYLow) * Ry;
Ar[0].z = Pars_MD.BoxZHigh;
R1 = random();
R2 = random();
Ar[0].vx = Pars_Ar.mpVelocity * sqrt(-log(R1)) * cos(2 * PI * R2);//Maxwell Distribution
R1 = random();
R2 = random();
Ar[0].vy = Pars_Ar.mpVelocity * sqrt(-log(R1)) * sin(2 * PI * R2);
R1 = random();
Ar[0].vz = -Pars_Ar.mpVelocity * sqrt(-log(R1));
Ar[0].ax = 0.0;
Ar[0].ay = 0.0;
Ar[0].az = 0.0;
Ar[0].atx = 0.0;
Ar[0].aty = 0.0;
Ar[0].atz = 0.0;
//
cout << "*******[Models_Init]: Models Initialized!*******\n";
cout << "Created " << Pt_N << " Pt\n";
cout << "Created " << Ar_N << " Ar\n";
cout << "Ar Incidence Speed: " << Ar[0].vx << "," << Ar[0].vy << "," << Ar[0].vz << "\n";
}
/******************************************************************************/
void MD::Init_Kernels() {
//
int i;
//
Boundary_XY();
//
Pars_MD.BoxZLow = Pt[0].z;
for (i = 0; i<Pt_N; i++) {
if (Pt[i].z<Pars_MD.BoxZLow) {
Pars_MD.BoxZLow = Pt[i].z;
}
}
cout << "Box Z Low: " << Pars_MD.BoxZLow << "\n";
//
RescaleT1();
RescaleT2();
RescaleT3();
RescaleT4();
//
AccelerationCal();
//
Exit();
//
cout << "*******[Init_Kernels]: Initialization Done!*******\n";
cout << "Pt Average Speed in X: " << Pars_Pt.ArgVelX << "\n";
cout << "Pt Average Speed in Y: " << Pars_Pt.ArgVelY << "\n";
cout << "Pt Average Speed in Z: " << Pars_Pt.ArgVelZ << "\n";
cout << "Pt Temperature: " << Pars_Pt.CurrentT << "\n";
}
/******************************************************************************/
void MD::Boundary_XY() {
//
int i;
//
for (i = 0; i < Pt_N; i++) {
//
if (Pt[i].x < Pars_MD.BoxXLow) {
Pt[i].x += Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
Pt[i].Bx += Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
else if (Pt[i].x >= Pars_MD.BoxXHigh) {
Pt[i].x -= Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
Pt[i].Bx -= Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
//
if (Pt[i].y < Pars_MD.BoxYLow) {
Pt[i].y += Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
Pt[i].By += Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
else if (Pt[i].y >= Pars_MD.BoxYHigh) {
Pt[i].y -= Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
Pt[i].By -= Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
}
//
if (Ar[0].x < Pars_MD.BoxXLow) {
Ar[0].x += Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
else if (Ar[0].x >= Pars_MD.BoxXHigh) {
Ar[0].x -= Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
if (Ar[0].y < Pars_MD.BoxYLow) {
Ar[0].y += Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
else if (Ar[0].y >= Pars_MD.BoxYHigh) {
Ar[0].y -= Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
}
/******************************************************************************/
void MD::RescaleT1() {
//
int i;
//
Pars_Pt.ArgVelX = 0.0;
Pars_Pt.ArgVelY = 0.0;
Pars_Pt.ArgVelZ = 0.0;
for (i = 0; i < Pt_N; i++) {
Pars_Pt.ArgVelX += Pt[i].vx / Pt_N;
Pars_Pt.ArgVelY += Pt[i].vy / Pt_N;
Pars_Pt.ArgVelZ += Pt[i].vz / Pt_N;
}
}
/******************************************************************************/
void MD::RescaleT2() {
//
int i;
//
for (i = 0; i < Pt_N; i++) {
Pt[i].vx -= Pars_Pt.ArgVelX;
Pt[i].vy -= Pars_Pt.ArgVelY;
Pt[i].vz -= Pars_Pt.ArgVelZ;
}
}
/******************************************************************************/
void MD::RescaleT3() {
//
int i;
//
Pars_Pt.CurrentT = 0.0;
for (i = 0; i < Pt_N; i++) {
Pars_Pt.CurrentT += Pt[i].vx * Pt[i].vx + Pt[i].vy * Pt[i].vy + Pt[i].vz * Pt[i].vz;
}
Pars_Pt.CurrentT *= Pars_Dim.Velocity * Pars_Dim.Velocity * Pars_Pt.Mass * Pars_Dim.Mass / (3 * Pt_N * kB);
}
/******************************************************************************/
void MD::RescaleT4() {
//
int i;
//
for (i = 0; i < Pt_N; i++) {
Pt[i].vx *= sqrt(Pars_Pt.T / Pars_Pt.CurrentT);
Pt[i].vy *= sqrt(Pars_Pt.T / Pars_Pt.CurrentT);
Pt[i].vz *= sqrt(Pars_Pt.T / Pars_Pt.CurrentT);
}
}
/******************************************************************************/
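// Pairwise Lennard-Jones 12-6 forces with a periodic-image correction in x and y; each Pt atom is additionally tied
// to its lattice site by a harmonic spring, while the Ar atom feels only the LJ forces.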
void MD::AccelerationCal() {
//
int i, j;
float Epair, Spair, Pairx, Pairy, Pairz, Dispair, Fpair, Atom_Fx, Atom_Fy, Atom_Fz;
float Spring_Disx, Spring_Fx, Pt_Fx, Spring_Disy, Spring_Fy, Pt_Fy, Spring_Disz, Spring_Fz, Pt_Fz, Ar_Fx, Ar_Fy, Ar_Fz;
//
for (i = 0; i < Pt_N + Ar_N; i++) {
Atom_Fx = 0.0;
Atom_Fy = 0.0;
Atom_Fz = 0.0;
for (j = 0; j < Pt_N + Ar_N; j++) {
if (i < Pt_N && j < Pt_N) {
Epair = Pars_Pt.Epselon;
Spair = Pars_Pt.Sigma;
Pairx = Pt[i].x - Pt[j].x;
Pairy = Pt[i].y - Pt[j].y;
Pairz = Pt[i].z - Pt[j].z;
}
else if (i < Pt_N && j == Pt_N) {
Epair = Pars_MD.ArPt_Epselon;
Spair = Pars_MD.ArPt_Sigma;
Pairx = Pt[i].x - Ar[0].x;
Pairy = Pt[i].y - Ar[0].y;
Pairz = Pt[i].z - Ar[0].z;
}
else if (i == Pt_N && j < Pt_N) {
Epair = Pars_MD.ArPt_Epselon;
Spair = Pars_MD.ArPt_Sigma;
Pairx = Ar[0].x - Pt[j].x;
Pairy = Ar[0].y - Pt[j].y;
Pairz = Ar[0].z - Pt[j].z;
}
else {
Epair = Pars_Ar.Epselon;
Spair = Pars_Ar.Sigma;
Pairx = 0.0;
Pairy = 0.0;
Pairz = 0.0;
}
//
if (abs(Pairx) >= Pars_MD.BoxXHigh - Pars_MD.BoxXLow - Pars_MD.CutOff) {
Pairx -= (Pars_MD.BoxXHigh - Pars_MD.BoxXLow) * Pairx / abs(Pairx);
}
if (abs(Pairy) >= Pars_MD.BoxYHigh - Pars_MD.BoxYLow - Pars_MD.CutOff) {
Pairy -= (Pars_MD.BoxYHigh - Pars_MD.BoxYLow) * Pairy / abs(Pairy);
}
//
Dispair = sqrt(Pairx * Pairx + Pairy * Pairy + Pairz * Pairz);
if (Dispair > 0 && Dispair <= Pars_MD.CutOff) {
Fpair = 48 * Epair*(pow(Spair, 12) / pow(Dispair, 13) - 0.5*pow(Spair, 6) / pow(Dispair, 7));
Atom_Fx += Pairx * Fpair / Dispair;
Atom_Fy += Pairy * Fpair / Dispair;
Atom_Fz += Pairz * Fpair / Dispair;
}
}
if (i < Pt_N) {
//
Spring_Disx = Pt[i].x - Pt[i].Bx;
Spring_Fx = -Pars_MD.SprK * Spring_Disx;
Pt_Fx = Atom_Fx + Spring_Fx;
Pt[i].ax = Pt_Fx / Pars_Pt.Mass;
Spring_Disy = Pt[i].y - Pt[i].By;
Spring_Fy = -Pars_MD.SprK * Spring_Disy;
Pt_Fy = Atom_Fy + Spring_Fy;
Pt[i].ay = Pt_Fy / Pars_Pt.Mass;
Spring_Disz = Pt[i].z - Pt[i].Bz;
Spring_Fz = -Pars_MD.SprK * Spring_Disz;
Pt_Fz = Atom_Fz + Spring_Fz;
Pt[i].az = Pt_Fz / Pars_Pt.Mass;
}
else {
//
Ar_Fx = Atom_Fx;
Ar[0].ax = Ar_Fx / Pars_Ar.Mass;
Ar_Fy = Atom_Fy;
Ar[0].ay = Ar_Fy / Pars_Ar.Mass;
Ar_Fz = Atom_Fz;
Ar[0].az = Ar_Fz / Pars_Ar.Mass;
}
}
}
/******************************************************************************/
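// Whole simulation in one kernel (one thread per atom): velocity-Verlet integration, periodic x/y boundaries,
// LJ + spring force evaluation, and a velocity-rescaling thermostat on the Pt wall; the loop ends when the Ar atom
// leaves the top of the box or the step limit Tt is reached.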
__global__ void Time_Advancement(Wall_Molecule *Pt, Gas_Molecule *Ar, Parameters_Wall *Pars_Pt, Parameters_Gas *Pars_Ar, Parameters_MD *Pars_MD, Dimensionless *Pars_Dim) {
//
__shared__ float All_Pos[Pt_N + Ar_N][3];
__shared__ float All_BPos[Pt_N][3];
__shared__ float All_Vel[Pt_N + Ar_N][3];
__shared__ float All_Acc[Pt_N + Ar_N][3];
__shared__ float All_Acc_Temp[Pt_N + Ar_N][3];
__shared__ float Pt_argVel[3];
__shared__ float Pt_T;
//
int tid = threadIdx.x;
int i;
//
float Epair, Spair, Pairx, Pairy, Pairz, Dispair, Fpair, Atom_Fx, Atom_Fy, Atom_Fz;
float Spring_Disx, Spring_Fx, Pt_Fx, Spring_Disy, Spring_Fy, Pt_Fy, Spring_Disz, Spring_Fz, Pt_Fz, Ar_Fx, Ar_Fy, Ar_Fz;
//Share Data
if (tid < Pt_N) {
//
All_Pos[tid][0] = Pt[tid].x;
All_Pos[tid][1] = Pt[tid].y;
All_Pos[tid][2] = Pt[tid].z;
All_BPos[tid][0] = Pt[tid].Bx;
All_BPos[tid][1] = Pt[tid].By;
All_BPos[tid][2] = Pt[tid].Bz;
All_Vel[tid][0] = Pt[tid].vx;
All_Vel[tid][1] = Pt[tid].vy;
All_Vel[tid][2] = Pt[tid].vz;
All_Acc[tid][0] = Pt[tid].ax;
All_Acc[tid][1] = Pt[tid].ay;
All_Acc[tid][2] = Pt[tid].az;
}
if (tid == Pt_N) {
//
All_Pos[tid][0] = Ar[0].x;
All_Pos[tid][1] = Ar[0].y;
All_Pos[tid][2] = Ar[0].z;
All_Vel[tid][0] = Ar[0].vx;
All_Vel[tid][1] = Ar[0].vy;
All_Vel[tid][2] = Ar[0].vz;
All_Acc[tid][0] = Ar[0].ax;
All_Acc[tid][1] = Ar[0].ay;
All_Acc[tid][2] = Ar[0].az;
}
//
if (tid == Pt_N) {
printf("*******[Time_Advancement]: Starting!*******\n");
}
//
while ((*Pars_MD).State) {
//Verlet_Pos
if (tid < Pt_N + Ar_N) {
All_Pos[tid][0] += All_Vel[tid][0] * (*Pars_MD).dt + 0.5 * All_Acc[tid][0] * (*Pars_MD).dt * (*Pars_MD).dt;
All_Pos[tid][1] += All_Vel[tid][1] * (*Pars_MD).dt + 0.5 * All_Acc[tid][1] * (*Pars_MD).dt * (*Pars_MD).dt;
All_Pos[tid][2] += All_Vel[tid][2] * (*Pars_MD).dt + 0.5 * All_Acc[tid][2] * (*Pars_MD).dt * (*Pars_MD).dt;
}
//Boundary_XY
if (tid < Pt_N + Ar_N) {
//X
if (All_Pos[tid][0] < (*Pars_MD).BoxXLow) {
All_Pos[tid][0] += (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
if (tid<Pt_N) {
All_BPos[tid][0] += (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
}
}
else if (All_Pos[tid][0] >= (*Pars_MD).BoxXHigh) {
All_Pos[tid][0] -= (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
if (tid<Pt_N) {
All_BPos[tid][0] -= (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
}
}
//Y
if (All_Pos[tid][1] < (*Pars_MD).BoxYLow) {
All_Pos[tid][1] += (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
if (tid<Pt_N) {
All_BPos[tid][1] += (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
}
}
else if (All_Pos[tid][1] >= (*Pars_MD).BoxYHigh) {
All_Pos[tid][1] -= (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
if (tid<Pt_N) {
All_BPos[tid][1] -= (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
}
}
}
//Last_Acceleration
if (tid < Pt_N + Ar_N) {
All_Acc_Temp[tid][0] = All_Acc[tid][0];
All_Acc_Temp[tid][1] = All_Acc[tid][1];
All_Acc_Temp[tid][2] = All_Acc[tid][2];
}
//AccelerationCal
__syncthreads();
//
if (tid < Pt_N + Ar_N) {
Atom_Fx = 0.0;
Atom_Fy = 0.0;
Atom_Fz = 0.0;
for (i = 0; i < Pt_N + Ar_N; i++) {
if (tid < Pt_N && i < Pt_N) {
Epair = (*Pars_Pt).Epselon;
Spair = (*Pars_Pt).Sigma;
}
else if (tid == Pt_N && i == Pt_N) {
Epair = (*Pars_Ar).Epselon;
Spair = (*Pars_Ar).Sigma;
}
else {
Epair = (*Pars_MD).ArPt_Epselon;
Spair = (*Pars_MD).ArPt_Sigma;
}
Pairx = All_Pos[tid][0] - All_Pos[i][0];
Pairy = All_Pos[tid][1] - All_Pos[i][1];
Pairz = All_Pos[tid][2] - All_Pos[i][2];
//
if (abs(Pairx) >= (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow - (*Pars_MD).CutOff) {
Pairx -= ((*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow) * Pairx / abs(Pairx);
}
if (abs(Pairy) >= (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow - (*Pars_MD).CutOff) {
Pairy -= ((*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow) * Pairy / abs(Pairy);
}
//
Dispair = sqrt(Pairx * Pairx + Pairy * Pairy + Pairz * Pairz);
if (Dispair > 0 && Dispair <= (*Pars_MD).CutOff) {
Fpair = 48 * Epair*(pow(Spair, 12) / pow(Dispair, 13) - 0.5*pow(Spair, 6) / pow(Dispair, 7));
Atom_Fx += Pairx * Fpair / Dispair;
Atom_Fy += Pairy * Fpair / Dispair;
Atom_Fz += Pairz * Fpair / Dispair;
}
}
if (tid < Pt_N) {
//
Spring_Disx = All_Pos[tid][0] - All_BPos[tid][0];
Spring_Fx = -(*Pars_MD).SprK * Spring_Disx;
Pt_Fx = Atom_Fx + Spring_Fx;
All_Acc[tid][0] = Pt_Fx / (*Pars_Pt).Mass;
Spring_Disy = All_Pos[tid][1] - All_BPos[tid][1];
Spring_Fy = -(*Pars_MD).SprK * Spring_Disy;
Pt_Fy = Atom_Fy + Spring_Fy;
All_Acc[tid][1] = Pt_Fy / (*Pars_Pt).Mass;
Spring_Disz = All_Pos[tid][2] - All_BPos[tid][2];
Spring_Fz = -(*Pars_MD).SprK * Spring_Disz;
Pt_Fz = Atom_Fz + Spring_Fz;
All_Acc[tid][2] = Pt_Fz / (*Pars_Pt).Mass;
}
else {
//
Ar_Fx = Atom_Fx;
All_Acc[tid][0] = Ar_Fx / (*Pars_Ar).Mass;
Ar_Fy = Atom_Fy;
All_Acc[tid][1] = Ar_Fy / (*Pars_Ar).Mass;
Ar_Fz = Atom_Fz;
All_Acc[tid][2] = Ar_Fz / (*Pars_Ar).Mass;
}
}
//Verlet_Vel
if (tid < Pt_N + Ar_N) {
All_Vel[tid][0] += 0.5 * (All_Acc_Temp[tid][0] + All_Acc[tid][0]) * (*Pars_MD).dt;
All_Vel[tid][1] += 0.5 * (All_Acc_Temp[tid][1] + All_Acc[tid][1]) * (*Pars_MD).dt;
All_Vel[tid][2] += 0.5 * (All_Acc_Temp[tid][2] + All_Acc[tid][2]) * (*Pars_MD).dt;
}
__syncthreads();
//RescaleT
if (tid == Pt_N) {
Pt_argVel[0] = 0.0;
Pt_argVel[1] = 0.0;
Pt_argVel[2] = 0.0;
for (i = 0; i < Pt_N; i++) {
Pt_argVel[0] += All_Vel[i][0];
Pt_argVel[1] += All_Vel[i][1];
Pt_argVel[2] += All_Vel[i][2];
}
Pt_argVel[0] /= Pt_N;
Pt_argVel[1] /= Pt_N;
Pt_argVel[2] /= Pt_N;
}
__syncthreads();
if (tid < Pt_N) {
All_Vel[tid][0] -= Pt_argVel[0];
All_Vel[tid][1] -= Pt_argVel[1];
All_Vel[tid][2] -= Pt_argVel[2];
}
__syncthreads();
if (tid == Pt_N) {
Pt_T = 0.0;
for (i = 0; i < Pt_N; i++) {
Pt_T += All_Vel[i][0] * All_Vel[i][0] + All_Vel[i][1] * All_Vel[i][1] + All_Vel[i][2] * All_Vel[i][2];
}
Pt_T *= (*Pars_Dim).Velocity * (*Pars_Dim).Velocity * (*Pars_Pt).Mass * (*Pars_Dim).Mass / (3 * Pt_N * 1.38E-23);
}
__syncthreads();
//
if (tid == Pt_N) {
printf("Before modification, Pt_T: %f\n", Pt_T);
}
if (tid < Pt_N) {
All_Vel[tid][0] *= sqrt((*Pars_Pt).T / Pt_T);
All_Vel[tid][1] *= sqrt((*Pars_Pt).T / Pt_T);
All_Vel[tid][2] *= sqrt((*Pars_Pt).T / Pt_T);
}
__syncthreads();
if (tid == Pt_N) {
Pt_T = 0.0;
for (i = 0; i < Pt_N; i++) {
Pt_T += All_Vel[i][0] * All_Vel[i][0] + All_Vel[i][1] * All_Vel[i][1] + All_Vel[i][2] * All_Vel[i][2];
}
Pt_T *= (*Pars_Dim).Velocity * (*Pars_Dim).Velocity * (*Pars_Pt).Mass * (*Pars_Dim).Mass / (3 * Pt_N * 1.38E-23);
}
__syncthreads();
//
if (tid == Pt_N) {
printf("After modification, Pt_T: %f\n", Pt_T);
}
//Update_BoxZ
if (tid == Pt_N) {
(*Pars_MD).BoxZLow = All_Pos[0][2];
for (i = 0; i < Pt_N + Ar_N; i++) {
if ((*Pars_MD).BoxZLow > All_Pos[i][2]) {
(*Pars_MD).BoxZLow = All_Pos[i][2];
}
}
}
__syncthreads();
//
if (tid == Pt_N) {
printf("BoxZLow: %f\n", (*Pars_MD).BoxZLow);
}
if (tid == Pt_N) {
(*Pars_MD).TimeStep += 1;
printf("CurrentStep: %d\n", (*Pars_MD).TimeStep);
printf("In Shared Memery, Ar_Z: %f\n", All_Pos[tid][2]);
//
if (All_Pos[tid][2] >(*Pars_MD).BoxZHigh || (*Pars_MD).TimeStep >= (*Pars_MD).Tt) {
(*Pars_MD).State = false;
(*Pars_MD).DumpStep = 1;
}
}
__syncthreads();
//Get Data
if (tid == Pt_N) {
printf("Copying Data From Shared Memery To Global Memery......\n");
}
if (tid < Pt_N) {
//
Pt[tid].x = All_Pos[tid][0];
Pt[tid].y = All_Pos[tid][1];
Pt[tid].z = All_Pos[tid][2];
}
if (tid == Pt_N) {
//
printf("In Global Memery, Ar_Z: %f\n", All_Pos[tid][2]);
Ar[0].x = All_Pos[tid][0];
Ar[0].y = All_Pos[tid][1];
Ar[0].z = All_Pos[tid][2];
}
}
if (tid == Pt_N) {
printf("*******[Time_Advancement]: Done!*******\n");
}
}
/******************************************************************************/
void MD::Dump() {
//
int i;
//
if (Pars_MD.TimeStep % Pars_MD.DumpStep == 0) {
//
ofstream MD;
MD.open("Kernel_MD_CUDA_C.dump", ios::app);
MD << "ITEM: TIMESTEP\n";
MD << Pars_MD.TimeStep << "\n";
MD << "ITEM: NUMBER OF ATOMS\n";
MD << Pt_N + Ar_N << "\n";
MD << "ITEM: BOX BOUNDS pp pp ff\n";
MD << Pars_MD.BoxXLow << " " << Pars_MD.BoxXHigh << "\n";
MD << Pars_MD.BoxYLow << " " << Pars_MD.BoxYHigh << "\n";
MD << Pars_MD.BoxZLow << " " << Pars_MD.BoxZHigh << "\n";
MD << "ITEM: ATOMS id type x y z\n";
for (i = 0; i < Pt_N; i++) {
MD << i + 1 << " " << Pars_Pt.Type << " " << Pt[i].x << " " << Pt[i].y << " " << Pt[i].z << "\n";
}
MD << Pt_N + Ar_N << " " << Pars_Ar.Type << " " << Ar[0].x << " " << Ar[0].y << " " << Ar[0].z << "\n";
MD.close();
//
ofstream Zt;
Zt.open("Kernel_MD_CUDA_C_Zt.dat", ios::app);
Zt << Pars_MD.TimeStep * Pars_MD.dt << " " << Ar[0].z << "\n";
Zt.close();
}
}
/******************************************************************************/
void MD::Exit() {
//
if (Ar[0].z > Pars_MD.BoxZHigh || Pars_MD.TimeStep >= Pars_MD.Tt) {
Pars_MD.State = false;
Pars_MD.DumpStep = 1;
Dump();
}
else {
Dump();
}
}
/******************************************************************************/
float MD::random() {
//
float R;
//
R = 0.0;
while (R == 0.0) {
R = rand() / float(RAND_MAX);
}
return R;
}
/******************************************************************************/
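// Driver: copy the state to the device, run the full simulation in a single kernel launch, copy the results back,
// dump them, and report the average wall time per step.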
void MD::MainMD() {
//
clock_t start, finish;
float tperl;
Gas_Molecule *d_Ar;
Wall_Molecule *d_Pt;
Parameters_Gas *d_Pars_Ar;
Parameters_Wall *d_Pars_Pt;
Parameters_MD *d_Pars_MD;
Dimensionless *d_Pars_Dim;
hipError_t cudaStatus;
//
hipMalloc((void**)&d_Ar, sizeof(Ar));
hipMalloc((void**)&d_Pt, sizeof(Pt));
hipMalloc((void**)&d_Pars_Ar, sizeof(Parameters_Gas));
hipMalloc((void**)&d_Pars_Pt, sizeof(Parameters_Wall));
hipMalloc((void**)&d_Pars_MD, sizeof(Parameters_MD));
hipMalloc((void**)&d_Pars_Dim, sizeof(Dimensionless));
//
hipMemcpy(d_Ar, Ar, sizeof(Ar), hipMemcpyHostToDevice);
hipMemcpy(d_Pt, Pt, sizeof(Pt), hipMemcpyHostToDevice);
hipMemcpy(d_Pars_Ar, &Pars_Ar, sizeof(Parameters_Gas), hipMemcpyHostToDevice);
hipMemcpy(d_Pars_Pt, &Pars_Pt, sizeof(Parameters_Wall), hipMemcpyHostToDevice);
hipMemcpy(d_Pars_MD, &Pars_MD, sizeof(Parameters_MD), hipMemcpyHostToDevice);
hipMemcpy(d_Pars_Dim, &Pars_Dim, sizeof(Dimensionless), hipMemcpyHostToDevice);
//
start = clock();
	Time_Advancement << < 1, Pt_N + Ar_N >> >(d_Pt, d_Ar, d_Pars_Pt, d_Pars_Ar, d_Pars_MD, d_Pars_Dim); // single block: the kernel indexes atoms by threadIdx.x only
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Time_Advancement launch failed: %s\n", hipGetErrorString(cudaStatus));
}
//cudaStatus = hipDeviceSynchronize();
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//}
//
hipMemcpy(Ar, d_Ar, sizeof(Ar), hipMemcpyDeviceToHost);
hipMemcpy(Pt, d_Pt, sizeof(Pt), hipMemcpyDeviceToHost);
hipMemcpy(&Pars_MD, d_Pars_MD, sizeof(Parameters_MD), hipMemcpyDeviceToHost);
Dump();
finish = clock();
tperl = float(finish - start) / CLOCKS_PER_SEC / Pars_MD.TimeStep;
cout << Ar[0].x << ", " <<Ar[0].y << ", " << Ar[0].z << "\n";
cout << "Totally Run " << Pars_MD.TimeStep << " TimeSteps with ArgTimePerStep: " << tperl << " Seconds!\n";
//
hipFree(d_Ar);
hipFree(d_Pt);
hipFree(d_Pars_Ar);
hipFree(d_Pars_Pt);
	hipFree(d_Pars_MD);
	hipFree(d_Pars_Dim);
//
cout << "*******[MainMD]: MD Done!*******\n";
}
////////////////////////////////////////////////////////////////////////////////
/*************************************main*************************************/
////////////////////////////////////////////////////////////////////////////////
int main() {
//
class MD GasWall;
GasWall.Pars_Init();
GasWall.Models_Init();
GasWall.Init_Kernels();
GasWall.MainMD();
system("pause");
return 0;
}
| 04eaccaa947086437e888ed824edcc50ebf0f697.cu | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <fstream>
#include <algorithm>
using namespace std;
//Constants
const float PI = 3.14159265;
const float kB = 1.38E-23;
const int Atom_type_N = 2;
const int Pair_type_N = 3;
const int Pt_I = 6, Pt_J = 6, Pt_K = 3;
const int Pt_N = 4 * Pt_I * Pt_J * Pt_K;
const int Ar_N = 1;
//Customize Structures
struct Wall_Molecule {
int Sid;
int Lid;
float x;
float y;
float z;
float Bx;
float By;
float Bz;
float vx;
float vy;
float vz;
float ax;
float ay;
float az;
float atx;
float aty;
float atz;
};
struct Gas_Molecule {
int Sid;
int Lid;
float x;
float y;
float z;
float vx;
float vy;
float vz;
float ax;
float ay;
float az;
float atx;
float aty;
float atz;
};
struct Parameters_Wall {
int Type;
float Mass;
float T;
float mpVelocity;
float CurrentT;
float ArgVelX;
float ArgVelY;
float ArgVelZ;
float Sigma;
float Epselon;
float Lattice;
};
struct Parameters_Gas {
int Type;
float Mass;
float T;
float mpVelocity;
float Sigma;
float Epselon;
};
struct Parameters_MD {
float ArPt_Sigma;
float ArPt_Epselon;
float CutOff;
float SprK;
float dt;
int Rt;
int Tt;
int DumpStep;
int TimeStep;
float BoxXLow;
float BoxXHigh;
float BoxYLow;
float BoxYHigh;
float BoxZLow;
float BoxZHigh;
bool State;
};
struct Dimensionless {
float Mass;
float Energy;
float Length;
float Velocity;
float Time;
float Acceleration;
};
//MD Definition
class MD {
Parameters_Wall Pars_Pt;
Parameters_Gas Pars_Ar;
Parameters_MD Pars_MD;
Dimensionless Pars_Dim;
Wall_Molecule Pt[Pt_N];
Gas_Molecule Ar[Ar_N];
public:
void Pars_Init();
void Models_Init();
void Init_Kernels();
void Boundary_XY();
void RescaleT1();
void RescaleT2();
void RescaleT3();
void RescaleT4();
void AccelerationCal();
void MainMD();
void Dump();
void Exit();
float random();
};
/******************************************************************************/
void MD::Pars_Init() {
//
Pars_Ar.Type = 1;
Pars_Pt.Type = 2;
Pars_Ar.Mass = 39.95 / 6.02 * 1E-26;//kg
Pars_Pt.Mass = 195.08 / 6.02 * 1E-26;
Pars_Ar.T = 300.0;//K
Pars_Pt.T = 300.0;
Pars_Ar.mpVelocity = sqrt(2 * kB * Pars_Ar.T / Pars_Ar.Mass);//
Pars_Pt.mpVelocity = sqrt(3 * kB * Pars_Pt.T / Pars_Pt.Mass);//
Pars_Pt.CurrentT = 0.0;
Pars_Pt.ArgVelX = 0.0;
Pars_Pt.ArgVelY = 0.0;
Pars_Pt.ArgVelZ = 0.0;
Pars_Ar.Sigma = 3.40 * 1E-10;//m
Pars_Pt.Sigma = 2.47 * 1E-10;
Pars_Ar.Epselon = 1.654E-21;//J
Pars_Pt.Epselon = 5.207E-20;
Pars_Pt.Lattice = 3.93E-10;
Pars_MD.ArPt_Sigma = 2.94 * 1E-10;
Pars_MD.ArPt_Epselon = 1.093E-21;
Pars_MD.CutOff = 10 * 1E-10;
//
Pars_Dim.Mass = Pars_Pt.Mass;
Pars_Dim.Energy = Pars_Pt.Epselon;
Pars_Dim.Length = Pars_Pt.Sigma;
Pars_Dim.Velocity = sqrt(Pars_Dim.Energy / Pars_Dim.Mass);
Pars_Dim.Time = Pars_Dim.Length / Pars_Dim.Velocity;
Pars_Dim.Acceleration = Pars_Dim.Energy / (Pars_Dim.Mass * Pars_Dim.Length);
//
Pars_Ar.Mass /= Pars_Dim.Mass;
Pars_Pt.Mass /= Pars_Dim.Mass;
Pars_Ar.Epselon /= Pars_Dim.Energy;
Pars_Pt.Epselon /= Pars_Dim.Energy;
Pars_MD.ArPt_Epselon /= Pars_Dim.Energy;
Pars_Ar.Sigma /= Pars_Dim.Length;
Pars_Pt.Sigma /= Pars_Dim.Length;
Pars_MD.ArPt_Sigma /= Pars_Dim.Length;
Pars_Pt.Lattice /= Pars_Dim.Length;
Pars_MD.CutOff /= Pars_Dim.Length;
Pars_Ar.mpVelocity /= Pars_Dim.Velocity;
Pars_Pt.mpVelocity /= Pars_Dim.Velocity;
//
Pars_MD.SprK = 5000.0;
Pars_MD.dt = 0.001;
Pars_MD.Rt = 100;
Pars_MD.Tt = 3500000;
Pars_MD.DumpStep = 100;
Pars_MD.TimeStep = 0;
Pars_MD.BoxXLow = 0.0;
Pars_MD.BoxXHigh = Pt_I * Pars_Pt.Lattice;
Pars_MD.BoxYLow = 0.0;
Pars_MD.BoxYHigh = Pt_J * Pars_Pt.Lattice;
Pars_MD.BoxZLow = -(Pt_K - 0.5) * Pars_Pt.Lattice;
Pars_MD.BoxZHigh = 5.0;
Pars_MD.State = true;
//
cout << "*******[Pars_Init]: Parameters Initialized!*******\n";
cout << "Box_ZoneX: " << Pars_MD.BoxXLow << ", " << Pars_MD.BoxXHigh << "\n";
cout << "Box_ZoneY: " << Pars_MD.BoxYLow << ", " << Pars_MD.BoxYHigh << "\n";
cout << "Box_ZoneZ: " << Pars_MD.BoxZLow << ", " << Pars_MD.BoxZHigh << "\n";
}
/******************************************************************************/
void MD::Models_Init() {
int count, i, j, k;
float R1, R2, Rx, Ry;
//
count = 0;
srand((unsigned)time(NULL));
for (i = 0; i < 2 * Pt_I; i++) {
for (j = 0; j < 2 * Pt_J; j++) {
for (k = 0; k < 2 * Pt_K; k++) {
if (i / 2. + j / 2. + k / 2. == int(i / 2. + j / 2. + k / 2.)) {
Pt[count].Sid = count + 1;
Pt[count].Lid = 1;
Pt[count].x = i / 2.*Pars_Pt.Lattice;
Pt[count].Bx = Pt[count].x;
Pt[count].y = j / 2.*Pars_Pt.Lattice;
Pt[count].By = Pt[count].y;
Pt[count].z = (k / 2. - 2.5)*Pars_Pt.Lattice;
Pt[count].Bz = Pt[count].z;
R1 = random();
R2 = random();
Pt[count].vx = Pars_Pt.mpVelocity / sqrt(3) * sqrt(-2 * log(R1)) * cos(2 * PI * R2);
R1 = random();
R2 = random();
Pt[count].vy = Pars_Pt.mpVelocity / sqrt(3) * sqrt(-2 * log(R1)) * cos(2 * PI * R2);
R1 = random();
R2 = random();
Pt[count].vz = Pars_Pt.mpVelocity / sqrt(3) * sqrt(-2 * log(R1)) * cos(2 * PI * R2);
Pt[count].ax = 0.0;
Pt[count].ay = 0.0;
Pt[count].az = 0.0;
Pt[count].atx = 0.0;
Pt[count].aty = 0.0;
Pt[count].atz = 0.0;
count += 1;
}
}
}
}
//
Ar[0].Sid = 1;
Ar[0].Lid = 1;
Rx = random();
Ry = random();
Ar[0].x = Pars_MD.BoxXLow + (Pars_MD.BoxXHigh - Pars_MD.BoxXLow) * Rx;
Ar[0].y = Pars_MD.BoxYLow + (Pars_MD.BoxYHigh - Pars_MD.BoxYLow) * Ry;
Ar[0].z = Pars_MD.BoxZHigh;
R1 = random();
R2 = random();
Ar[0].vx = Pars_Ar.mpVelocity * sqrt(-log(R1)) * cos(2 * PI * R2);//Maxwell Distribution
R1 = random();
R2 = random();
Ar[0].vy = Pars_Ar.mpVelocity * sqrt(-log(R1)) * sin(2 * PI * R2);
R1 = random();
Ar[0].vz = -Pars_Ar.mpVelocity * sqrt(-log(R1));
Ar[0].ax = 0.0;
Ar[0].ay = 0.0;
Ar[0].az = 0.0;
Ar[0].atx = 0.0;
Ar[0].aty = 0.0;
Ar[0].atz = 0.0;
//
cout << "*******[Models_Init]: Models Initialized!*******\n";
cout << "Created " << Pt_N << " Pt\n";
cout << "Created " << Ar_N << " Ar\n";
cout << "Ar Incidence Speed: " << Ar[0].vx << "," << Ar[0].vy << "," << Ar[0].vz << "\n";
}
/******************************************************************************/
void MD::Init_Kernels() {
//
int i;
//
Boundary_XY();
//
Pars_MD.BoxZLow = Pt[0].z;
for (i = 0; i<Pt_N; i++) {
if (Pt[i].z<Pars_MD.BoxZLow) {
Pars_MD.BoxZLow = Pt[i].z;
}
}
cout << "Box Z Low: " << Pars_MD.BoxZLow << "\n";
//
RescaleT1();
RescaleT2();
RescaleT3();
RescaleT4();
//
AccelerationCal();
//
Exit();
//
cout << "*******[Init_Kernels]: Initialization Done!*******\n";
cout << "Pt Average Speed in X: " << Pars_Pt.ArgVelX << "\n";
cout << "Pt Average Speed in Y: " << Pars_Pt.ArgVelY << "\n";
cout << "Pt Average Speed in Z: " << Pars_Pt.ArgVelZ << "\n";
cout << "Pt Temperature: " << Pars_Pt.CurrentT << "\n";
}
/******************************************************************************/
void MD::Boundary_XY() {
//
int i;
//
for (i = 0; i < Pt_N; i++) {
//
if (Pt[i].x < Pars_MD.BoxXLow) {
Pt[i].x += Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
Pt[i].Bx += Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
else if (Pt[i].x >= Pars_MD.BoxXHigh) {
Pt[i].x -= Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
Pt[i].Bx -= Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
//
if (Pt[i].y < Pars_MD.BoxYLow) {
Pt[i].y += Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
Pt[i].By += Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
else if (Pt[i].y >= Pars_MD.BoxYHigh) {
Pt[i].y -= Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
Pt[i].By -= Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
}
//
if (Ar[0].x < Pars_MD.BoxXLow) {
Ar[0].x += Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
else if (Ar[0].x >= Pars_MD.BoxXHigh) {
Ar[0].x -= Pars_MD.BoxXHigh - Pars_MD.BoxXLow;
}
if (Ar[0].y < Pars_MD.BoxYLow) {
Ar[0].y += Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
else if (Ar[0].y >= Pars_MD.BoxYHigh) {
Ar[0].y -= Pars_MD.BoxYHigh - Pars_MD.BoxYLow;
}
}
/******************************************************************************/
void MD::RescaleT1() {
//
int i;
//
Pars_Pt.ArgVelX = 0.0;
Pars_Pt.ArgVelY = 0.0;
Pars_Pt.ArgVelZ = 0.0;
for (i = 0; i < Pt_N; i++) {
Pars_Pt.ArgVelX += Pt[i].vx / Pt_N;
Pars_Pt.ArgVelY += Pt[i].vy / Pt_N;
Pars_Pt.ArgVelZ += Pt[i].vz / Pt_N;
}
}
/******************************************************************************/
void MD::RescaleT2() {
//
int i;
//
for (i = 0; i < Pt_N; i++) {
Pt[i].vx -= Pars_Pt.ArgVelX;
Pt[i].vy -= Pars_Pt.ArgVelY;
Pt[i].vz -= Pars_Pt.ArgVelZ;
}
}
/******************************************************************************/
void MD::RescaleT3() {
//
int i;
//
Pars_Pt.CurrentT = 0.0;
for (i = 0; i < Pt_N; i++) {
Pars_Pt.CurrentT += Pt[i].vx * Pt[i].vx + Pt[i].vy * Pt[i].vy + Pt[i].vz * Pt[i].vz;
}
Pars_Pt.CurrentT *= Pars_Dim.Velocity * Pars_Dim.Velocity * Pars_Pt.Mass * Pars_Dim.Mass / (3 * Pt_N * kB);
}
/******************************************************************************/
void MD::RescaleT4() {
//
int i;
//
for (i = 0; i < Pt_N; i++) {
Pt[i].vx *= sqrt(Pars_Pt.T / Pars_Pt.CurrentT);
Pt[i].vy *= sqrt(Pars_Pt.T / Pars_Pt.CurrentT);
Pt[i].vz *= sqrt(Pars_Pt.T / Pars_Pt.CurrentT);
}
}
/******************************************************************************/
void MD::AccelerationCal() {
//
int i, j;
float Epair, Spair, Pairx, Pairy, Pairz, Dispair, Fpair, Atom_Fx, Atom_Fy, Atom_Fz;
float Spring_Disx, Spring_Fx, Pt_Fx, Spring_Disy, Spring_Fy, Pt_Fy, Spring_Disz, Spring_Fz, Pt_Fz, Ar_Fx, Ar_Fy, Ar_Fz;
//
for (i = 0; i < Pt_N + Ar_N; i++) {
Atom_Fx = 0.0;
Atom_Fy = 0.0;
Atom_Fz = 0.0;
for (j = 0; j < Pt_N + Ar_N; j++) {
if (i < Pt_N && j < Pt_N) {
Epair = Pars_Pt.Epselon;
Spair = Pars_Pt.Sigma;
Pairx = Pt[i].x - Pt[j].x;
Pairy = Pt[i].y - Pt[j].y;
Pairz = Pt[i].z - Pt[j].z;
}
else if (i < Pt_N && j == Pt_N) {
Epair = Pars_MD.ArPt_Epselon;
Spair = Pars_MD.ArPt_Sigma;
Pairx = Pt[i].x - Ar[0].x;
Pairy = Pt[i].y - Ar[0].y;
Pairz = Pt[i].z - Ar[0].z;
}
else if (i == Pt_N && j < Pt_N) {
Epair = Pars_MD.ArPt_Epselon;
Spair = Pars_MD.ArPt_Sigma;
Pairx = Ar[0].x - Pt[j].x;
Pairy = Ar[0].y - Pt[j].y;
Pairz = Ar[0].z - Pt[j].z;
}
else {
Epair = Pars_Ar.Epselon;
Spair = Pars_Ar.Sigma;
Pairx = 0.0;
Pairy = 0.0;
Pairz = 0.0;
}
//
if (abs(Pairx) >= Pars_MD.BoxXHigh - Pars_MD.BoxXLow - Pars_MD.CutOff) {
Pairx -= (Pars_MD.BoxXHigh - Pars_MD.BoxXLow) * Pairx / abs(Pairx);
}
if (abs(Pairy) >= Pars_MD.BoxYHigh - Pars_MD.BoxYLow - Pars_MD.CutOff) {
Pairy -= (Pars_MD.BoxYHigh - Pars_MD.BoxYLow) * Pairy / abs(Pairy);
}
//
Dispair = sqrt(Pairx * Pairx + Pairy * Pairy + Pairz * Pairz);
if (Dispair > 0 && Dispair <= Pars_MD.CutOff) {
Fpair = 48 * Epair*(pow(Spair, 12) / pow(Dispair, 13) - 0.5*pow(Spair, 6) / pow(Dispair, 7));
Atom_Fx += Pairx * Fpair / Dispair;
Atom_Fy += Pairy * Fpair / Dispair;
Atom_Fz += Pairz * Fpair / Dispair;
}
}
if (i < Pt_N) {
//
Spring_Disx = Pt[i].x - Pt[i].Bx;
Spring_Fx = -Pars_MD.SprK * Spring_Disx;
Pt_Fx = Atom_Fx + Spring_Fx;
Pt[i].ax = Pt_Fx / Pars_Pt.Mass;
Spring_Disy = Pt[i].y - Pt[i].By;
Spring_Fy = -Pars_MD.SprK * Spring_Disy;
Pt_Fy = Atom_Fy + Spring_Fy;
Pt[i].ay = Pt_Fy / Pars_Pt.Mass;
Spring_Disz = Pt[i].z - Pt[i].Bz;
Spring_Fz = -Pars_MD.SprK * Spring_Disz;
Pt_Fz = Atom_Fz + Spring_Fz;
Pt[i].az = Pt_Fz / Pars_Pt.Mass;
}
else {
//
Ar_Fx = Atom_Fx;
Ar[0].ax = Ar_Fx / Pars_Ar.Mass;
Ar_Fy = Atom_Fy;
Ar[0].ay = Ar_Fy / Pars_Ar.Mass;
Ar_Fz = Atom_Fz;
Ar[0].az = Ar_Fz / Pars_Ar.Mass;
}
}
}
/******************************************************************************/
__global__ void Time_Advancement(Wall_Molecule *Pt, Gas_Molecule *Ar, Parameters_Wall *Pars_Pt, Parameters_Gas *Pars_Ar, Parameters_MD *Pars_MD, Dimensionless *Pars_Dim) {
//
__shared__ float All_Pos[Pt_N + Ar_N][3];
__shared__ float All_BPos[Pt_N][3];
__shared__ float All_Vel[Pt_N + Ar_N][3];
__shared__ float All_Acc[Pt_N + Ar_N][3];
__shared__ float All_Acc_Temp[Pt_N + Ar_N][3];
__shared__ float Pt_argVel[3];
__shared__ float Pt_T;
//
int tid = threadIdx.x;
int i;
//
float Epair, Spair, Pairx, Pairy, Pairz, Dispair, Fpair, Atom_Fx, Atom_Fy, Atom_Fz;
float Spring_Disx, Spring_Fx, Pt_Fx, Spring_Disy, Spring_Fy, Pt_Fy, Spring_Disz, Spring_Fz, Pt_Fz, Ar_Fx, Ar_Fy, Ar_Fz;
//Share Data
if (tid < Pt_N) {
//
All_Pos[tid][0] = Pt[tid].x;
All_Pos[tid][1] = Pt[tid].y;
All_Pos[tid][2] = Pt[tid].z;
All_BPos[tid][0] = Pt[tid].Bx;
All_BPos[tid][1] = Pt[tid].By;
All_BPos[tid][2] = Pt[tid].Bz;
All_Vel[tid][0] = Pt[tid].vx;
All_Vel[tid][1] = Pt[tid].vy;
All_Vel[tid][2] = Pt[tid].vz;
All_Acc[tid][0] = Pt[tid].ax;
All_Acc[tid][1] = Pt[tid].ay;
All_Acc[tid][2] = Pt[tid].az;
}
if (tid == Pt_N) {
//
All_Pos[tid][0] = Ar[0].x;
All_Pos[tid][1] = Ar[0].y;
All_Pos[tid][2] = Ar[0].z;
All_Vel[tid][0] = Ar[0].vx;
All_Vel[tid][1] = Ar[0].vy;
All_Vel[tid][2] = Ar[0].vz;
All_Acc[tid][0] = Ar[0].ax;
All_Acc[tid][1] = Ar[0].ay;
All_Acc[tid][2] = Ar[0].az;
}
//
if (tid == Pt_N) {
printf("*******[Time_Advancement]: Starting!*******\n");
}
//
while ((*Pars_MD).State) {
//Verlet_Pos
if (tid < Pt_N + Ar_N) {
All_Pos[tid][0] += All_Vel[tid][0] * (*Pars_MD).dt + 0.5 * All_Acc[tid][0] * (*Pars_MD).dt * (*Pars_MD).dt;
All_Pos[tid][1] += All_Vel[tid][1] * (*Pars_MD).dt + 0.5 * All_Acc[tid][1] * (*Pars_MD).dt * (*Pars_MD).dt;
All_Pos[tid][2] += All_Vel[tid][2] * (*Pars_MD).dt + 0.5 * All_Acc[tid][2] * (*Pars_MD).dt * (*Pars_MD).dt;
}
//Boundary_XY
if (tid < Pt_N + Ar_N) {
//X
if (All_Pos[tid][0] < (*Pars_MD).BoxXLow) {
All_Pos[tid][0] += (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
if (tid<Pt_N) {
All_BPos[tid][0] += (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
}
}
else if (All_Pos[tid][0] >= (*Pars_MD).BoxXHigh) {
All_Pos[tid][0] -= (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
if (tid<Pt_N) {
All_BPos[tid][0] -= (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow;
}
}
//Y
if (All_Pos[tid][1] < (*Pars_MD).BoxYLow) {
All_Pos[tid][1] += (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
if (tid<Pt_N) {
All_BPos[tid][1] += (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
}
}
else if (All_Pos[tid][1] >= (*Pars_MD).BoxYHigh) {
All_Pos[tid][1] -= (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
if (tid<Pt_N) {
All_BPos[tid][1] -= (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow;
}
}
}
//Last_Acceleration
if (tid < Pt_N + Ar_N) {
All_Acc_Temp[tid][0] = All_Acc[tid][0];
All_Acc_Temp[tid][1] = All_Acc[tid][1];
All_Acc_Temp[tid][2] = All_Acc[tid][2];
}
//AccelerationCal
__syncthreads();
//
if (tid < Pt_N + Ar_N) {
Atom_Fx = 0.0;
Atom_Fy = 0.0;
Atom_Fz = 0.0;
for (i = 0; i < Pt_N + Ar_N; i++) {
if (tid < Pt_N && i < Pt_N) {
Epair = (*Pars_Pt).Epselon;
Spair = (*Pars_Pt).Sigma;
}
else if (tid == Pt_N && i == Pt_N) {
Epair = (*Pars_Ar).Epselon;
Spair = (*Pars_Ar).Sigma;
}
else {
Epair = (*Pars_MD).ArPt_Epselon;
Spair = (*Pars_MD).ArPt_Sigma;
}
Pairx = All_Pos[tid][0] - All_Pos[i][0];
Pairy = All_Pos[tid][1] - All_Pos[i][1];
Pairz = All_Pos[tid][2] - All_Pos[i][2];
//
if (abs(Pairx) >= (*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow - (*Pars_MD).CutOff) {
Pairx -= ((*Pars_MD).BoxXHigh - (*Pars_MD).BoxXLow) * Pairx / abs(Pairx);
}
if (abs(Pairy) >= (*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow - (*Pars_MD).CutOff) {
Pairy -= ((*Pars_MD).BoxYHigh - (*Pars_MD).BoxYLow) * Pairy / abs(Pairy);
}
//
Dispair = sqrt(Pairx * Pairx + Pairy * Pairy + Pairz * Pairz);
if (Dispair > 0 && Dispair <= (*Pars_MD).CutOff) {
Fpair = 48 * Epair*(pow(Spair, 12) / pow(Dispair, 13) - 0.5*pow(Spair, 6) / pow(Dispair, 7));
Atom_Fx += Pairx * Fpair / Dispair;
Atom_Fy += Pairy * Fpair / Dispair;
Atom_Fz += Pairz * Fpair / Dispair;
}
}
if (tid < Pt_N) {
//
Spring_Disx = All_Pos[tid][0] - All_BPos[tid][0];
Spring_Fx = -(*Pars_MD).SprK * Spring_Disx;
Pt_Fx = Atom_Fx + Spring_Fx;
All_Acc[tid][0] = Pt_Fx / (*Pars_Pt).Mass;
Spring_Disy = All_Pos[tid][1] - All_BPos[tid][1];
Spring_Fy = -(*Pars_MD).SprK * Spring_Disy;
Pt_Fy = Atom_Fy + Spring_Fy;
All_Acc[tid][1] = Pt_Fy / (*Pars_Pt).Mass;
Spring_Disz = All_Pos[tid][2] - All_BPos[tid][2];
Spring_Fz = -(*Pars_MD).SprK * Spring_Disz;
Pt_Fz = Atom_Fz + Spring_Fz;
All_Acc[tid][2] = Pt_Fz / (*Pars_Pt).Mass;
}
else {
//
Ar_Fx = Atom_Fx;
All_Acc[tid][0] = Ar_Fx / (*Pars_Ar).Mass;
Ar_Fy = Atom_Fy;
All_Acc[tid][1] = Ar_Fy / (*Pars_Ar).Mass;
Ar_Fz = Atom_Fz;
All_Acc[tid][2] = Ar_Fz / (*Pars_Ar).Mass;
}
}
//Verlet_Vel
if (tid < Pt_N + Ar_N) {
All_Vel[tid][0] += 0.5 * (All_Acc_Temp[tid][0] + All_Acc[tid][0]) * (*Pars_MD).dt;
All_Vel[tid][1] += 0.5 * (All_Acc_Temp[tid][1] + All_Acc[tid][1]) * (*Pars_MD).dt;
All_Vel[tid][2] += 0.5 * (All_Acc_Temp[tid][2] + All_Acc[tid][2]) * (*Pars_MD).dt;
}
__syncthreads();
//RescaleT
if (tid == Pt_N) {
Pt_argVel[0] = 0.0;
Pt_argVel[1] = 0.0;
Pt_argVel[2] = 0.0;
for (i = 0; i < Pt_N; i++) {
Pt_argVel[0] += All_Vel[i][0];
Pt_argVel[1] += All_Vel[i][1];
Pt_argVel[2] += All_Vel[i][2];
}
Pt_argVel[0] /= Pt_N;
Pt_argVel[1] /= Pt_N;
Pt_argVel[2] /= Pt_N;
}
__syncthreads();
if (tid < Pt_N) {
All_Vel[tid][0] -= Pt_argVel[0];
All_Vel[tid][1] -= Pt_argVel[1];
All_Vel[tid][2] -= Pt_argVel[2];
}
__syncthreads();
if (tid == Pt_N) {
Pt_T = 0.0;
for (i = 0; i < Pt_N; i++) {
Pt_T += All_Vel[i][0] * All_Vel[i][0] + All_Vel[i][1] * All_Vel[i][1] + All_Vel[i][2] * All_Vel[i][2];
}
Pt_T *= (*Pars_Dim).Velocity * (*Pars_Dim).Velocity * (*Pars_Pt).Mass * (*Pars_Dim).Mass / (3 * Pt_N * 1.38E-23);
}
__syncthreads();
//
if (tid == Pt_N) {
printf("Before modification, Pt_T: %f\n", Pt_T);
}
if (tid < Pt_N) {
All_Vel[tid][0] *= sqrt((*Pars_Pt).T / Pt_T);
All_Vel[tid][1] *= sqrt((*Pars_Pt).T / Pt_T);
All_Vel[tid][2] *= sqrt((*Pars_Pt).T / Pt_T);
}
__syncthreads();
if (tid == Pt_N) {
Pt_T = 0.0;
for (i = 0; i < Pt_N; i++) {
Pt_T += All_Vel[i][0] * All_Vel[i][0] + All_Vel[i][1] * All_Vel[i][1] + All_Vel[i][2] * All_Vel[i][2];
}
Pt_T *= (*Pars_Dim).Velocity * (*Pars_Dim).Velocity * (*Pars_Pt).Mass * (*Pars_Dim).Mass / (3 * Pt_N * 1.38E-23);
}
__syncthreads();
//
if (tid == Pt_N) {
printf("After modification, Pt_T: %f\n", Pt_T);
}
//Update_BoxZ
if (tid == Pt_N) {
(*Pars_MD).BoxZLow = All_Pos[0][2];
for (i = 0; i < Pt_N + Ar_N; i++) {
if ((*Pars_MD).BoxZLow > All_Pos[i][2]) {
(*Pars_MD).BoxZLow = All_Pos[i][2];
}
}
}
__syncthreads();
//
if (tid == Pt_N) {
printf("BoxZLow: %f\n", (*Pars_MD).BoxZLow);
}
if (tid == Pt_N) {
(*Pars_MD).TimeStep += 1;
printf("CurrentStep: %d\n", (*Pars_MD).TimeStep);
printf("In Shared Memery, Ar_Z: %f\n", All_Pos[tid][2]);
//
if (All_Pos[tid][2] >(*Pars_MD).BoxZHigh || (*Pars_MD).TimeStep >= (*Pars_MD).Tt) {
(*Pars_MD).State = false;
(*Pars_MD).DumpStep = 1;
}
}
__syncthreads();
//Get Data
if (tid == Pt_N) {
printf("Copying Data From Shared Memery To Global Memery......\n");
}
if (tid < Pt_N) {
//
Pt[tid].x = All_Pos[tid][0];
Pt[tid].y = All_Pos[tid][1];
Pt[tid].z = All_Pos[tid][2];
}
if (tid == Pt_N) {
//
printf("In Global Memery, Ar_Z: %f\n", All_Pos[tid][2]);
Ar[0].x = All_Pos[tid][0];
Ar[0].y = All_Pos[tid][1];
Ar[0].z = All_Pos[tid][2];
}
}
if (tid == Pt_N) {
printf("*******[Time_Advancement]: Done!*******\n");
}
}
/******************************************************************************/
void MD::Dump() {
//
int i;
//
if (Pars_MD.TimeStep % Pars_MD.DumpStep == 0) {
//
ofstream MD;
MD.open("Kernel_MD_CUDA_C.dump", ios::app);
MD << "ITEM: TIMESTEP\n";
MD << Pars_MD.TimeStep << "\n";
MD << "ITEM: NUMBER OF ATOMS\n";
MD << Pt_N + Ar_N << "\n";
MD << "ITEM: BOX BOUNDS pp pp ff\n";
MD << Pars_MD.BoxXLow << " " << Pars_MD.BoxXHigh << "\n";
MD << Pars_MD.BoxYLow << " " << Pars_MD.BoxYHigh << "\n";
MD << Pars_MD.BoxZLow << " " << Pars_MD.BoxZHigh << "\n";
MD << "ITEM: ATOMS id type x y z\n";
for (i = 0; i < Pt_N; i++) {
MD << i + 1 << " " << Pars_Pt.Type << " " << Pt[i].x << " " << Pt[i].y << " " << Pt[i].z << "\n";
}
MD << Pt_N + Ar_N << " " << Pars_Ar.Type << " " << Ar[0].x << " " << Ar[0].y << " " << Ar[0].z << "\n";
MD.close();
//
ofstream Zt;
Zt.open("Kernel_MD_CUDA_C_Zt.dat", ios::app);
Zt << Pars_MD.TimeStep * Pars_MD.dt << " " << Ar[0].z << "\n";
Zt.close();
}
}
/******************************************************************************/
void MD::Exit() {
//
if (Ar[0].z > Pars_MD.BoxZHigh || Pars_MD.TimeStep >= Pars_MD.Tt) {
Pars_MD.State = false;
Pars_MD.DumpStep = 1;
Dump();
}
else {
Dump();
}
}
/******************************************************************************/
float MD::random() {
//
float R;
//
R = 0.0;
while (R == 0.0) {
R = rand() / float(RAND_MAX);
}
return R;
}
/******************************************************************************/
void MD::MainMD() {
//
clock_t start, finish;
float tperl;
Gas_Molecule *d_Ar;
Wall_Molecule *d_Pt;
Parameters_Gas *d_Pars_Ar;
Parameters_Wall *d_Pars_Pt;
Parameters_MD *d_Pars_MD;
Dimensionless *d_Pars_Dim;
cudaError_t cudaStatus;
//
cudaMalloc((void**)&d_Ar, sizeof(Ar));
cudaMalloc((void**)&d_Pt, sizeof(Pt));
cudaMalloc((void**)&d_Pars_Ar, sizeof(Parameters_Gas));
cudaMalloc((void**)&d_Pars_Pt, sizeof(Parameters_Wall));
cudaMalloc((void**)&d_Pars_MD, sizeof(Parameters_MD));
cudaMalloc((void**)&d_Pars_Dim, sizeof(Dimensionless));
//
cudaMemcpy(d_Ar, Ar, sizeof(Ar), cudaMemcpyHostToDevice);
cudaMemcpy(d_Pt, Pt, sizeof(Pt), cudaMemcpyHostToDevice);
cudaMemcpy(d_Pars_Ar, &Pars_Ar, sizeof(Parameters_Gas), cudaMemcpyHostToDevice);
cudaMemcpy(d_Pars_Pt, &Pars_Pt, sizeof(Parameters_Wall), cudaMemcpyHostToDevice);
cudaMemcpy(d_Pars_MD, &Pars_MD, sizeof(Parameters_MD), cudaMemcpyHostToDevice);
cudaMemcpy(d_Pars_Dim, &Pars_Dim, sizeof(Dimensionless), cudaMemcpyHostToDevice);
//
start = clock();
	Time_Advancement << < 1, Pt_N + Ar_N >> >(d_Pt, d_Ar, d_Pars_Pt, d_Pars_Ar, d_Pars_MD, d_Pars_Dim); // single block: the kernel indexes atoms by threadIdx.x only
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Time_Advancement launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
//cudaStatus = cudaDeviceSynchronize();
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//}
//
cudaMemcpy(Ar, d_Ar, sizeof(Ar), cudaMemcpyDeviceToHost);
cudaMemcpy(Pt, d_Pt, sizeof(Pt), cudaMemcpyDeviceToHost);
cudaMemcpy(&Pars_MD, d_Pars_MD, sizeof(Parameters_MD), cudaMemcpyDeviceToHost);
Dump();
finish = clock();
tperl = float(finish - start) / CLOCKS_PER_SEC / Pars_MD.TimeStep;
cout << Ar[0].x << ", " <<Ar[0].y << ", " << Ar[0].z << "\n";
cout << "Totally Run " << Pars_MD.TimeStep << " TimeSteps with ArgTimePerStep: " << tperl << " Seconds!\n";
//
cudaFree(d_Ar);
cudaFree(d_Pt);
cudaFree(d_Pars_Ar);
cudaFree(d_Pars_Pt);
cudaFree(d_Pars_MD);
cudaFree(d_Pars_Dim);
//
cout << "*******[MainMD]: MD Done!*******\n";
}
////////////////////////////////////////////////////////////////////////////////
/*************************************main*************************************/
////////////////////////////////////////////////////////////////////////////////
int main() {
//
class MD GasWall;
GasWall.Pars_Init();
GasWall.Models_Init();
GasWall.Init_Kernels();
GasWall.MainMD();
system("pause");
return 0;
}
|
e40398fa79709eba4635f4a09364d2bb05ac071b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define size 5
#define threads 32
using namespace std;
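// Three variants of the same element-wise matrix-by-scalar multiplication:
// plain global memory, statically allocated shared memory, and dynamically
// allocated (extern) shared memory whose size is supplied at launch time.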
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
}
__global__ void callOperationSharedStatic(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
__shared__ int s_a[size * size], s_res[size * size], s_x;
s_x = x;
s_a[tid] = a[tid];
s_res[tid] = s_a[tid] * s_x;
res[tid] = s_res[tid];
}
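// Dynamic shared-memory variant: the extern __shared__ buffer is sized by the
// third <<<...>>> launch parameter and manually split into s_a and s_res.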
__global__ void callOperationSharedDynamic(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
extern __shared__ int data[];
int *s_a = data;
int *s_res = &s_a[size * size];
__shared__ int s_x;
s_x = x;
s_a[tid] = a[tid];
s_res[tid] = s_a[tid] * s_x;
res[tid] = s_res[tid];
}
int main()
{
int *a, *res;
int *d_a, *d_res;
int x = 10;
a = (int*)malloc(size * size * sizeof(int));
res = (int*)malloc(size* size * sizeof(int));
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
a[i * size + j] = size * i - j;
}
}
cout << "Skalar x: " << x << endl;
cout << "\n\nMatrica A:" << endl;
for (int i = 0; i < size; i++) {
cout << endl;
for (int j = 0; j < size; j++) {
cout<<a[i * size + j]<<"\t";
}
}
hipMalloc((void**)&d_a, size * size * sizeof(int));
hipMalloc((void**)&d_res, size * size * sizeof(int));
hipMemcpy(d_a, a, size * size * sizeof(int), hipMemcpyHostToDevice);
dim3 numberOfBlocks(size / threads + 1, size / threads + 1, 1);
dim3 numberOfThreads(threads, threads, 1);
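// The third launch argument below is the dynamic shared-memory size in bytes:
// one size*size int buffer for s_a plus one for s_res.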
//callOperation << <numberOfBlocks, numberOfThreads >> > (d_a, d_res, x, size);
//callOperationSharedStatic << <numberOfBlocks, numberOfThreads >> > (d_a, d_res, x, size);
callOperationSharedDynamic << <numberOfBlocks, numberOfThreads , size * size * sizeof(int) + size * size * sizeof(int) >> > (d_a, d_res, x, size);
hipMemcpy(res, d_res, size *size * sizeof(int), hipMemcpyDeviceToHost);
cout << "\n\nMatrica RES:" << endl;
for (int i = 0; i < size; i++) {
cout << endl;
for (int j = 0; j < size; j++) {
cout << res[i * size + j] << "\t";
}
}
hipFree(d_a);
hipFree(d_res);
free(res);
free(a);
system("PAUSE");
return 0;
}
| e40398fa79709eba4635f4a09364d2bb05ac071b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define size 5
#define threads 32
using namespace std;
__global__ void callOperation(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
res[tid] = a[tid] * x;
}
__global__ void callOperationSharedStatic(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
__shared__ int s_a[size * size], s_res[size * size], s_x;
s_x = x;
s_a[tid] = a[tid];
s_res[tid] = s_a[tid] * s_x;
res[tid] = s_res[tid];
}
__global__ void callOperationSharedDynamic(int *a, int *res, int x, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
extern __shared__ int data[];
int *s_a = data;
int *s_res = &s_a[size * size];
__shared__ int s_x;
s_x = x;
s_a[tid] = a[tid];
s_res[tid] = s_a[tid] * s_x;
res[tid] = s_res[tid];
}
int main()
{
int *a, *res;
int *d_a, *d_res;
int x = 10;
a = (int*)malloc(size * size * sizeof(int));
res = (int*)malloc(size* size * sizeof(int));
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
a[i * size + j] = size * i - j;
}
}
cout << "Skalar x: " << x << endl;
cout << "\n\nMatrica A:" << endl;
for (int i = 0; i < size; i++) {
cout << endl;
for (int j = 0; j < size; j++) {
cout<<a[i * size + j]<<"\t";
}
}
cudaMalloc((void**)&d_a, size * size * sizeof(int));
cudaMalloc((void**)&d_res, size * size * sizeof(int));
cudaMemcpy(d_a, a, size * size * sizeof(int), cudaMemcpyHostToDevice);
dim3 numberOfBlocks(size / threads + 1, size / threads + 1, 1);
dim3 numberOfThreads(threads, threads, 1);
//callOperation << <numberOfBlocks, numberOfThreads >> > (d_a, d_res, x, size);
//callOperationSharedStatic << <numberOfBlocks, numberOfThreads >> > (d_a, d_res, x, size);
callOperationSharedDynamic << <numberOfBlocks, numberOfThreads , size * size * sizeof(int) + size * size * sizeof(int) >> > (d_a, d_res, x, size);
cudaMemcpy(res, d_res, size *size * sizeof(int), cudaMemcpyDeviceToHost);
cout << "\n\nMatrica RES:" << endl;
for (int i = 0; i < size; i++) {
cout << endl;
for (int j = 0; j < size; j++) {
cout << res[i * size + j] << "\t";
}
}
cudaFree(d_a);
cudaFree(d_res);
free(res);
free(a);
system("PAUSE");
return 0;
}
|
12e540a5d2dc536d65a318f2653fb2d5845a421f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start); \
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
__global__ void kernel(unsigned char *input,
unsigned char *output,
unsigned int height,
unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
// http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm
// https://stackoverflow.com/questions/14358916/applying-sobel-edge-detection-with-cuda-and-opencv-on-a-grayscale-jpg-image
if (x < width && y < height ){
int gX[3][3] = {{-1,0,1},{-2,0,2},{-1,0,1}};
int gY[3][3] = {{-1,-2,-1},{0,0,0},{1,2,1}};
double x_sum = 0.0;
double y_sum = 0.0;
int index;
for (int j = -1; j < 2; j++) {
for (int i = -1; i < 2; i++) {
index = width * (j + y) + i + x;
x_sum += input[index] * gX[j+1][i+1];
y_sum += input[index] * gY[j+1][i+1];
}
}
double answer = sqrt(x_sum * x_sum + y_sum * y_sum);
// the gradient magnitude can exceed 255, so threshold it into a binary (0/255) edge map
if (answer < 128) {answer = 0;} else {
answer = 255;
}
output[x*height+y] = answer;
}
}
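// Host wrapper: pads the grid to TILE_SIZE multiples, allocates and zeroes the
// device buffers, uploads the grayscale image, runs the Sobel kernel (timing it
// when CUDA_TIMING is defined) and copies the thresholded edge map back.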
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(hipMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu, output_gpu, height, width);
checkCuda(hipDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(hipMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
}
| 12e540a5d2dc536d65a318f2653fb2d5845a421f.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
__global__ void kernel(unsigned char *input,
unsigned char *output,
unsigned int height,
unsigned int width){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
// http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm
// https://stackoverflow.com/questions/14358916/applying-sobel-edge-detection-with-cuda-and-opencv-on-a-grayscale-jpg-image
if (x < width && y < height ){
int gX[3][3] = {{-1,0,1},{-2,0,2},{-1,0,1}};
int gY[3][3] = {{-1,-2,-1},{0,0,0},{1,2,1}};
double x_sum = 0.0;
double y_sum = 0.0;
int index;
for (int j = -1; j < 2; j++) {
for (int i = -1; i < 2; i++) {
index = width * (j + y) + i + x;
x_sum += input[index] * gX[j+1][i+1];
y_sum += input[index] * gY[j+1][i+1];
}
}
double answer = sqrt(x_sum * x_sum + y_sum * y_sum);
// the gradient magnitude can exceed 255, so threshold it into a binary (0/255) edge map
if (answer < 128) {answer = 0;} else {
answer = 255;
}
output[x*height+y] = answer;
}
}
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, height, width);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
d1c0c5fce522fc5c9c80c257aa37f69228cdef88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#include <opencv2/gpu/gpu.hpp>
#include <opencv2/gpu/devmem2d.hpp>
#include <math_constants.h>
#include "cv/common.hpp" //copied from opencv
#include "pcl/limits.hpp"
#include "pcl/device.hpp"
#include "pcl/vector_math.hpp"
namespace btl{ namespace device
{
using namespace pcl::device;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct STSDF{
enum{
MAX_WEIGHT = 1 << 7
};
};
/*
__constant__ double _aRW[9]; //camera externals Rotation defined in world
__constant__ double _aTW[3]; //camera externals Translation defined in world
__constant__ double _aCW[3]; //camera center*/
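// Functor that fuses one scaled depth frame into the TSDF volume: each voxel
// centre is transformed into the camera frame, projected to a pixel, and the
// signed distance between the measured depth and the voxel-to-camera distance
// is truncated to [-1,1] and blended into the packed short2 (tsdf, weight)
// running average.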
struct SVolumn{
pcl::device::Intr sCameraIntrinsics_;
float _fVoxelSize;
float _fTruncDistanceM;
pcl::device::Mat33 _Rw;
//float3 _Tw;
float3 _Cw;
cv::gpu::DevMem2D_<float> _cvgmDepthScaled;
cv::gpu::DevMem2D_<short2> _cvgmYZxXVolume;
__device__ __forceinline__ float3 gridToCoordinateVolume(const int3& n3Grid_ )
{
float x = n3Grid_.x * _fVoxelSize;
float y = n3Grid_.y * _fVoxelSize;// - convert from cv to GL
float z = n3Grid_.z * _fVoxelSize;// - convert from cv to GL
return make_float3( x,y,z );
}
__device__ __forceinline__ void operator () (){
int nX = threadIdx.x + blockIdx.x * blockDim.x; // for each y*z z0,z1,...
int nY = threadIdx.y + blockIdx.y * blockDim.y;
if (nX >= _cvgmYZxXVolume.cols && nY >= _cvgmYZxXVolume.rows) return;
int nHalfCols = _cvgmYZxXVolume.rows/2;
float fHalfVoxelSize = _fVoxelSize/2.f;
//calc grid idx
int3 n3Grid;
n3Grid.x = nY;
n3Grid.y = nX/_cvgmYZxXVolume.rows;
n3Grid.z = nX%_cvgmYZxXVolume.rows;
//calc voxel center coordinate, 0,1|2,3 // -1.5,-0.5|0.5,1.5 //fVoxelSize = 1.0
float3 fVoxelCenter = gridToCoordinateVolume(n3Grid) ;
//convert voxel to camera coordinate (local coordinate)
//fVoxelCenterLocal = R * fVoxelCenter + T = R * ( fVoxelCenter - Cw )
float3 fVoxelCenterLocal;
fVoxelCenterLocal = _Rw * ( fVoxelCenter - _Cw );
/*fVoxelCenterLocal.x = _aRW[0]*fVoxelCenter.x+_aRW[3]*fVoxelCenter.y+_aRW[6]*fVoxelCenter.z+_aTW[0];
fVoxelCenterLocal.y = _aRW[1]*fVoxelCenter.x+_aRW[4]*fVoxelCenter.y+_aRW[7]*fVoxelCenter.z+_aTW[1];
fVoxelCenterLocal.z = _aRW[2]*fVoxelCenter.x+_aRW[5]*fVoxelCenter.y+_aRW[8]*fVoxelCenter.z+_aTW[2];*/
//project voxel local to image to pick up corresponding depth
int c = __float2int_rn((sCameraIntrinsics_.fx * fVoxelCenterLocal.x + sCameraIntrinsics_.cx * fVoxelCenterLocal.z)/fVoxelCenterLocal.z);
int r = __float2int_rn((sCameraIntrinsics_.fy * fVoxelCenterLocal.y + sCameraIntrinsics_.cy * fVoxelCenterLocal.z)/fVoxelCenterLocal.z);
if (c < 0 || r < 0 || c >= _cvgmDepthScaled.cols || r >= _cvgmDepthScaled.rows) return;
//get the depthScaled
const float& fDepth = _cvgmDepthScaled.ptr(r)[c]; if(isnan<float>(fDepth) || fDepth < 0.1) return;
float3 Tmp;
Tmp = fVoxelCenter - _Cw;
/*Tmp.x = fVoxelCenter.x - _aCW[0];
Tmp.y = fVoxelCenter.y - _aCW[1];
Tmp.z = fVoxelCenter.z - _aCW[2];*/
float fSignedDistance = fDepth - sqrt(Tmp.x*Tmp.x + Tmp.y*Tmp.y+ Tmp.z*Tmp.z); //- outside + inside
float fTrancDistInv = 1.0f / _fTruncDistanceM;
/*float fTSDF;
if(fSignedDistance > 0 ){
fTSDF = fmin ( 1.0f, fSignedDistance * fTrancDistInv );
}
else{
fTSDF = fmax (-1.0f, fSignedDistance * fTrancDistInv );
}// truncated and normalize the Signed Distance to [-1,1]
//read an unpack tsdf value and store into the volumes
short2& sValue = _cvgmYZxXVolume.ptr(nY)[nX];
float fTSDFNew;
int nWeightNew;
if(sValue.x < 30000 ){
float fTSDFPrev;
int nWeightPrev;
pcl::device::unpack_tsdf(sValue,fTSDFPrev,nWeightPrev);
fTSDFNew = (fTSDFPrev*nWeightPrev + fTSDF*1.f)/(1.f+nWeightPrev);
nWeightNew = min(STSDF::MAX_WEIGHT,nWeightPrev+1);
}else{
fTSDFNew = fTSDF;
nWeightNew = 1;
}
pcl::device::pack_tsdf( fTSDFNew,nWeightNew,sValue);*/
float fTSDF = fSignedDistance * fTrancDistInv;
//read an unpack tsdf value and store into the volumes
short2& sValue = _cvgmYZxXVolume.ptr(nY)[nX];
float fTSDFNew,fTSDFPrev;
int nWeightNew,nWeightPrev;
if(fTSDF > 0.f ){
fTSDF = fmin ( 1.f, fTSDF );
if(abs(sValue.x) < 30000 ){
pcl::device::unpack_tsdf(sValue,fTSDFPrev,nWeightPrev);
fTSDFNew = (fTSDFPrev*nWeightPrev + fTSDF*1.f)/(1.f+nWeightPrev);
nWeightNew = min(STSDF::MAX_WEIGHT,nWeightPrev+1);
}else{
fTSDFNew = fTSDF;
nWeightNew = 1;
}
pcl::device::pack_tsdf( fTSDFNew,nWeightNew,sValue);
}
else{//if (fTSDF < = 0.f)
fTSDF = fmax ( -1.f, fTSDF );
if(abs(sValue.x) < 30000 ){
pcl::device::unpack_tsdf(sValue,fTSDFPrev,nWeightPrev);
fTSDFNew = (fTSDFPrev*nWeightPrev + fTSDF*1.f)/(1.f+nWeightPrev);
nWeightNew = min(STSDF::MAX_WEIGHT,nWeightPrev+1);
}else{
fTSDFNew = fTSDF;
nWeightNew = 1;
}
pcl::device::pack_tsdf( fTSDFNew,nWeightNew,sValue);
}// truncated and normalize the Signed Distance to [-1,1]
return;
}//kernelIntegrateFrame2VolumeCVmCVm()
};
__global__ void kernelIntegrateFrame2VolumeCVmCVm( SVolumn sSV_ ){
sSV_();
}
void integrateFrame2VolumeCVCV(cv::gpu::GpuMat& cvgmDepthScaled_, const unsigned short usPyrLevel_,
const float fVoxelSize_, const float fTruncDistanceM_,
const pcl::device::Mat33& Rw_, const float3& Cw_,
//const double* pR_, const double* pT_, const double* pC_,
const float fFx_, const float fFy_, const float u_, const float v_, cv::gpu::GpuMat* pcvgmYZxXVolume_){
//pR_ is column major
/*size_t sN1 = sizeof(double) * 9;
cudaSafeCall( hipMemcpyToSymbol(_aRW, pR_, sN1) );
size_t sN2 = sizeof(double) * 3;
cudaSafeCall( hipMemcpyToSymbol(_aTW, pT_, sN2) );
cudaSafeCall( hipMemcpyToSymbol(_aCW, pC_, sN2) );*/
SVolumn sSV;
sSV._Rw = Rw_;
sSV._Cw = Cw_;
sSV.sCameraIntrinsics_ = pcl::device::Intr(fFx_,fFy_,u_,v_)(usPyrLevel_);
sSV._cvgmDepthScaled = cvgmDepthScaled_;
sSV._fVoxelSize = fVoxelSize_;
sSV._fTruncDistanceM = fTruncDistanceM_;
sSV._cvgmYZxXVolume = *pcvgmYZxXVolume_;
//define grid and block
dim3 block(64, 16);
dim3 grid(cv::gpu::divUp(pcvgmYZxXVolume_->cols, block.x), cv::gpu::divUp(pcvgmYZxXVolume_->rows, block.y));
hipLaunchKernelGGL(( kernelIntegrateFrame2VolumeCVmCVm), dim3(grid),dim3(block), 0, 0, sSV );
cudaSafeCall ( hipGetLastError () );
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__constant__ float _aParam[2];//0:_fThreshold;1:_fSize
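// Thresholding pass: voxels whose |TSDF| is below _aParam[0] emit their centre
// coordinates (scaled by the voxel size _aParam[1]); all others are marked with
// quiet NaNs so later stages can skip them.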
__global__ void kernelThresholdVolume2by2CVGL(const cv::gpu::DevMem2D_<short2> cvgmYZxXVolume_,cv::gpu::DevMem2D_<float3> cvgmYZxXVolCenter_){
int nX = threadIdx.x + blockIdx.x * blockDim.x; // for each y*z z0,z1,...
int nY = threadIdx.y + blockIdx.y * blockDim.y;
if (nX >= cvgmYZxXVolume_.cols && nY >= cvgmYZxXVolume_.rows) return; //both nX and nX and bounded by cols as the structure is a cubic
const short2& sValue = cvgmYZxXVolume_.ptr(nY)[nX];
float3& fCenter = cvgmYZxXVolCenter_.ptr(nY)[nX];
int nGridX = nY;
int nGridY = nX/cvgmYZxXVolume_.rows;
int nGridZ = nX%cvgmYZxXVolume_.rows;
float fTSDF = pcl::device::unpack_tsdf(sValue);
if(fabsf(fTSDF)<_aParam[0]){
fCenter.x = nGridX *_aParam[1] ;
fCenter.y = nGridY *_aParam[1] ;// - convert from cv to GL
fCenter.z = nGridZ *_aParam[1] ;// - convert from cv to GL
}//within threshold
else{
fCenter.x = fCenter.y = fCenter.z = pcl::device::numeric_limits<float>::quiet_NaN();
}
return;
}//kernelThresholdVolume()
void thresholdVolumeCVGL(const cv::gpu::GpuMat& cvgmYZxXVolume_, const float fThreshold_, const float fVoxelSize_, const cv::gpu::GpuMat* pcvgmYZxXVolCenter_){
size_t sN = sizeof(float)*2;
float* const pParam = (float*) malloc( sN );
pParam[0] = fThreshold_;
pParam[1] = fVoxelSize_;
cudaSafeCall( hipMemcpyToSymbol(_aParam, pParam, sN) );
free(pParam);
dim3 block(64, 16);
dim3 grid(cv::gpu::divUp(cvgmYZxXVolume_.cols, block.x), cv::gpu::divUp(cvgmYZxXVolume_.rows, block.y));
//kernelThresholdVolumeCVGL<<<grid,block>>>(cvgmYZxXVolume_,*pcvgmYZxXVolCenter_);
hipLaunchKernelGGL(( kernelThresholdVolume2by2CVGL), dim3(grid),dim3(block), 0, 0, cvgmYZxXVolume_,*pcvgmYZxXVolCenter_);
cudaSafeCall ( hipGetLastError () );
}//thresholdVolume()
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
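// Cross-section export: picks slice _usV along the axis selected by _usType
// (1 = X, 2 = Y, 3 = Z) and renders its TSDF values as a grey image, with
// out-of-range positive values shown green and negative ones red.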
struct SCross{
ushort _usV;
cv::gpu::DevMem2D_<short2> _cvgmYZxXVolume;
cv::gpu::DevMem2D_<uchar3> _cvgmCross;
ushort _usType; // cross-section intercepts the X, Y, or Z axis
__device__ __forceinline__ void operator () () {
int nX = threadIdx.x + blockIdx.x * blockDim.x; // for each y*z z0,z1,...
int nY = threadIdx.y + blockIdx.y * blockDim.y;
if (nX >= _cvgmYZxXVolume.cols && nY >= _cvgmYZxXVolume.rows) return;
//calc grid idx
int3 n3Grid;
n3Grid.x = nY;
n3Grid.y = nX/_cvgmYZxXVolume.rows;
n3Grid.z = nX%_cvgmYZxXVolume.rows;
int Axis,XX,YY;
switch(_usType){
case 1: //intercepting X
Axis = n3Grid.x;
XX = n3Grid.y;
YY = n3Grid.z;
break;
case 2: //intercepting Y
Axis = n3Grid.y;
XX = n3Grid.x;
YY = n3Grid.z;
break;
case 3: //intercepting Z
Axis = n3Grid.z;
XX = n3Grid.x;
YY = n3Grid.y;
break;
}//switch
if( Axis == _usV ){
// get truncated signed distance value and weight
short2& sValue = _cvgmYZxXVolume.ptr(nY)[nX];
float fTSDF;
int nWeight;
pcl::device::unpack_tsdf(sValue,fTSDF,nWeight);
uchar3& pixel = _cvgmCross.ptr(YY)[XX];
if( fTSDF > 0.f )
{
if (fTSDF > 1.f){
pixel.x = 0;
pixel.y = (uchar)255;
pixel.z = 0;
}
else{
pixel.x = pixel.y = pixel.z = uchar(abs(fTSDF)*255 );
}
}
else{
if (fTSDF < -1.f){
pixel.x = (uchar)255;
pixel.y = 0;
pixel.z = 0;
}
else{
pixel.x = pixel.y = pixel.z = uchar(abs(fTSDF)*255 );
}
}
}
}//kernelIntegrateFrame2VolumeCVmCVm()
};
__global__ void kernelExportVolume2CrossSection( SCross sSC_ ){
sSC_();
}
void exportVolume2CrossSectionX(const cv::gpu::GpuMat& cvgmYZxXVolContentCV_, ushort usV_, ushort usType_, cv::gpu::GpuMat* pcvgmCross_){
SCross sSC;
sSC._usV = usV_;
sSC._usType = usType_;
sSC._cvgmCross = *pcvgmCross_;
sSC._cvgmYZxXVolume = cvgmYZxXVolContentCV_;
dim3 block(64, 16);
dim3 grid(cv::gpu::divUp(cvgmYZxXVolContentCV_.cols, block.x), cv::gpu::divUp(cvgmYZxXVolContentCV_.rows, block.y));
hipLaunchKernelGGL(( kernelExportVolume2CrossSection), dim3(grid),dim3(block), 0, 0, sSC );
cudaSafeCall ( hipGetLastError () );
}//exportVolume2CrossSectionX()
}//device
}//btl | d1c0c5fce522fc5c9c80c257aa37f69228cdef88.cu |
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#include <opencv2/gpu/gpu.hpp>
#include <opencv2/gpu/devmem2d.hpp>
#include <math_constants.h>
#include "cv/common.hpp" //copied from opencv
#include "pcl/limits.hpp"
#include "pcl/device.hpp"
#include "pcl/vector_math.hpp"
namespace btl{ namespace device
{
using namespace pcl::device;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct STSDF{
enum{
MAX_WEIGHT = 1 << 7
};
};
/*
__constant__ double _aRW[9]; //camera externals Rotation defined in world
__constant__ double _aTW[3]; //camera externals Translation defined in world
__constant__ double _aCW[3]; //camera center*/
struct SVolumn{
pcl::device::Intr sCameraIntrinsics_;
float _fVoxelSize;
float _fTruncDistanceM;
pcl::device::Mat33 _Rw;
//float3 _Tw;
float3 _Cw;
cv::gpu::DevMem2D_<float> _cvgmDepthScaled;
cv::gpu::DevMem2D_<short2> _cvgmYZxXVolume;
__device__ __forceinline__ float3 gridToCoordinateVolume(const int3& n3Grid_ )
{
float x = n3Grid_.x * _fVoxelSize;
float y = n3Grid_.y * _fVoxelSize;// - convert from cv to GL
float z = n3Grid_.z * _fVoxelSize;// - convert from cv to GL
return make_float3( x,y,z );
}
__device__ __forceinline__ void operator () (){
int nX = threadIdx.x + blockIdx.x * blockDim.x; // for each y*z z0,z1,...
int nY = threadIdx.y + blockIdx.y * blockDim.y;
if (nX >= _cvgmYZxXVolume.cols && nY >= _cvgmYZxXVolume.rows) return;
int nHalfCols = _cvgmYZxXVolume.rows/2;
float fHalfVoxelSize = _fVoxelSize/2.f;
//calc grid idx
int3 n3Grid;
n3Grid.x = nY;
n3Grid.y = nX/_cvgmYZxXVolume.rows;
n3Grid.z = nX%_cvgmYZxXVolume.rows;
//calc voxel center coordinate, 0,1|2,3 // -1.5,-0.5|0.5,1.5 //fVoxelSize = 1.0
float3 fVoxelCenter = gridToCoordinateVolume(n3Grid) ;
//convert voxel to camera coordinate (local coordinate)
//fVoxelCenterLocal = R * fVoxelCenter + T = R * ( fVoxelCenter - Cw )
float3 fVoxelCenterLocal;
fVoxelCenterLocal = _Rw * ( fVoxelCenter - _Cw );
/*fVoxelCenterLocal.x = _aRW[0]*fVoxelCenter.x+_aRW[3]*fVoxelCenter.y+_aRW[6]*fVoxelCenter.z+_aTW[0];
fVoxelCenterLocal.y = _aRW[1]*fVoxelCenter.x+_aRW[4]*fVoxelCenter.y+_aRW[7]*fVoxelCenter.z+_aTW[1];
fVoxelCenterLocal.z = _aRW[2]*fVoxelCenter.x+_aRW[5]*fVoxelCenter.y+_aRW[8]*fVoxelCenter.z+_aTW[2];*/
//project voxel local to image to pick up corresponding depth
int c = __float2int_rn((sCameraIntrinsics_.fx * fVoxelCenterLocal.x + sCameraIntrinsics_.cx * fVoxelCenterLocal.z)/fVoxelCenterLocal.z);
int r = __float2int_rn((sCameraIntrinsics_.fy * fVoxelCenterLocal.y + sCameraIntrinsics_.cy * fVoxelCenterLocal.z)/fVoxelCenterLocal.z);
if (c < 0 || r < 0 || c >= _cvgmDepthScaled.cols || r >= _cvgmDepthScaled.rows) return;
//get the depthScaled
const float& fDepth = _cvgmDepthScaled.ptr(r)[c]; if(isnan<float>(fDepth) || fDepth < 0.1) return;
float3 Tmp;
Tmp = fVoxelCenter - _Cw;
/*Tmp.x = fVoxelCenter.x - _aCW[0];
Tmp.y = fVoxelCenter.y - _aCW[1];
Tmp.z = fVoxelCenter.z - _aCW[2];*/
float fSignedDistance = fDepth - sqrt(Tmp.x*Tmp.x + Tmp.y*Tmp.y+ Tmp.z*Tmp.z); //- outside + inside
float fTrancDistInv = 1.0f / _fTruncDistanceM;
/*float fTSDF;
if(fSignedDistance > 0 ){
fTSDF = fmin ( 1.0f, fSignedDistance * fTrancDistInv );
}
else{
fTSDF = fmax (-1.0f, fSignedDistance * fTrancDistInv );
}// truncated and normalize the Signed Distance to [-1,1]
//read an unpack tsdf value and store into the volumes
short2& sValue = _cvgmYZxXVolume.ptr(nY)[nX];
float fTSDFNew;
int nWeightNew;
if(sValue.x < 30000 ){
float fTSDFPrev;
int nWeightPrev;
pcl::device::unpack_tsdf(sValue,fTSDFPrev,nWeightPrev);
fTSDFNew = (fTSDFPrev*nWeightPrev + fTSDF*1.f)/(1.f+nWeightPrev);
nWeightNew = min(STSDF::MAX_WEIGHT,nWeightPrev+1);
}else{
fTSDFNew = fTSDF;
nWeightNew = 1;
}
pcl::device::pack_tsdf( fTSDFNew,nWeightNew,sValue);*/
float fTSDF = fSignedDistance * fTrancDistInv;
//read an unpack tsdf value and store into the volumes
short2& sValue = _cvgmYZxXVolume.ptr(nY)[nX];
float fTSDFNew,fTSDFPrev;
int nWeightNew,nWeightPrev;
if(fTSDF > 0.f ){
fTSDF = fmin ( 1.f, fTSDF );
if(abs(sValue.x) < 30000 ){
pcl::device::unpack_tsdf(sValue,fTSDFPrev,nWeightPrev);
fTSDFNew = (fTSDFPrev*nWeightPrev + fTSDF*1.f)/(1.f+nWeightPrev);
nWeightNew = min(STSDF::MAX_WEIGHT,nWeightPrev+1);
}else{
fTSDFNew = fTSDF;
nWeightNew = 1;
}
pcl::device::pack_tsdf( fTSDFNew,nWeightNew,sValue);
}
else{//if (fTSDF < = 0.f)
fTSDF = fmax ( -1.f, fTSDF );
if(abs(sValue.x) < 30000 ){
pcl::device::unpack_tsdf(sValue,fTSDFPrev,nWeightPrev);
fTSDFNew = (fTSDFPrev*nWeightPrev + fTSDF*1.f)/(1.f+nWeightPrev);
nWeightNew = min(STSDF::MAX_WEIGHT,nWeightPrev+1);
}else{
fTSDFNew = fTSDF;
nWeightNew = 1;
}
pcl::device::pack_tsdf( fTSDFNew,nWeightNew,sValue);
}// truncated and normalize the Signed Distance to [-1,1]
return;
}//kernelIntegrateFrame2VolumeCVmCVm()
};
__global__ void kernelIntegrateFrame2VolumeCVmCVm( SVolumn sSV_ ){
sSV_();
}
void integrateFrame2VolumeCVCV(cv::gpu::GpuMat& cvgmDepthScaled_, const unsigned short usPyrLevel_,
const float fVoxelSize_, const float fTruncDistanceM_,
const pcl::device::Mat33& Rw_, const float3& Cw_,
//const double* pR_, const double* pT_, const double* pC_,
const float fFx_, const float fFy_, const float u_, const float v_, cv::gpu::GpuMat* pcvgmYZxXVolume_){
//pR_ is column major
/*size_t sN1 = sizeof(double) * 9;
cudaSafeCall( cudaMemcpyToSymbol(_aRW, pR_, sN1) );
size_t sN2 = sizeof(double) * 3;
cudaSafeCall( cudaMemcpyToSymbol(_aTW, pT_, sN2) );
cudaSafeCall( cudaMemcpyToSymbol(_aCW, pC_, sN2) );*/
SVolumn sSV;
sSV._Rw = Rw_;
sSV._Cw = Cw_;
sSV.sCameraIntrinsics_ = pcl::device::Intr(fFx_,fFy_,u_,v_)(usPyrLevel_);
sSV._cvgmDepthScaled = cvgmDepthScaled_;
sSV._fVoxelSize = fVoxelSize_;
sSV._fTruncDistanceM = fTruncDistanceM_;
sSV._cvgmYZxXVolume = *pcvgmYZxXVolume_;
//define grid and block
dim3 block(64, 16);
dim3 grid(cv::gpu::divUp(pcvgmYZxXVolume_->cols, block.x), cv::gpu::divUp(pcvgmYZxXVolume_->rows, block.y));
kernelIntegrateFrame2VolumeCVmCVm<<<grid,block>>>( sSV );
cudaSafeCall ( cudaGetLastError () );
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__constant__ float _aParam[2];//0:_fThreshold;1:_fSize
__global__ void kernelThresholdVolume2by2CVGL(const cv::gpu::DevMem2D_<short2> cvgmYZxXVolume_,cv::gpu::DevMem2D_<float3> cvgmYZxXVolCenter_){
int nX = threadIdx.x + blockIdx.x * blockDim.x; // for each y*z z0,z1,...
int nY = threadIdx.y + blockIdx.y * blockDim.y;
if (nX >= cvgmYZxXVolume_.cols && nY >= cvgmYZxXVolume_.rows) return; //both nX and nX and bounded by cols as the structure is a cubic
const short2& sValue = cvgmYZxXVolume_.ptr(nY)[nX];
float3& fCenter = cvgmYZxXVolCenter_.ptr(nY)[nX];
int nGridX = nY;
int nGridY = nX/cvgmYZxXVolume_.rows;
int nGridZ = nX%cvgmYZxXVolume_.rows;
float fTSDF = pcl::device::unpack_tsdf(sValue);
if(fabsf(fTSDF)<_aParam[0]){
fCenter.x = nGridX *_aParam[1] ;
fCenter.y = nGridY *_aParam[1] ;// - convert from cv to GL
fCenter.z = nGridZ *_aParam[1] ;// - convert from cv to GL
}//within threshold
else{
fCenter.x = fCenter.y = fCenter.z = pcl::device::numeric_limits<float>::quiet_NaN();
}
return;
}//kernelThresholdVolume()
void thresholdVolumeCVGL(const cv::gpu::GpuMat& cvgmYZxXVolume_, const float fThreshold_, const float fVoxelSize_, const cv::gpu::GpuMat* pcvgmYZxXVolCenter_){
size_t sN = sizeof(float)*2;
float* const pParam = (float*) malloc( sN );
pParam[0] = fThreshold_;
pParam[1] = fVoxelSize_;
cudaSafeCall( cudaMemcpyToSymbol(_aParam, pParam, sN) );
free(pParam);
dim3 block(64, 16);
dim3 grid(cv::gpu::divUp(cvgmYZxXVolume_.cols, block.x), cv::gpu::divUp(cvgmYZxXVolume_.rows, block.y));
//kernelThresholdVolumeCVGL<<<grid,block>>>(cvgmYZxXVolume_,*pcvgmYZxXVolCenter_);
kernelThresholdVolume2by2CVGL<<<grid,block>>>(cvgmYZxXVolume_,*pcvgmYZxXVolCenter_);
cudaSafeCall ( cudaGetLastError () );
}//thresholdVolume()
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct SCross{
ushort _usV;
cv::gpu::DevMem2D_<short2> _cvgmYZxXVolume;
cv::gpu::DevMem2D_<uchar3> _cvgmCross;
ushort _usType; // cross-section intercepts the X, Y, or Z axis
__device__ __forceinline__ void operator () () {
int nX = threadIdx.x + blockIdx.x * blockDim.x; // for each y*z z0,z1,...
int nY = threadIdx.y + blockIdx.y * blockDim.y;
if (nX >= _cvgmYZxXVolume.cols && nY >= _cvgmYZxXVolume.rows) return;
//calc grid idx
int3 n3Grid;
n3Grid.x = nY;
n3Grid.y = nX/_cvgmYZxXVolume.rows;
n3Grid.z = nX%_cvgmYZxXVolume.rows;
int Axis,XX,YY;
switch(_usType){
case 1: //intercepting X
Axis = n3Grid.x;
XX = n3Grid.y;
YY = n3Grid.z;
break;
case 2: //intercepting Y
Axis = n3Grid.y;
XX = n3Grid.x;
YY = n3Grid.z;
break;
case 3: //intercepting Z
Axis = n3Grid.z;
XX = n3Grid.x;
YY = n3Grid.y;
break;
}//switch
if( Axis == _usV ){
// get truncated signed distance value and weight
short2& sValue = _cvgmYZxXVolume.ptr(nY)[nX];
float fTSDF;
int nWeight;
pcl::device::unpack_tsdf(sValue,fTSDF,nWeight);
uchar3& pixel = _cvgmCross.ptr(YY)[XX];
if( fTSDF > 0.f )
{
if (fTSDF > 1.f){
pixel.x = 0;
pixel.y = (uchar)255;
pixel.z = 0;
}
else{
pixel.x = pixel.y = pixel.z = uchar(abs(fTSDF)*255 );
}
}
else{
if (fTSDF < -1.f){
pixel.x = (uchar)255;
pixel.y = 0;
pixel.z = 0;
}
else{
pixel.x = pixel.y = pixel.z = uchar(abs(fTSDF)*255 );
}
}
}
}//kernelIntegrateFrame2VolumeCVmCVm()
};
__global__ void kernelExportVolume2CrossSection( SCross sSC_ ){
sSC_();
}
void exportVolume2CrossSectionX(const cv::gpu::GpuMat& cvgmYZxXVolContentCV_, ushort usV_, ushort usType_, cv::gpu::GpuMat* pcvgmCross_){
SCross sSC;
sSC._usV = usV_;
sSC._usType = usType_;
sSC._cvgmCross = *pcvgmCross_;
sSC._cvgmYZxXVolume = cvgmYZxXVolContentCV_;
dim3 block(64, 16);
dim3 grid(cv::gpu::divUp(cvgmYZxXVolContentCV_.cols, block.x), cv::gpu::divUp(cvgmYZxXVolContentCV_.rows, block.y));
kernelExportVolume2CrossSection<<<grid,block>>>( sSC );
cudaSafeCall ( cudaGetLastError () );
}//exportVolume2CrossSectionX()
}//device
}//btl |
876ebd0fe3d0f9690c9cae5a6317f42dec221a7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "activate.hpp"
namespace Shadow {
namespace Vision {
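// Element-wise activation kernels. ActivateValue maps the activation-type enum
// (relu, leaky, sigmoid, softplus, tanh, relu6, hard-swish, gelu) to its scalar
// function; KernelPRelu applies per-channel (or shared) negative slopes.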
__device__ float ActivateValue(float x, int type, float slope) {
switch (type) {
case kRelu:
return x > 0 ? x : 0;
case kLeaky:
return x > 0 ? x : slope * x;
case kSigmoid:
return 1 / (1 + expf(-x));
case kSoftPlus:
return log1pf(expf(x));
case kTanh:
return tanhf(x);
case kRelu6:
return x < 0 ? 0 : (x > 6 ? 6 : x);
case kHardSwish:
return x < -3 ? 0 : (x > 3 ? x : (x * (x + 3) / 6.f));
case kGelu:
return 0.5f * x * (1 + erff(0.707107f * x));
default:
return x;
}
}
__global__ void KernelActivate(const float* in_data, float* out_data, int count,
int type, float slope) {
CUDA_KERNEL_LOOP(globalid, count) {
out_data[globalid] = ActivateValue(in_data[globalid], type, slope);
}
}
template <>
void Activate<DeviceType::kGPU, float>(const float* in_data, float* out_data,
int count, int type, float slope,
Context* context) {
hipLaunchKernelGGL(( KernelActivate), dim3(GetBlocks(count)), dim3(NumThreads), 0,
hipStream_t(context->stream()), in_data, out_data, count,
type, slope);
CUDA_CHECK(hipPeekAtLastError());
}
__global__ void KernelPRelu(const float* in_data, float* out_data, int count,
int channels, int dim, int div_factor,
const float* slope_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int c = (globalid / dim) % channels / div_factor;
auto value = in_data[globalid];
out_data[globalid] = value > 0 ? value : value * slope_data[c];
}
}
template <>
void PRelu<DeviceType::kGPU, float>(const float* in_data, float* out_data,
const VecInt& in_shape, bool channel_shared,
const float* slope_data, Context* context) {
int channels = in_shape[1], dim = 1;
for (int i = 2; i < in_shape.size(); ++i) dim *= in_shape[i];
int count = in_shape[0] * channels * dim;
int div_factor = channel_shared ? channels : 1;
hipLaunchKernelGGL(( KernelPRelu), dim3(GetBlocks(count)), dim3(NumThreads), 0,
hipStream_t(context->stream()),
in_data, out_data, count, channels, dim, div_factor, slope_data);
CUDA_CHECK(hipPeekAtLastError());
}
} // namespace Vision
} // namespace Shadow
namespace Shadow {
REGISTER_OP_KERNEL_DEFAULT(ActivateGPU,
ActivateKernelDefault<DeviceType::kGPU>);
#if defined(USE_CUDNN)
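// cuDNN-backed path: relu, sigmoid, tanh and relu6 (clipped relu) are forwarded
// to cudnnActivationForward; every other activation type falls back to the
// default GPU kernel above.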
class ActivateKernelCUDNN : public ActivateKernel {
public:
ActivateKernelCUDNN() {
cudnn::createActivationDesc<float>(&activate_desc_);
cudnn::createTensorDesc<float>(&in_out_desc_);
default_kernel_ =
std::make_shared<ActivateKernelDefault<DeviceType::kGPU>>();
}
~ActivateKernelCUDNN() override {
if (activate_desc_ != nullptr) {
cudnnDestroyActivationDescriptor(activate_desc_);
activate_desc_ = nullptr;
}
if (in_out_desc_ != nullptr) {
cudnnDestroyTensorDescriptor(in_out_desc_);
in_out_desc_ = nullptr;
}
}
void Run(const std::shared_ptr<Blob>& input,
const std::shared_ptr<Blob>& slope, std::shared_ptr<Blob>& output,
Workspace* ws, int activate_type, float slope_val) override {
if (activate_type == kRelu || activate_type == kSigmoid ||
activate_type == kTanh || activate_type == kRelu6) {
int batch = input->shape(0), num = input->num();
cudnn::setActivationDesc<float>(&activate_desc_, activate_type,
activate_type == kRelu6 ? 6 : 0);
cudnn::setTensor4dDesc<float>(&in_out_desc_, batch, num, 1, 1);
CUDNN_CHECK(cudnnActivationForward(
cudnnHandle_t(ws->Ctx()->cudnn_handle()), activate_desc_,
cudnn::dataType<float>::one, in_out_desc_, input->data<float>(),
cudnn::dataType<float>::zero, in_out_desc_,
output->mutable_data<float>()));
} else {
default_kernel_->Run(input, slope, output, ws, activate_type, slope_val);
}
}
DeviceType device_type() const override { return DeviceType::kGPU; }
std::string kernel_type() const override { return "CUDNN"; }
private:
cudnnActivationDescriptor_t activate_desc_ = nullptr;
cudnnTensorDescriptor_t in_out_desc_ = nullptr;
std::shared_ptr<ActivateKernelDefault<DeviceType::kGPU>> default_kernel_ =
nullptr;
};
REGISTER_OP_KERNEL_CUDNN(ActivateGPU, ActivateKernelCUDNN);
#endif
} // namespace Shadow
| 876ebd0fe3d0f9690c9cae5a6317f42dec221a7b.cu | #include "activate.hpp"
namespace Shadow {
namespace Vision {
__device__ float ActivateValue(float x, int type, float slope) {
switch (type) {
case kRelu:
return x > 0 ? x : 0;
case kLeaky:
return x > 0 ? x : slope * x;
case kSigmoid:
return 1 / (1 + expf(-x));
case kSoftPlus:
return log1pf(expf(x));
case kTanh:
return tanhf(x);
case kRelu6:
return x < 0 ? 0 : (x > 6 ? 6 : x);
case kHardSwish:
return x < -3 ? 0 : (x > 3 ? x : (x * (x + 3) / 6.f));
case kGelu:
return 0.5f * x * (1 + erff(0.707107f * x));
default:
return x;
}
}
__global__ void KernelActivate(const float* in_data, float* out_data, int count,
int type, float slope) {
CUDA_KERNEL_LOOP(globalid, count) {
out_data[globalid] = ActivateValue(in_data[globalid], type, slope);
}
}
template <>
void Activate<DeviceType::kGPU, float>(const float* in_data, float* out_data,
int count, int type, float slope,
Context* context) {
KernelActivate<<<GetBlocks(count), NumThreads, 0,
cudaStream_t(context->stream())>>>(in_data, out_data, count,
type, slope);
CUDA_CHECK(cudaPeekAtLastError());
}
__global__ void KernelPRelu(const float* in_data, float* out_data, int count,
int channels, int dim, int div_factor,
const float* slope_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int c = (globalid / dim) % channels / div_factor;
auto value = in_data[globalid];
out_data[globalid] = value > 0 ? value : value * slope_data[c];
}
}
template <>
void PRelu<DeviceType::kGPU, float>(const float* in_data, float* out_data,
const VecInt& in_shape, bool channel_shared,
const float* slope_data, Context* context) {
int channels = in_shape[1], dim = 1;
for (int i = 2; i < in_shape.size(); ++i) dim *= in_shape[i];
int count = in_shape[0] * channels * dim;
int div_factor = channel_shared ? channels : 1;
KernelPRelu<<<GetBlocks(count), NumThreads, 0,
cudaStream_t(context->stream())>>>(
in_data, out_data, count, channels, dim, div_factor, slope_data);
CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace Vision
} // namespace Shadow
namespace Shadow {
REGISTER_OP_KERNEL_DEFAULT(ActivateGPU,
ActivateKernelDefault<DeviceType::kGPU>);
#if defined(USE_CUDNN)
class ActivateKernelCUDNN : public ActivateKernel {
public:
ActivateKernelCUDNN() {
cudnn::createActivationDesc<float>(&activate_desc_);
cudnn::createTensorDesc<float>(&in_out_desc_);
default_kernel_ =
std::make_shared<ActivateKernelDefault<DeviceType::kGPU>>();
}
~ActivateKernelCUDNN() override {
if (activate_desc_ != nullptr) {
cudnnDestroyActivationDescriptor(activate_desc_);
activate_desc_ = nullptr;
}
if (in_out_desc_ != nullptr) {
cudnnDestroyTensorDescriptor(in_out_desc_);
in_out_desc_ = nullptr;
}
}
void Run(const std::shared_ptr<Blob>& input,
const std::shared_ptr<Blob>& slope, std::shared_ptr<Blob>& output,
Workspace* ws, int activate_type, float slope_val) override {
if (activate_type == kRelu || activate_type == kSigmoid ||
activate_type == kTanh || activate_type == kRelu6) {
int batch = input->shape(0), num = input->num();
cudnn::setActivationDesc<float>(&activate_desc_, activate_type,
activate_type == kRelu6 ? 6 : 0);
cudnn::setTensor4dDesc<float>(&in_out_desc_, batch, num, 1, 1);
CUDNN_CHECK(cudnnActivationForward(
cudnnHandle_t(ws->Ctx()->cudnn_handle()), activate_desc_,
cudnn::dataType<float>::one, in_out_desc_, input->data<float>(),
cudnn::dataType<float>::zero, in_out_desc_,
output->mutable_data<float>()));
} else {
default_kernel_->Run(input, slope, output, ws, activate_type, slope_val);
}
}
DeviceType device_type() const override { return DeviceType::kGPU; }
std::string kernel_type() const override { return "CUDNN"; }
private:
cudnnActivationDescriptor_t activate_desc_ = nullptr;
cudnnTensorDescriptor_t in_out_desc_ = nullptr;
std::shared_ptr<ActivateKernelDefault<DeviceType::kGPU>> default_kernel_ =
nullptr;
};
REGISTER_OP_KERNEL_CUDNN(ActivateGPU, ActivateKernelCUDNN);
#endif
} // namespace Shadow
|
4120e8a3a5d24a00bae1e83d2beae32d66163e37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <cmath>
#include <algorithm>
#include "amir_cuda_util/cuda_util.h"
using namespace amirstan::cuda;
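// im2col helpers for (modulated) deformable convolution: sampling positions are
// shifted by learned offsets and read with bilinear interpolation; the modulated
// variant additionally scales each sample by a mask value.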
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
const int h_low = floor(h);
const int w_low = floor(w);
const int h_high = h_low + 1;
const int w_high = w_low + 1;
const scalar_t lh = h - h_low;
const scalar_t lw = w - w_low;
const scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
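// Adds a per-channel bias to a contiguous NCHW tensor:
// channel = (flat_index % step_batch) / step_channel.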
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t *output, scalar_t *bias, size_t step_batch, size_t step_channel, size_t n){
CUDA_KERNEL_LOOP(index, n)
{
output[index] += bias[(index%step_batch)/step_channel];
}
}
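// Host-side launchers: compute the output spatial extent from padding, stride
// and dilation, then run the im2col kernels on the caller-supplied stream.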
void deformable_im2col(
float* data_input, float* data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, float* data_col, hipStream_t stream)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<float>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_input, data_offset, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_im2col_cuda(
const float* data_im_, const float* data_offset_, const float* data_mask_,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* data_col_, hipStream_t stream)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
void output_add_bias(scalar_t *output, scalar_t *bias, size_t batch, size_t channel, size_t height, size_t width, hipStream_t stream){
size_t step_channel = height*width;
size_t step_batch = step_channel*channel;
size_t n = step_batch*batch;
hipLaunchKernelGGL(( output_add_bias_kernel), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, stream, output, bias, step_batch, step_channel, n);
}
template void output_add_bias<float>(float *output, float *bias, size_t batch_size, size_t channel, size_t height, size_t width, hipStream_t stream);
void tensorPermute(float *dst, float* src, int *src_size, int *permute, int src_dim, hipStream_t stream){
amirstan::cuda::memcpyPermute(dst,src,src_size,permute,src_dim, stream);
} | 4120e8a3a5d24a00bae1e83d2beae32d66163e37.cu | #include <stdio.h>
#include <math.h>
#include <float.h>
#include <cmath>
#include <algorithm>
#include "amir_cuda_util/cuda_util.h"
using namespace amirstan::cuda;
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
const int h_low = floor(h);
const int w_low = floor(w);
const int h_high = h_low + 1;
const int w_high = w_low + 1;
const scalar_t lh = h - h_low;
const scalar_t lw = w - w_low;
const scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index: linear index into the output (column) matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
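// Adds a per-channel bias to a contiguous NCHW tensor: for a flat element index,
// the channel is recovered as (index % step_batch) / step_channel.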
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t *output, scalar_t *bias, size_t step_batch, size_t step_channel, size_t n){
CUDA_KERNEL_LOOP(index, n)
{
output[index] += bias[(index%step_batch)/step_channel];
}
}
void deformable_im2col(
float* data_input, float* data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, float* data_col, cudaStream_t stream)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
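// Convolution output size: floor((in + 2*pad - (dilation*(kernel-1)+1)) / stride) + 1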
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
deformable_im2col_gpu_kernel<float><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_input, data_offset, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_im2col_cuda(
const float* data_im_, const float* data_offset_, const float* data_mask_,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* data_col_, cudaStream_t stream)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
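// One work-item per (channel, image, output location); each fills its kernel_h*kernel_w column entries.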
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
void output_add_bias(scalar_t *output, scalar_t *bias, size_t batch, size_t channel, size_t height, size_t width, cudaStream_t stream){
size_t step_channel = height*width;
size_t step_batch = step_channel*channel;
size_t n = step_batch*batch;
output_add_bias_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>(output, bias, step_batch, step_channel, n);
}
template void output_add_bias<float>(float *output, float *bias, size_t batch_size, size_t channel, size_t height, size_t width, cudaStream_t stream);
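// Permutes the axes of a dense tensor on the GPU by delegating to amirstan::cuda::memcpyPermute.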
void tensorPermute(float *dst, float* src, int *src_size, int *permute, int src_dim, cudaStream_t stream){
amirstan::cuda::memcpyPermute(dst,src,src_size,permute,src_dim, stream);
} |
83625f9cf27c9ec8950a13c5b6658199d29e9b02.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_histo_per_vertex_shared.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *ct = NULL;
hipMalloc(&ct, XSIZE*YSIZE);
unsigned int *histo = NULL;
hipMalloc(&histo, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernel_histo_per_vertex_shared, dim3(gridBlock), dim3(threadBlock), 0, 0, ct, histo);
hipDeviceSynchronize();
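// Warm-up: a few untimed launches before measuring.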
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernel_histo_per_vertex_shared, dim3(gridBlock), dim3(threadBlock), 0, 0, ct, histo);
}
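// Timed run: 1000 launches measured with steady_clock.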
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernel_histo_per_vertex_shared, dim3(gridBlock), dim3(threadBlock), 0, 0, ct, histo);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 83625f9cf27c9ec8950a13c5b6658199d29e9b02.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_histo_per_vertex_shared.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *ct = NULL;
cudaMalloc(&ct, XSIZE*YSIZE);
unsigned int *histo = NULL;
cudaMalloc(&histo, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_histo_per_vertex_shared<<<gridBlock,threadBlock>>>(ct,histo);
cudaDeviceSynchronize();
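// Warm-up: a few untimed launches before measuring.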
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_histo_per_vertex_shared<<<gridBlock,threadBlock>>>(ct,histo);
}
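// Timed run: 1000 launches measured with steady_clock.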
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_histo_per_vertex_shared<<<gridBlock,threadBlock>>>(ct,histo);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3edf3e991f7c0c0108e18915b82685094cccd1f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexCopy)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
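// Two launch strategies: SMALL_INDEX kernels are used when there are few indices (<= 16),
// LARGE_INDEX kernels otherwise; grid sizes are capped at 8 blocks per multiprocessor.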
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THCTensor_(resizeNd)(state, dst, index->nDimension, index->size, NULL);
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
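// Sorts index and reorders src accordingly, in place, via thrust::sort_by_key;
// used by the accumulate path of THCTensor_(put).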
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int srcDims = THCTensor_(nDimension)(state, src);
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
if (accumulate) {
// wrap indices so to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexAdd)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexFill)(state, dst, dim, indices_, val);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, dst);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THArgCheck(indices->nDimension <= 1, 3, "Index is supposed to be an empty tensor or a vector");
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexSelect)(state, dst, src, dim, indices_);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, sliceSize, srcSelectDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#define MAX_ADVINDEX_CALC_DIMS 5
void THCTensor_(calculateAdvancedIndexingOffsets)(
THCState *state,
THCudaLongTensor *output,
THCTensor *indexed,
ptrdiff_t baseOffset,
THCudaLongTensor **indexers)
{
int ndim = THCTensor_(nDimension)(state, indexed);
THAssert(ndim <= MAX_ADVINDEX_CALC_DIMS);
// Assert all Tensors are on the same GPU, and that the indexing Tensors are
// contiguous
for (int i = 0; i < ndim; ++i) {
if (indexers[i] != NULL) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, output, indexers[i]));
THAssert(THCudaLongTensor_isContiguous(state, indexers[i]));
}
}
// Set grid, block dims
ptrdiff_t nElement = THCudaLongTensor_nElement(state, output);
const dim3 block = getApplyBlock();
dim3 grid;
THAssert(getApplyGrid(state, nElement, grid));
#define HANDLE_CASE(INDEX_TYPE, DIMS) \
{ \
LinearIndexCalcData<INDEX_TYPE, DIMS> data; \
for (int i = 0; i < DIMS; ++i) { \
data.baseSizes[i] = THCTensor_(size)(state, indexed, i); \
data.sizes[i] = indexers[i] != NULL ? \
THCudaLongTensor_nElement(state, indexers[i]) : \
THCTensor_(size)(state, indexed, i); \
data.strides[i] = THCTensor_(stride)(state, indexed, i); \
data.advIndexTensors[i] = indexers[i] != NULL ? \
THCudaLongTensor_data(state, indexers[i]) : NULL; \
} \
\
hipLaunchKernelGGL(( calculateLinearIndices<INDEX_TYPE, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
THCudaLongTensor_data(state, output), \
nElement, \
baseOffset, \
data \
); \
}
#define RUN_T(INDEX_TYPE) \
switch (ndim) { \
case 1: \
HANDLE_CASE(INDEX_TYPE, 1) \
break; \
case 2: \
HANDLE_CASE(INDEX_TYPE, 2) \
break; \
case 3: \
HANDLE_CASE(INDEX_TYPE, 3) \
break; \
case 4: \
HANDLE_CASE(INDEX_TYPE, 4) \
break; \
case 5: \
HANDLE_CASE(INDEX_TYPE, 5) \
break; \
default: \
THAssert(false); \
}
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, indexed)) {
RUN_T(unsigned int);
} else {
RUN_T(uint64_t);
}
#undef HANDLE_CASE
#undef RUN_T
THCudaCheck(hipGetLastError());
}
#undef MAX_ADVINDEX_CALC_DIMS
#endif
| 3edf3e991f7c0c0108e18915b82685094cccd1f5.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexCopy)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
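// Two launch strategies: SMALL_INDEX kernels are used when there are few indices (<= 16),
// LARGE_INDEX kernels otherwise; grid sizes are capped at 8 blocks per multiprocessor.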
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopyLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THCTensor_(resizeNd)(state, dst, index->nDimension, index->size, NULL);
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
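// Sorts index and reorders src accordingly, in place, via thrust::sort_by_key;
// used by the accumulate path of THCTensor_(put).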
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int srcDims = THCTensor_(nDimension)(state, src);
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
if (accumulate) {
// wrap indices so to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexAdd)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexFill)(state, dst, dim, indices_, val);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, dst);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THArgCheck(indices->nDimension <= 1, 3, "Index is supposed to be an empty tensor or a vector");
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexSelect)(state, dst, src, dim, indices_);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, sliceSize, srcSelectDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#define MAX_ADVINDEX_CALC_DIMS 5
void THCTensor_(calculateAdvancedIndexingOffsets)(
THCState *state,
THCudaLongTensor *output,
THCTensor *indexed,
ptrdiff_t baseOffset,
THCudaLongTensor **indexers)
{
int ndim = THCTensor_(nDimension)(state, indexed);
THAssert(ndim <= MAX_ADVINDEX_CALC_DIMS);
// Assert all Tensors are on the same GPU, and that the indexing Tensors are
// contiguous
for (int i = 0; i < ndim; ++i) {
if (indexers[i] != NULL) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, output, indexers[i]));
THAssert(THCudaLongTensor_isContiguous(state, indexers[i]));
}
}
// Set grid, block dims
ptrdiff_t nElement = THCudaLongTensor_nElement(state, output);
const dim3 block = getApplyBlock();
dim3 grid;
THAssert(getApplyGrid(state, nElement, grid));
#define HANDLE_CASE(INDEX_TYPE, DIMS) \
{ \
LinearIndexCalcData<INDEX_TYPE, DIMS> data; \
for (int i = 0; i < DIMS; ++i) { \
data.baseSizes[i] = THCTensor_(size)(state, indexed, i); \
data.sizes[i] = indexers[i] != NULL ? \
THCudaLongTensor_nElement(state, indexers[i]) : \
THCTensor_(size)(state, indexed, i); \
data.strides[i] = THCTensor_(stride)(state, indexed, i); \
data.advIndexTensors[i] = indexers[i] != NULL ? \
THCudaLongTensor_data(state, indexers[i]) : NULL; \
} \
\
calculateLinearIndices<INDEX_TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
THCudaLongTensor_data(state, output), \
nElement, \
baseOffset, \
data \
); \
}
#define RUN_T(INDEX_TYPE) \
switch (ndim) { \
case 1: \
HANDLE_CASE(INDEX_TYPE, 1) \
break; \
case 2: \
HANDLE_CASE(INDEX_TYPE, 2) \
break; \
case 3: \
HANDLE_CASE(INDEX_TYPE, 3) \
break; \
case 4: \
HANDLE_CASE(INDEX_TYPE, 4) \
break; \
case 5: \
HANDLE_CASE(INDEX_TYPE, 5) \
break; \
default: \
THAssert(false); \
}
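// Use 32-bit index arithmetic whenever all tensors involved fit within
// 32-bit addressing; it is cheaper on the GPU than 64-bit arithmetic.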
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, indexed)) {
RUN_T(unsigned int);
} else {
RUN_T(uint64_t);
}
#undef HANDLE_CASE
#undef RUN_T
THCudaCheck(cudaGetLastError());
}
#undef MAX_ADVINDEX_CALC_DIMS
#endif
|
aae1435b8beff78a93fcb53f98e028c5cbc16e5e.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <hip/hip_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/* TODO: You'll need a kernel here, as well as any helper functions
to call it */
__global__
void waveEquationKernal(float *old_data, float *current_data, float *new_data,
int numberOfNodes, float constant, float left_boundary) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
// Thread 0 writes the two boundary values, then advances by the grid stride
// (blockDim.x * gridDim.x) so it also helps update the interior nodes below.
if (index == 0) {
new_data[0] = left_boundary;
new_data[numberOfNodes - 1] = 0;
index += blockDim.x * gridDim.x;
}
while (index > 0 && index < numberOfNodes - 1) {
// Wave Equation!
// y_x,t+1 = 2*y_x,t - y_x,t-1 +
// (c*dt/dx)^2 * (y_x+1,t - 2*y_x,t + y_x-1,t)
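// `constant` holds the squared Courant number (c*dt/dx)^2; this explicit
// scheme is stable only when c*dt/dx <= 1.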
new_data[index] = 2 * current_data[index]
- old_data[index]
+ constant
* (current_data[index + 1]
- 2 * current_data[index]
+ current_data[index - 1]);
index += blockDim.x * gridDim.x;
}
}
void waveEquation(float *old_data, float *current_data, float *new_data,
int numberOfNodes, float constant, float left_boundary,
int blocks, int threadsPerBlock) {
hipLaunchKernelGGL(( waveEquationKernal), dim3(blocks), dim3(threadsPerBlock), 0, 0, old_data, current_data,
new_data, numberOfNodes,
constant, left_boundary);
}
| aae1435b8beff78a93fcb53f98e028c5cbc16e5e.cu | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/* TODO: You'll need a kernel here, as well as any helper functions
to call it */
__global__
void waveEquationKernal(float *old_data, float *current_data, float *new_data,
int numberOfNodes, float constant, float left_boundary) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
// Thread 0 writes the two boundary values, then advances by the grid stride
// (blockDim.x * gridDim.x) so it also helps update the interior nodes below.
if (index == 0) {
new_data[0] = left_boundary;
new_data[numberOfNodes - 1] = 0;
index += blockDim.x * gridDim.x;
}
while (index > 0 && index < numberOfNodes - 1) {
// Wave Equation!
// y_x,t+1 = 2*y_x,t - y_x,t-1 +
// (c*dt/dx)^2 * (y_x+1,t - 2*y_x,t + y_x-1,t)
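// `constant` holds the squared Courant number (c*dt/dx)^2; this explicit
// scheme is stable only when c*dt/dx <= 1.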
new_data[index] = 2 * current_data[index]
- old_data[index]
+ constant
* (current_data[index + 1]
- 2 * current_data[index]
+ current_data[index - 1]);
index += blockDim.x * gridDim.x;
}
}
void waveEquation(float *old_data, float *current_data, float *new_data,
int numberOfNodes, float constant, float left_boundary,
int blocks, int threadsPerBlock) {
waveEquationKernal<<<blocks, threadsPerBlock>>>(old_data, current_data,
new_data, numberOfNodes,
constant, left_boundary);
}
|
58a624f0db1b67de39e96806af64a076201cba80.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dense_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t sz = 1;
float_t *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float_t *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(dense_add, dim3(gridBlock), dim3(threadBlock), 0, 0, sz, src, dest);
hipDeviceSynchronize();
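// 10 untimed warm-up launches, then 1000 timed launches; the elapsed time in
// microseconds is printed together with the block and matrix dimensions.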
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(dense_add, dim3(gridBlock), dim3(threadBlock), 0, 0, sz, src, dest);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(dense_add, dim3(gridBlock), dim3(threadBlock), 0, 0, sz, src, dest);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 58a624f0db1b67de39e96806af64a076201cba80.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dense_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t sz = 1;
float_t *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float_t *dest = NULL;
cudaMalloc(&dest, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dense_add<<<gridBlock,threadBlock>>>(sz,src,dest);
cudaDeviceSynchronize();
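// 10 untimed warm-up launches, then 1000 timed launches; the elapsed time in
// microseconds is printed together with the block and matrix dimensions.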
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dense_add<<<gridBlock,threadBlock>>>(sz,src,dest);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dense_add<<<gridBlock,threadBlock>>>(sz,src,dest);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bd871dd97647db061fdb92faa3102d3ec6e18495.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <algorithm>
//#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hiprand/hiprand.h>
#include "cuda_errorcheck.h"
#include "cuda_defs.h"
#include "collision/cuda_robobs.h"
#include "PlanningParams.h"
#include "cuda_statePrimitives.h"
#include "cuda_stateIntersect.h"
#include "cuda_timing.h"
#include "CUDACollisionCheckKernels.h"
// device memory random generator states
////////////////////////////////////////////////////////////////////////////////
// solution info
// device memory constants
////////////////////////////////////////////////////////////////////////////////
__device__ __constant__ float c_boundsMin[3];
__device__ __constant__ float c_boundsMax[3];
__device__ __constant__ float c_boundsDiff[3];
__device__ __constant__ float c_radius[MAX_LEVEL];
// device functions
////////////////////////////////////////////////////////////////////////////////
// device kernels
////////////////////////////////////////////////////////////////////////////////
//check collision of robot with different states ,and object
template <class CollideFunctor>
__global__ void checkCollisionKernel(int num_of_robot_states, SerializedStates* robot_states, int num_of_obstacle_states, SerializedStates* obstacle_states, GMesh* robot, GMesh* obstacle, CollideFunctor collideF, int* d_results)
{
int state_id = blockIdx.x * blockDim.x + threadIdx.x;
if (state_id >= num_of_robot_states) {
// printf ("state_id %d is out of boundary!\n", state_id);
return;
}
int result = COLLISION_FREE;
int obstacle_state_id = (num_of_obstacle_states == 1)? 0: state_id;
result = collideF(robot->models, robot->vertexPointers, robot->triIdxPointers, &(robot_states[state_id].x[0]), obstacle->models, obstacle->vertexPointers, obstacle->triIdxPointers, &(obstacle_states[obstacle_state_id].x[0]));
d_results[state_id] = (result == COLLISION_FREE) ? 0: 1;
#ifdef DEBUG_STEP_VALUES
if (d_results[state_id])
printf("checkCollisionKernel2:: Found collision!\n");
else
printf("checkCollisionkernel2:: No collision!\n");
#endif
}
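// Early-out variant: d_results[0] serves as a single global collision flag,
// so threads that start after a collision has already been reported return
// without running their own intersection test.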
template <class CollideFunctor>
__global__ void checkCollisionFastKernel(int num_of_robot_states, SerializedStates* robot_states, int num_of_obstacle_states, SerializedStates* obstacle_states, GMesh* robot, GMesh* obstacle, CollideFunctor collideF, volatile int* d_results)
{
int state_id = blockIdx.x * blockDim.x + threadIdx.x;
if (state_id >= num_of_robot_states) {
// printf ("state_id %d is out of boundary!\n", state_id);
return;
}
int result = COLLISION_FREE;
int obstacle_state_id = (num_of_obstacle_states == 1)? 0: state_id;
if (*d_results == 1)
return;
result = collideF(robot->models, robot->vertexPointers, robot->triIdxPointers, &(robot_states[state_id].x[0]), obstacle->models, obstacle->vertexPointers, obstacle->triIdxPointers, &(obstacle_states[obstacle_state_id].x[0]));
if (result != COLLISION_FREE && *d_results == 0) {
d_results[0] = 1;
}
#ifdef DEBUG_STEP_VALUES
if (d_results[0])
printf("checkCollisionKernel2:: Found collision!\n");
else
printf("checkCollisionkernel2:: No collision!\n");
#endif
}
namespace collision
{
namespace cuda
{
void allocMemforMesh(GMesh* &d_object) {
// meshes
GPUMALLOC((void**)&d_object, sizeof(GMesh));
}
void cpyMeshToGPU(const GMesh* h_object, GMesh*& d_object) {
TOGPU(d_object, h_object, sizeof(GMesh));
}
void allocMemforStates(const std::vector<SerializedStates>& h_states, SerializedStates* &d_states, int num_of_states) {
GPUMALLOC(&d_states, sizeof(SerializedStates) * num_of_states);
}
void allocMemforStatesAndResults(SerializedStates* &d_robot_states, SerializedStates* &d_obstacle_states, int* &d_results, int max_num_of_states)
{
//state and results gpu memory allocation
GPUMALLOC((void**)&d_obstacle_states, sizeof(SerializedStates) * max_num_of_states);
GPUMALLOC((void**)&d_robot_states, sizeof(SerializedStates) * max_num_of_states);
GPUMALLOC((void**)&d_results, sizeof(int) * max_num_of_states);
}
void freeStatesAndResults(SerializedStates* d_robot_states, SerializedStates* d_obstacle_states, int* d_results)
{
CUDA_CHECK_ERROR();
// states
GPUFREE(d_robot_states);
GPUFREE(d_obstacle_states);
// results
GPUFREE(d_results);
CUDA_CHECK_ERROR();
}
void cpyStatesToGPU(const std::vector<SerializedStates>& h_states, SerializedStates* &d_states, int num_of_states)
{
// obstacle state and robot state
TOGPU(d_states, &(h_states[0]), sizeof(SerializedStates) * num_of_states);
}
void setResults(int* d_results, int val, int num_of_states) {
GPUMEMSET(d_results, val, sizeof(int) * num_of_states);
}
void downloadResults(int* h_results, int* d_results, int num_of_robot_states)
{
CUDA_CHECK_ERROR();
CUDA_TIMING_BEGIN();
FROMGPU(h_results, d_results, sizeof(int) * num_of_robot_states);
CUDA_TIMING_END("downloadResults");
CUDA_CHECK_ERROR();
}
void initializeMemory(GMesh* &d_robot, GMesh* &d_obstacle, SerializedStates* &d_robot_states, int* &d_results, int num_of_robot_states, SerializedStates* &d_obstacle_states, int num_of_obstacle_states)
{
//obstacle state
GPUMALLOC(&d_obstacle_states, sizeof(SerializedStates) * num_of_obstacle_states);
CUDA_CHECK_ERROR();
GPUMALLOC(&d_robot_states, sizeof(SerializedStates) * num_of_robot_states);
CUDA_CHECK_ERROR();
GPUMALLOC(&d_results, sizeof(int) * num_of_robot_states);
CUDA_CHECK_ERROR();
// meshes
GPUMALLOC(&d_robot, sizeof(GMesh));
GPUMALLOC(&d_obstacle, sizeof(GMesh));
CUDA_CHECK_ERROR();
}
void uploadData(const GMesh* h_robot, const GMesh* h_obstacle, const std::vector<SerializedStates>& h_robot_states, const std::vector<SerializedStates>& h_obstacle_states, GMesh*& d_robot, GMesh*& d_obstacle, SerializedStates*& d_robot_states, SerializedStates*& d_obstacle_states)
{
// meshes
TOGPU(d_robot, h_robot, sizeof(GMesh));
TOGPU(d_obstacle, h_obstacle, sizeof(GMesh));
// obstacle state and robot state
TOGPU(d_obstacle_states, &(h_obstacle_states[0]), sizeof(SerializedStates) * h_obstacle_states.size());
TOGPU(d_robot_states, &(h_robot_states[0]), sizeof(SerializedStates) * h_robot_states.size());
}
void freeMemory(GMesh* d_robot, GMesh* d_obstacle, SerializedStates* d_robot_states, SerializedStates* d_obstacle_states, int* d_results)
{
CUDA_CHECK_ERROR();
// meshes
GPUFREE(d_robot);
GPUFREE(d_obstacle);
// states
GPUFREE(d_robot_states);
GPUFREE(d_obstacle_states);
// results
GPUFREE(d_results);
CUDA_CHECK_ERROR();
}
void checkCollision(int num_of_robot_states, SerializedStates* d_robot_states, int num_of_obstacle_states, SerializedStates* d_obstacle_states, int* d_results, GMesh* d_robot, GMesh* d_obstacle)
{
int gridX = (num_of_robot_states + COLLISION_THREADS) / COLLISION_THREADS;
dim3 grids = dim3(gridX, 1, 1);
int threadX = (COLLISION_THREADS > num_of_robot_states)? num_of_robot_states: COLLISION_THREADS;
dim3 threads = dim3(threadX, 1, 1); // (64, 1, 1)
CUDA_TIMING_BEGIN();
// compute N (# of checking points)
BVHTwoStatesCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> collideF;
hipLaunchKernelGGL(( checkCollisionKernel<BVHTwoStatesCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> >) , dim3(grids), dim3(threads) , 0, 0,
num_of_robot_states, d_robot_states, num_of_obstacle_states, d_obstacle_states,
d_robot, d_obstacle, collideF,
d_results);
CUDA_TIMING_END("checkCollisionKernel");
CUDA_CHECK_ERROR();
}
void checkCollisionFast(int num_of_robot_states, SerializedStates* d_robot_states, int num_of_obstacle_states, SerializedStates* d_obstacle_states, int* d_results, GMesh* d_robot, GMesh* d_obstacle)
{
int gridX = (num_of_robot_states + COLLISION_THREADS) / COLLISION_THREADS;
dim3 grids = dim3(gridX, 1, 1);
int threadX = (COLLISION_THREADS > num_of_robot_states)? num_of_robot_states: COLLISION_THREADS;
dim3 threads = dim3(threadX, 1, 1); // (64, 1, 1)
CUDA_TIMING_BEGIN();
// compute N (# of checking points)
BVHTwoStatesFastCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> collideF;
hipLaunchKernelGGL(( checkCollisionKernel<BVHTwoStatesFastCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> >) , dim3(grids), dim3(threads) , 0, 0,
num_of_robot_states, d_robot_states, num_of_obstacle_states, d_obstacle_states,
d_robot, d_obstacle, collideF,
d_results);
CUDA_TIMING_END("checkCollisionKernel");
CUDA_CHECK_ERROR();
}
void reset()
{
}
}
}
| bd871dd97647db061fdb92faa3102d3ec6e18495.cu | #include <stdio.h>
#include <algorithm>
//#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand.h>
#include "cuda_errorcheck.h"
#include "cuda_defs.h"
#include "collision/cuda_robobs.h"
#include "PlanningParams.h"
#include "cuda_statePrimitives.h"
#include "cuda_stateIntersect.h"
#include "cuda_timing.h"
#include "CUDACollisionCheckKernels.h"
// device memory random generator states
////////////////////////////////////////////////////////////////////////////////
// solution info
// device memory constants
////////////////////////////////////////////////////////////////////////////////
__device__ __constant__ float c_boundsMin[3];
__device__ __constant__ float c_boundsMax[3];
__device__ __constant__ float c_boundsDiff[3];
__device__ __constant__ float c_radius[MAX_LEVEL];
// device functions
////////////////////////////////////////////////////////////////////////////////
// device kernels
////////////////////////////////////////////////////////////////////////////////
//check collision of robot with different states ,and object
template <class CollideFunctor>
__global__ void checkCollisionKernel(int num_of_robot_states, SerializedStates* robot_states, int num_of_obstacle_states, SerializedStates* obstacle_states, GMesh* robot, GMesh* obstacle, CollideFunctor collideF, int* d_results)
{
int state_id = blockIdx.x * blockDim.x + threadIdx.x;
if (state_id >= num_of_robot_states) {
// printf ("state_id %d is out of boundary!\n", state_id);
return;
}
int result = COLLISION_FREE;
int obstacle_state_id = (num_of_obstacle_states == 1)? 0: state_id;
result = collideF(robot->models, robot->vertexPointers, robot->triIdxPointers, &(robot_states[state_id].x[0]), obstacle->models, obstacle->vertexPointers, obstacle->triIdxPointers, &(obstacle_states[obstacle_state_id].x[0]));
d_results[state_id] = (result == COLLISION_FREE) ? 0: 1;
#ifdef DEBUG_STEP_VALUES
if (d_results[state_id])
printf("checkCollisionKernel2:: Found collision!\n");
else
printf("checkCollisionkernel2:: No collision!\n");
#endif
}
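// Early-out variant: d_results[0] serves as a single global collision flag,
// so threads that start after a collision has already been reported return
// without running their own intersection test.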
template <class CollideFunctor>
__global__ void checkCollisionFastKernel(int num_of_robot_states, SerializedStates* robot_states, int num_of_obstacle_states, SerializedStates* obstacle_states, GMesh* robot, GMesh* obstacle, CollideFunctor collideF, volatile int* d_results)
{
int state_id = blockIdx.x * blockDim.x + threadIdx.x;
if (state_id >= num_of_robot_states) {
// printf ("state_id %d is out of boundary!\n", state_id);
return;
}
int result = COLLISION_FREE;
int obstacle_state_id = (num_of_obstacle_states == 1)? 0: state_id;
if (*d_results == 1)
return;
result = collideF(robot->models, robot->vertexPointers, robot->triIdxPointers, &(robot_states[state_id].x[0]), obstacle->models, obstacle->vertexPointers, obstacle->triIdxPointers, &(obstacle_states[obstacle_state_id].x[0]));
if (result != COLLISION_FREE && *d_results == 0) {
d_results[0] = 1;
}
#ifdef DEBUG_STEP_VALUES
if (d_results[0])
printf("checkCollisionKernel2:: Found collision!\n");
else
printf("checkCollisionkernel2:: No collision!\n");
#endif
}
namespace collision
{
namespace cuda
{
void allocMemforMesh(GMesh* &d_object) {
// meshes
GPUMALLOC((void**)&d_object, sizeof(GMesh));
}
void cpyMeshToGPU(const GMesh* h_object, GMesh*& d_object) {
TOGPU(d_object, h_object, sizeof(GMesh));
}
void allocMemforStates(const std::vector<SerializedStates>& h_states, SerializedStates* &d_states, int num_of_states) {
GPUMALLOC(&d_states, sizeof(SerializedStates) * num_of_states);
}
void allocMemforStatesAndResults(SerializedStates* &d_robot_states, SerializedStates* &d_obstacle_states, int* &d_results, int max_num_of_states)
{
//state and results gpu memory allocation
GPUMALLOC((void**)&d_obstacle_states, sizeof(SerializedStates) * max_num_of_states);
GPUMALLOC((void**)&d_robot_states, sizeof(SerializedStates) * max_num_of_states);
GPUMALLOC((void**)&d_results, sizeof(int) * max_num_of_states);
}
void freeStatesAndResults(SerializedStates* d_robot_states, SerializedStates* d_obstacle_states, int* d_results)
{
CUDA_CHECK_ERROR();
// states
GPUFREE(d_robot_states);
GPUFREE(d_obstacle_states);
// results
GPUFREE(d_results);
CUDA_CHECK_ERROR();
}
void cpyStatesToGPU(const std::vector<SerializedStates>& h_states, SerializedStates* &d_states, int num_of_states)
{
// obstacle state and robot state
TOGPU(d_states, &(h_states[0]), sizeof(SerializedStates) * num_of_states);
}
void setResults(int* d_results, int val, int num_of_states) {
GPUMEMSET(d_results, val, sizeof(int) * num_of_states);
}
void downloadResults(int* h_results, int* d_results, int num_of_robot_states)
{
CUDA_CHECK_ERROR();
CUDA_TIMING_BEGIN();
FROMGPU(h_results, d_results, sizeof(int) * num_of_robot_states);
CUDA_TIMING_END("downloadResults");
CUDA_CHECK_ERROR();
}
void initializeMemory(GMesh* &d_robot, GMesh* &d_obstacle, SerializedStates* &d_robot_states, int* &d_results, int num_of_robot_states, SerializedStates* &d_obstacle_states, int num_of_obstacle_states)
{
//obstacle state
GPUMALLOC(&d_obstacle_states, sizeof(SerializedStates) * num_of_obstacle_states);
CUDA_CHECK_ERROR();
GPUMALLOC(&d_robot_states, sizeof(SerializedStates) * num_of_robot_states);
CUDA_CHECK_ERROR();
GPUMALLOC(&d_results, sizeof(int) * num_of_robot_states);
CUDA_CHECK_ERROR();
// meshes
GPUMALLOC(&d_robot, sizeof(GMesh));
GPUMALLOC(&d_obstacle, sizeof(GMesh));
CUDA_CHECK_ERROR();
}
void uploadData(const GMesh* h_robot, const GMesh* h_obstacle, const std::vector<SerializedStates>& h_robot_states, const std::vector<SerializedStates>& h_obstacle_states, GMesh*& d_robot, GMesh*& d_obstacle, SerializedStates*& d_robot_states, SerializedStates*& d_obstacle_states)
{
// meshes
TOGPU(d_robot, h_robot, sizeof(GMesh));
TOGPU(d_obstacle, h_obstacle, sizeof(GMesh));
// obstacle state and robot state
TOGPU(d_obstacle_states, &(h_obstacle_states[0]), sizeof(SerializedStates) * h_obstacle_states.size());
TOGPU(d_robot_states, &(h_robot_states[0]), sizeof(SerializedStates) * h_robot_states.size());
}
void freeMemory(GMesh* d_robot, GMesh* d_obstacle, SerializedStates* d_robot_states, SerializedStates* d_obstacle_states, int* d_results)
{
CUDA_CHECK_ERROR();
// meshes
GPUFREE(d_robot);
GPUFREE(d_obstacle);
// states
GPUFREE(d_robot_states);
GPUFREE(d_obstacle_states);
// results
GPUFREE(d_results);
CUDA_CHECK_ERROR();
}
void checkCollision(int num_of_robot_states, SerializedStates* d_robot_states, int num_of_obstacle_states, SerializedStates* d_obstacle_states, int* d_results, GMesh* d_robot, GMesh* d_obstacle)
{
int gridX = (num_of_robot_states + COLLISION_THREADS) / COLLISION_THREADS;
dim3 grids = dim3(gridX, 1, 1);
int threadX = (COLLISION_THREADS > num_of_robot_states)? num_of_robot_states: COLLISION_THREADS;
dim3 threads = dim3(threadX, 1, 1); // (64, 1, 1)
CUDA_TIMING_BEGIN();
// compute N (# of checking points)
BVHTwoStatesCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> collideF;
checkCollisionKernel<BVHTwoStatesCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> > <<< grids, threads >>>(
num_of_robot_states, d_robot_states, num_of_obstacle_states, d_obstacle_states,
d_robot, d_obstacle, collideF,
d_results);
CUDA_TIMING_END("checkCollisionKernel");
CUDA_CHECK_ERROR();
}
void checkCollisionFast(int num_of_robot_states, SerializedStates* d_robot_states, int num_of_obstacle_states, SerializedStates* d_obstacle_states, int* d_results, GMesh* d_robot, GMesh* d_obstacle)
{
int gridX = (num_of_robot_states + COLLISION_THREADS) / COLLISION_THREADS;
dim3 grids = dim3(gridX, 1, 1);
int threadX = (COLLISION_THREADS > num_of_robot_states)? num_of_robot_states: COLLISION_THREADS;
dim3 threads = dim3(threadX, 1, 1); // (64, 1, 1)
CUDA_TIMING_BEGIN();
// compute N (# of checking points)
BVHTwoStatesFastCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> collideF;
checkCollisionKernel<BVHTwoStatesFastCollideFunctor<OBBNode, OBB, ushort2, IN_COLLISION, unsigned short> > <<< grids, threads >>>(
num_of_robot_states, d_robot_states, num_of_obstacle_states, d_obstacle_states,
d_robot, d_obstacle, collideF,
d_results);
CUDA_TIMING_END("checkCollisionKernel");
CUDA_CHECK_ERROR();
}
void reset()
{
}
}
}
|
275e6c235d03ad2d84018254f7883430b0c1e250.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "cuda_neuronal_network.h"
#include "cuda_kernel.h"
namespace cuda {
neuronal_network::neuronal_network() :
m_currentContext(nullptr) {
}
neuronal_network::~neuronal_network() {
if (m_currentContext != nullptr) {
delete m_currentContext;
}
}
void neuronal_network::set_config(const config_t config) {
m_currentConfig = config;
}
bool neuronal_network::train(cuda::model& model, const std::vector<data::sample<float>>& trainingsData, const int numRelearning) const {
const std::vector<float>& refInput = trainingsData[0].internal_data();
model.init((refInput.size() + 1) * m_currentConfig.numHidden + (m_currentConfig.numHidden + 1) * m_currentConfig.numOutput);
train_data_context context(m_currentConfig, model, trainingsData);
context.synchronize(m_currentConfig, model, trainingsData);
if (!context.error_check()) {
return false;
}
for (int iteration = 0; iteration < numRelearning; iteration++) {
for (int i = 0; i < trainingsData.size(); i++) {
if (!train_sample(i, trainingsData[i], context)) {
return false;
}
}
}
context.devWeights.synch_from_device(model.get_weights());
return !context.devWeights.has_error();
}
neuronal_network::test_result_t neuronal_network::test(const cuda::model& model, const std::vector<data::sample<float>>& testData) const {
test_data_context context(m_currentConfig, model, testData);
test_result_t result;
int i = 0;
int current;
context.synchronize(m_currentConfig, model, testData);
if (!context.error_check()) {
result.total = -1;
result.error = -1;
result.ratio = -1;
return result;
}
for (const data::sample<float>& s : testData) {
current = test_sample(i, s, context);
if (current == s.get_label()) {
result.correct++;
} else if (current == -1) {
result.total = -1;
result.error = -1;
result.ratio = -1;
return result;
}
i++;
}
result.total = testData.size();
result.error = result.total - result.correct;
result.ratio = (float) result.correct / (float) result.total;
return result;
}
bool neuronal_network::set_classify_context(const cuda::model& model, const data::sample<float>& s) {
if (m_currentContext != nullptr) {
delete m_currentContext;
}
std::vector<data::sample<float>> dummy_data;
dummy_data.push_back(s);
m_currentContext = new test_data_context(m_currentConfig, model, dummy_data);
m_currentContext->synchronize(m_currentConfig, model, dummy_data);
return m_currentContext->error_check();
}
int neuronal_network::classify(const data::sample<float>& s) const {
if (m_currentContext != nullptr && m_currentContext->error_check()) {
m_currentContext->devInput.synch_to_device(s.internal_data(), 0);
hipDeviceSynchronize();
return test_sample(0, s, *m_currentContext);
}
return -1;
}
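// One training step for a single sample: two forward launches
// (input -> hidden, hidden -> output) followed by two backpropagation
// launches that update the hidden->output and then the input->hidden weights.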
bool neuronal_network::train_sample(const int i, const data::sample<float>& sample, train_data_context& context) const {
int num_blocks;
int num_threads;
num_blocks = context.hiddenLayer.size();
num_threads = sample.size();
hipLaunchKernelGGL(( cuda_neural_network), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(float), 0, (&context.devInput) + i * sample.size(), &context.devHidden, &context.devWeights);
if (hipSuccess != hipGetLastError()) {
return false;
}
num_blocks = context.outputLayer.size();
num_threads = context.hiddenLayer.size();
hipLaunchKernelGGL(( cuda_neural_network), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(float), 0, &context.devHidden, &context.devOutput, (&context.devWeights) + ((sample.size() + 1) * context.hiddenLayer.size()));
if (hipSuccess != hipGetLastError()) {
return false;
}
num_blocks = context.hiddenLayer.size();
num_threads = context.outputLayer.size();
hipLaunchKernelGGL(( cuda_neural_network_error), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(float), 0, &context.devHidden, &context.devOutput, (&context.devWeights) + ((sample.size() + 1) * context.hiddenLayer.size()), &context.devLearning, (&context.devLabels) + i * context.outputLayer.size(), true);
if (hipSuccess != hipGetLastError()) {
return false;
}
num_blocks = sample.size();
num_threads = context.hiddenLayer.size();
hipLaunchKernelGGL(( cuda_neural_network_error), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(float), 0, (&context.devInput) + i * sample.size(), &context.devHidden, &context.devWeights, &context.devLearning, nullptr, false);
if (hipSuccess != hipGetLastError()) {
return false;
}
return true;
}
int neuronal_network::test_sample(const int i, const data::sample<float>& sample, test_data_context& context) const {
int num_blocks;
int num_threads;
num_blocks = context.hiddenLayer.size();
num_threads = sample.size();
hipLaunchKernelGGL(( cuda_neural_network), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(float), 0, (&context.devInput) + i * sample.size(), &context.devHidden, &context.devWeights);
if (hipSuccess != hipGetLastError()) {
return -1;
}
num_blocks = context.outputLayer.size();
num_threads = context.hiddenLayer.size();
hipLaunchKernelGGL(( cuda_neural_network), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(float), 0, &context.devHidden, &context.devOutput, (&context.devWeights) + ((sample.size() + 1) * context.hiddenLayer.size()));
if (hipSuccess != hipGetLastError()) {
return -1;
}
context.devOutput.synch_from_device(context.outputLayer);
hipDeviceSynchronize();
return std::distance(context.outputLayer.begin(), std::max_element(context.outputLayer.begin(), context.outputLayer.end()));
}
neuronal_network::train_data_context::train_data_context(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) :
hiddenLayer(config.numHidden), outputLayer(config.numOutput), labels(samples.size() * outputLayer.size()), devInput(samples[0].internal_data(), samples.size()), devHidden(hiddenLayer), devOutput(outputLayer), devWeights(model.get_weights()), devLabels(labels) {
for (int i = 0; i < samples.size(); i++) {
for (int j = 0; j < outputLayer.size(); j++) {
if (samples[i].get_label() == j) {
labels[i * outputLayer.size() + j] = 1;
} else {
labels[i * outputLayer.size() + j] = 0;
}
}
}
}
void neuronal_network::train_data_context::synchronize(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) {
std::vector<float> combinedData(samples.size() * samples[0].internal_data().size());
int index = 0;
for (auto& s : samples) {
for (auto& f : s.internal_data()) {
combinedData[index] = f;
index++;
}
}
devInput.synch_to_device(combinedData);
devHidden.synch_to_device(hiddenLayer);
devOutput.synch_to_device(outputLayer);
devWeights.synch_to_device(model.get_weights());
devLabels.synch_to_device(labels);
devLearning.synch_to_device(config.learningRate);
}
bool neuronal_network::train_data_context::error_check() const {
bool error = false;
error |= devInput.has_error();
error |= devHidden.has_error();
error |= devOutput.has_error();
error |= devWeights.has_error();
error |= devLabels.has_error();
error |= devLearning.has_error();
return !error;
}
neuronal_network::test_data_context::test_data_context(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) :
hiddenLayer(config.numHidden), outputLayer(config.numOutput), devInput(samples[0].internal_data(), samples.size()), devHidden(hiddenLayer), devOutput(outputLayer), devWeights(model.get_weights()) {
}
void neuronal_network::test_data_context::synchronize(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) {
std::vector<float> combinedData(samples.size() * samples[0].internal_data().size());
int index = 0;
for (auto& s : samples) {
for (auto& f : s.internal_data()) {
combinedData[index] = f;
index++;
}
}
devInput.synch_to_device(combinedData);
devHidden.synch_to_device(hiddenLayer);
devOutput.synch_to_device(outputLayer);
devWeights.synch_to_device(model.get_weights());
}
bool neuronal_network::test_data_context::error_check() const {
bool error = false;
error |= devInput.has_error();
error |= devHidden.has_error();
error |= devOutput.has_error();
error |= devWeights.has_error();
return !error;
}
}
| 275e6c235d03ad2d84018254f7883430b0c1e250.cu | #include <algorithm>
#include "cuda_neuronal_network.h"
#include "cuda_kernel.h"
namespace cuda {
neuronal_network::neuronal_network() :
m_currentContext(nullptr) {
}
neuronal_network::~neuronal_network() {
if (m_currentContext != nullptr) {
delete m_currentContext;
}
}
void neuronal_network::set_config(const config_t config) {
m_currentConfig = config;
}
bool neuronal_network::train(cuda::model& model, const std::vector<data::sample<float>>& trainingsData, const int numRelearning) const {
const std::vector<float>& refInput = trainingsData[0].internal_data();
model.init((refInput.size() + 1) * m_currentConfig.numHidden + (m_currentConfig.numHidden + 1) * m_currentConfig.numOutput);
train_data_context context(m_currentConfig, model, trainingsData);
context.synchronize(m_currentConfig, model, trainingsData);
if (!context.error_check()) {
return false;
}
for (int iteration = 0; iteration < numRelearning; iteration++) {
for (int i = 0; i < trainingsData.size(); i++) {
if (!train_sample(i, trainingsData[i], context)) {
return false;
}
}
}
context.devWeights.synch_from_device(model.get_weights());
return !context.devWeights.has_error();
}
neuronal_network::test_result_t neuronal_network::test(const cuda::model& model, const std::vector<data::sample<float>>& testData) const {
test_data_context context(m_currentConfig, model, testData);
test_result_t result;
int i = 0;
int current;
context.synchronize(m_currentConfig, model, testData);
if (!context.error_check()) {
result.total = -1;
result.error = -1;
result.ratio = -1;
return result;
}
for (const data::sample<float>& s : testData) {
current = test_sample(i, s, context);
if (current == s.get_label()) {
result.correct++;
} else if (current == -1) {
result.total = -1;
result.error = -1;
result.ratio = -1;
return result;
}
i++;
}
result.total = testData.size();
result.error = result.total - result.correct;
result.ratio = (float) result.correct / (float) result.total;
return result;
}
bool neuronal_network::set_classify_context(const cuda::model& model, const data::sample<float>& s) {
if (m_currentContext != nullptr) {
delete m_currentContext;
}
std::vector<data::sample<float>> dummy_data;
dummy_data.push_back(s);
m_currentContext = new test_data_context(m_currentConfig, model, dummy_data);
m_currentContext->synchronize(m_currentConfig, model, dummy_data);
return m_currentContext->error_check();
}
int neuronal_network::classify(const data::sample<float>& s) const {
if (m_currentContext != nullptr && m_currentContext->error_check()) {
m_currentContext->devInput.synch_to_device(s.internal_data(), 0);
cudaThreadSynchronize();
return test_sample(0, s, *m_currentContext);
}
return -1;
}
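// One training step for a single sample: two forward launches
// (input -> hidden, hidden -> output) followed by two backpropagation
// launches that update the hidden->output and then the input->hidden weights.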
bool neuronal_network::train_sample(const int i, const data::sample<float>& sample, train_data_context& context) const {
int num_blocks;
int num_threads;
num_blocks = context.hiddenLayer.size();
num_threads = sample.size();
cuda_neural_network<<<num_blocks, num_threads, num_threads * sizeof(float)>>>((&context.devInput) + i * sample.size(), &context.devHidden, &context.devWeights);
if (cudaSuccess != cudaGetLastError()) {
return false;
}
num_blocks = context.outputLayer.size();
num_threads = context.hiddenLayer.size();
cuda_neural_network<<<num_blocks, num_threads, num_threads * sizeof(float)>>>(&context.devHidden, &context.devOutput, (&context.devWeights) + ((sample.size() + 1) * context.hiddenLayer.size()));
if (cudaSuccess != cudaGetLastError()) {
return false;
}
num_blocks = context.hiddenLayer.size();
num_threads = context.outputLayer.size();
cuda_neural_network_error<<<num_blocks, num_threads, num_threads * sizeof(float)>>>(&context.devHidden, &context.devOutput, (&context.devWeights) + ((sample.size() + 1) * context.hiddenLayer.size()), &context.devLearning, (&context.devLabels) + i * context.outputLayer.size(), true);
if (cudaSuccess != cudaGetLastError()) {
return false;
}
num_blocks = sample.size();
num_threads = context.hiddenLayer.size();
cuda_neural_network_error<<<num_blocks, num_threads, num_threads * sizeof(float)>>>((&context.devInput) + i * sample.size(), &context.devHidden, &context.devWeights, &context.devLearning, nullptr, false);
if (cudaSuccess != cudaGetLastError()) {
return false;
}
return true;
}
int neuronal_network::test_sample(const int i, const data::sample<float>& sample, test_data_context& context) const {
int num_blocks;
int num_threads;
num_blocks = context.hiddenLayer.size();
num_threads = sample.size();
cuda_neural_network<<<num_blocks, num_threads, num_threads * sizeof(float)>>>((&context.devInput) + i * sample.size(), &context.devHidden, &context.devWeights);
if (cudaSuccess != cudaGetLastError()) {
return -1;
}
num_blocks = context.outputLayer.size();
num_threads = context.hiddenLayer.size();
cuda_neural_network<<<num_blocks, num_threads, num_threads * sizeof(float)>>>(&context.devHidden, &context.devOutput, (&context.devWeights) + ((sample.size() + 1) * context.hiddenLayer.size()));
if (cudaSuccess != cudaGetLastError()) {
return -1;
}
context.devOutput.synch_from_device(context.outputLayer);
cudaThreadSynchronize();
return std::distance(context.outputLayer.begin(), std::max_element(context.outputLayer.begin(), context.outputLayer.end()));
}
neuronal_network::train_data_context::train_data_context(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) :
hiddenLayer(config.numHidden), outputLayer(config.numOutput), labels(samples.size() * outputLayer.size()), devInput(samples[0].internal_data(), samples.size()), devHidden(hiddenLayer), devOutput(outputLayer), devWeights(model.get_weights()), devLabels(labels) {
for (int i = 0; i < samples.size(); i++) {
for (int j = 0; j < outputLayer.size(); j++) {
if (samples[i].get_label() == j) {
labels[i * outputLayer.size() + j] = 1;
} else {
labels[i * outputLayer.size() + j] = 0;
}
}
}
}
void neuronal_network::train_data_context::synchronize(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) {
std::vector<float> combinedData(samples.size() * samples[0].internal_data().size());
int index = 0;
for (auto& s : samples) {
for (auto& f : s.internal_data()) {
combinedData[index] = f;
index++;
}
}
devInput.synch_to_device(combinedData);
devHidden.synch_to_device(hiddenLayer);
devOutput.synch_to_device(outputLayer);
devWeights.synch_to_device(model.get_weights());
devLabels.synch_to_device(labels);
devLearning.synch_to_device(config.learningRate);
}
bool neuronal_network::train_data_context::error_check() const {
bool error = false;
error |= devInput.has_error();
error |= devHidden.has_error();
error |= devOutput.has_error();
error |= devWeights.has_error();
error |= devLabels.has_error();
error |= devLearning.has_error();
return !error;
}
neuronal_network::test_data_context::test_data_context(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) :
hiddenLayer(config.numHidden), outputLayer(config.numOutput), devInput(samples[0].internal_data(), samples.size()), devHidden(hiddenLayer), devOutput(outputLayer), devWeights(model.get_weights()) {
}
void neuronal_network::test_data_context::synchronize(const config_t config, const cuda::model& model, const std::vector<data::sample<float>>& samples) {
std::vector<float> combinedData(samples.size() * samples[0].internal_data().size());
int index = 0;
for (auto& s : samples) {
for (auto& f : s.internal_data()) {
combinedData[index] = f;
index++;
}
}
devInput.synch_to_device(combinedData);
devHidden.synch_to_device(hiddenLayer);
devOutput.synch_to_device(outputLayer);
devWeights.synch_to_device(model.get_weights());
}
bool neuronal_network::test_data_context::error_check() const {
bool error = false;
error |= devInput.has_error();
error |= devHidden.has_error();
error |= devOutput.has_error();
error |= devWeights.has_error();
return !error;
}
}
|
45232cdca43ef2c7887e7393d13c38604efd3a2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 30
// Variables
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
float* h_Value;
float* d_Value;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(float* Value)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
__device__ __shared__ float I3[THREADS_PER_BLOCK];
__device__ __shared__ float I4[THREADS_PER_BLOCK];
I1[i]=i;
I2[i]=i/2;
I3[i]=i;
I4[i]=i+1;
//Do Some Computation
float Value1;
float Value2;
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=ConstArray1[(i+k)%THREADS_PER_BLOCK];
Value2=ConstArray2[(i+k+1)%THREADS_PER_BLOCK];
I1[i]=Value1*2+I2[i];
I2[i]=Value2+I4[i];
I3[i]=Value1/2+I3[i];
I4[i]=Value2+I1[i];
I1[i]=I2[(i+k)%THREADS_PER_BLOCK];
I2[i]=I1[(i+k+1)%THREADS_PER_BLOCK];
I3[i]=I4[(i+k)%THREADS_PER_BLOCK];
I4[i]=I3[(i+k+1)%THREADS_PER_BLOCK];
}
__syncthreads();
*Value=I1[i]+I2[i]+I3[i]+I4[i];
}
// Host code
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
h_Value = (float *) malloc(sizeof(float));
srand(time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = (float) rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = (float) rand() / RAND_MAX;
}
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
checkCudaErrors( hipMalloc((void**)&d_Value, sizeof(float)) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Value);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
checkCudaErrors( hipMemcpy(h_Value, d_Value, sizeof(float), hipMemcpyDeviceToHost) );
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
return 0;
}
| 45232cdca43ef2c7887e7393d13c38604efd3a2c.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 30
// Variables
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
float* h_Value;
float* d_Value;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(float* Value)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
__device__ __shared__ float I3[THREADS_PER_BLOCK];
__device__ __shared__ float I4[THREADS_PER_BLOCK];
I1[i]=i;
I2[i]=i/2;
I3[i]=i;
I4[i]=i+1;
//Do Some Computation
float Value1;
float Value2;
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=ConstArray1[(i+k)%THREADS_PER_BLOCK];
Value2=ConstArray2[(i+k+1)%THREADS_PER_BLOCK];
I1[i]=Value1*2+I2[i];
I2[i]=Value2+I4[i];
I3[i]=Value1/2+I3[i];
I4[i]=Value2+I1[i];
I1[i]=I2[(i+k)%THREADS_PER_BLOCK];
I2[i]=I1[(i+k+1)%THREADS_PER_BLOCK];
I3[i]=I4[(i+k)%THREADS_PER_BLOCK];
I4[i]=I3[(i+k+1)%THREADS_PER_BLOCK];
}
__syncthreads();
*Value=I1[i]+I2[i]+I3[i]+I4[i];
}
// Host code
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
h_Value = (float *) malloc(sizeof(float));
srand(time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = (float) rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = (float) rand() / RAND_MAX;
}
cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
checkCudaErrors( cudaMalloc((void**)&d_Value, sizeof(float)) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_Value);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
checkCudaErrors( cudaMemcpy(h_Value, d_Value, sizeof(float), cudaMemcpyDeviceToHost) );
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
return 0;
}
|
facb79341a97cfa64b1c0674030c6e78639ecd9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
cupSODA.cu: porting of LSODA to CUDA.
See file COPYING for copyright and licensing information.
*/
#include "constants.h"
#include "cupSODA.h"
#include <vector>
#include "input_reader.h"
#include "stoc2det.h"
#include <cstdio>
// extern char* device_compressed_odes;
// void save_constants(unsigned int c) { hipMemcpyToSymbol(NUM_ODES, &c, sizeof(unsigned int), 0, hipMemcpyHostToDevice); }
void LoadSystem( st2det* system ) {
printf(" * Loading flatten ODEs and Jacobian on GPU\n");
// hipMalloc((void**)&device_compressed_odes,sizeof(char) * size);
}
void SetODEarray(st2det* system ) {
#ifdef USE_CONSTANT_MEMORY
hipMemcpyToSymbol( ODE_new, system->ODE, sizeof(param_t)*system->ODE_lun );
CudaCheckError() ;
hipMemcpyToSymbol( JAC_new, system->JAC, sizeof(param_t)*system->JAC_lun );
CudaCheckError() ;
#endif
}
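// Copies the problem sizes (species, reactions, flattened ODE/Jacobian
// lengths, sampling information, repetitions, experiments, thread count)
// into device constant memory, then reads a subset back and prints it as a
// sanity check when `dump` is enabled.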
void SetConstants( unsigned int species, unsigned int reactions, unsigned int ode_lun, unsigned int jac_lun, unsigned int cs_lun, unsigned int time_in, unsigned int reps, unsigned int experiments, unsigned int threads, bool dump = false) {
hipMemcpyToSymbol( DEV_CONST_SPECIES, &species, sizeof(species));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_REACTIONS, &reactions, sizeof(reactions));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_ODELUN, &ode_lun, sizeof(ode_lun));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_JACLUN, &jac_lun, sizeof(jac_lun));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_SAMPLESLUN,&cs_lun, sizeof(cs_lun));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_TIMESLUN, &time_in, sizeof(time_in));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_SAMPLESPECIES, &cs_lun, sizeof(cs_lun));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_SAMPLES, &time_in, sizeof(time_in));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_REPETITIONS , &reps, sizeof(reps));
CudaCheckError();
hipMemcpyToSymbol( DEV_CONST_EXPERIMENTS , &experiments, sizeof(experiments));
CudaCheckError();
hipMemcpyToSymbol( DEV_ACTUAL_THREADS, &threads, sizeof(threads));
CudaCheckError();
unsigned int s_back = 0;
unsigned int r_back = 0;
unsigned int ol_back = 0;
unsigned int jl_back = 0;
unsigned int cs_back = 0;
unsigned int ti_back = 0;
unsigned int ss_back = 0;
hipMemcpyFromSymbol( &s_back, DEV_CONST_SPECIES, sizeof(s_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: species was set to %d\n", s_back);
hipMemcpyFromSymbol( &r_back, DEV_CONST_REACTIONS, sizeof(r_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: reactions was set to %d\n", r_back);
hipMemcpyFromSymbol( &ol_back, DEV_CONST_ODELUN, sizeof(ol_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: ode_lung was set to %d\n", ol_back);
hipMemcpyFromSymbol( &jl_back, DEV_CONST_JACLUN, sizeof(jl_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: jac_lung was set to %d\n", jl_back);
hipMemcpyFromSymbol( &cs_back, DEV_CONST_SAMPLESLUN, sizeof(cs_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: samples to be sampled was set to %d\n", cs_back);
hipMemcpyFromSymbol( &ti_back, DEV_CONST_TIMESLUN, sizeof(ti_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: ti_lun was set to %d\n", ti_back);
hipMemcpyFromSymbol( &ss_back, DEV_CONST_SAMPLESPECIES, sizeof(ss_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: number of sampled species was set to %d\n", ss_back);
}
/*
void load_compressed_odes( InputReader* ir ) {
unsigned int size = ir->GetCompODESize();
// Step 1: allocate memory
char* host_compressed_odes = (char*) malloc ( sizeof(char) * size);
hipMalloc((void**)&device_compressed_odes,sizeof(char) * size);
unsigned int pos = 0;
// Step 2: fill array
printf(" * Loading compressed ODE matrix\n");
// per ogni riga...
for (unsigned int r=0; r<ir->comp_ODE.size(); r++) {
for (unsigned int c=0; c<ir->comp_ODE[r]->size(); c++) {
printf("%d ", ir->comp_ODE[r]->at(c));
host_compressed_odes[pos++] = ir->comp_ODE[r]->at(c);
}
printf("\n");
}
// Step 3: copy on GPU
hipMemcpy(device_compressed_odes,host_compressed_odes,sizeof(char) * size,hipMemcpyHostToDevice);
// Step 4: save array size
hipMemcpyToSymbol(COMP_ODE_SIZE, &size, sizeof(unsigned int), 0, hipMemcpyHostToDevice);
// Step 5: clean up stuff
free( host_compressed_odes );
}
*/
#ifndef use_export
#ifndef CULSODA_CU
#define CULSODA_CU
#include <math.h>
#include <stdio.h>
// Begin Define block for common variables
#define conit common->CM_conit
#define crate common->CM_crate
#define ccmax common->CM_ccmax
#define el0 common->CM_el0
#define h__ common->CM_h__
#define hmin common->CM_hmin
#define hmxi common->CM_hmxi
#define hu common->CM_hu
#define rc common->CM_rc
#define tn common->CM_tn
#define uround common->CM_uround
#define pdest common->CM_pdest
#define pdlast common->CM_pdlast
#define ratio common->CM_ratio
#define hold common->CM_hold
#define rmax common->CM_rmax
#define el common->CM_el
#define elco common->CM_elco
#define tesco common->CM_tesco
#define rls common->CM_rls
#define tsw common->CM_tsw
#define pdnorm common->CM_pdnorm
#define cm1 common->CM_cm1
#define cm2 common->CM_cm2
#define rlsa common->CM_rlsa
#define sm1 common->CM_sm1
#define init common->CM_init
#define mxstep common->CM_mxstep
#define mxhnil common->CM_mxhnil
#define nhnil common->CM_nhnil
#define nslast common->CM_nslast
#define nyh common->CM_nyh
#define icf common->CM_icf
#define ierpj common->CM_ierpj
#define iersl common->CM_iersl
#define jcur common->CM_jcur
#define jstart common->CM_jstart
#define kflag common->CM_kflag
#define l common->CM_l
#define lyh common->CM_lyh
#define lewt common->CM_lewt
#define lacor common->CM_lacor
#define lsavf common->CM_lsavf
#define lwm common->CM_lwm
#define liwm common->CM_liwm
#define meth common->CM_meth
#define miter common->CM_miter
#define maxord common->CM_maxord
#define maxcor common->CM_maxcor
#define msbp common->CM_msbp
#define mxncf common->CM_mxncf
#define n common->CM_n
#define nq common->CM_nq
#define nst common->CM_nst
#define nfe common->CM_nfe
#define nje common->CM_nje
#define nqu common->CM_nqu
#define ialth common->CM_ialth
#define ipup common->CM_ipup
#define lmax common->CM_lmax
#define nqnyh common->CM_nqnyh
#define nslp common->CM_nslp
#define ils common->CM_ils
#define insufr common->CM_insufr
#define insufi common->CM_insufi
#define ixpr common->CM_ixpr
#define jtyp common->CM_jtyp
#define mused common->CM_mused
#define mxordn common->CM_mxordn
#define mxords common->CM_mxords
#define icount common->CM_icount
#define irflag common->CM_irflag
#define ilsa common->CM_ilsa
// End of Definitions
#ifdef use_export
export
#endif
__device__ int dlsoda_(myFex f, int *neq, double *y, double *t, double *tout, int *itol, double *rtol, double *atol, int *itask, int *istate, int *iopt, double *rwork, int *lrw, int *iwork, int *liw, myJex jac, int *jt, struct cuLsodaCommonBlock *common, int* debug, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, double* costanti, param_t* myjac, unsigned int* myjacoffset)
{
/* Initialized data */
//struct cuLsodaCommonBlock commonB;
//struct cuLsodaCommonBlock *common;
//common = &commonB;
int indice = threadIdx.x + blockIdx.x*blockDim.x;
int mord[2] = { 12,5 };
int mxstp0 = 500;
int mxhnl0 = 10;
/* System generated locals */
int i__1 = 0;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__;
double h0 = 0.;
int i1 = 0;
int i2 = 0;
double w0 = 0.;
int ml = 0;
double rh = 0.;
int mu = 0;
double tp = 0.;
int lf0 = 0;
double big = 0.;
int kgo = 0;
double ayi = 0.;
double hmx = 0.;
double tol = 0.;
double sum = 0.;
int len1 = 0;
int len2 = 0;
double hmax = 0.;
int ihit = 0;
double ewti = 0.;
double size = 0.;
int len1c = 0;
int len1n = 0;
int len1s = 0;
int iflag;
double atoli = 0.;
int leniw = 0;
int lenwm = 0;
int imxer = 0;
double tcrit = 0.;
int lenrw = 0;
double tdist = 0.;
double rtoli = 0.;
double tolsf = 0.;
double tnext = 0.;
int leniwc = 0;
int lenrwc = 0;
/* ----------------------------------------------------------------------- */
/* This is the 12 November 2003 version of */
/* DLSODA: Livermore Solver for Ordinary Differential Equations, with */
/* Automatic method switching for stiff and nonstiff problems. */
/* This version is in double precision. */
/* DLSODA solves the initial value problem for stiff or nonstiff */
/* systems of first order ODEs, */
/* dy/dt = f(t,y) , or, in component form, */
/* dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(NEQ)) (i = 1,...,NEQ). */
/* This a variant version of the DLSODE package. */
/* It switches automatically between stiff and nonstiff methods. */
/* This means that the user does not have to determine whether the */
/* problem is stiff or not, and the solver will automatically choose the */
/* appropriate method. It always starts with the nonstiff method. */
/* Authors: Alan C. Hindmarsh */
/* Center for Applied Scientific Computing, L-561 */
/* Lawrence Livermore National Laboratory */
/* Livermore, CA 94551 */
/* and */
/* Linda R. Petzold */
/* Univ. of California at Santa Barbara */
/* Dept. of Computer Science */
/* Santa Barbara, CA 93106 */
/* References: */
/* 1. Alan C. Hindmarsh, ODEPACK, A Systematized Collection of ODE */
/* Solvers, in Scientific Computing, R. S. Stepleman et al. (Eds.), */
/* North-Holland, Amsterdam, 1983, pp. 55-64. */
/* 2. Linda R. Petzold, Automatic Selection of Methods for Solving */
/* Stiff and Nonstiff Systems of Ordinary Differential Equations, */
/* Siam J. Sci. Stat. Comput. 4 (1983), pp. 136-148. */
/* ----------------------------------------------------------------------- */
/* Summary of Usage. */
/* Communication between the user and the DLSODA package, for normal */
/* situations, is summarized here. This summary describes only a subset */
/* of the full set of options available. See the full description for */
/* details, including alternative treatment of the Jacobian matrix, */
/* optional inputs and outputs, nonstandard options, and */
/* instructions for special situations. See also the example */
/* problem (with program and output) following this summary. */
/* A. First provide a subroutine of the form: */
/* SUBROUTINE F (NEQ, T, Y, YDOT) */
/* DOUBLE PRECISION T, Y(*), YDOT(*) */
/* which supplies the vector function f by loading YDOT(i) with f(i). */
/* B. Write a main program which calls Subroutine DLSODA once for */
/* each point at which answers are desired. This should also provide */
/* for possible use of logical unit 6 for output of error messages */
/* by DLSODA. On the first call to DLSODA, supply arguments as follows: */
/* F = name of subroutine for right-hand side vector f. */
/* This name must be declared External in calling program. */
/* NEQ = number of first order ODEs. */
/* Y = array of initial values, of length NEQ. */
/* T = the initial value of the independent variable. */
/* TOUT = first point where output is desired (.ne. T). */
/* ITOL = 1 or 2 according as ATOL (below) is a scalar or array. */
/* RTOL = relative tolerance parameter (scalar). */
/* ATOL = absolute tolerance parameter (scalar or array). */
/* the estimated local error in y(i) will be controlled so as */
/* to be less than */
/* EWT(i) = RTOL*ABS(Y(i)) + ATOL if ITOL = 1, or */
/* EWT(i) = RTOL*ABS(Y(i)) + ATOL(i) if ITOL = 2. */
/* Thus the local error test passes if, in each component, */
/* either the absolute error is less than ATOL (or ATOL(i)), */
/* or the relative error is less than RTOL. */
/* Use RTOL = 0.0 for pure absolute error control, and */
/* use ATOL = 0.0 (or ATOL(i) = 0.0) for pure relative error */
/* control. Caution: actual (global) errors may exceed these */
/* local tolerances, so choose them conservatively. */
/* ITASK = 1 for normal computation of output values of y at t = TOUT. */
/* ISTATE = int flag (input and output). Set ISTATE = 1. */
/* IOPT = 0 to indicate no optional inputs used. */
/* RWORK = real work array of length at least: */
/* 22 + NEQ * MAX(16, NEQ + 9). */
/* See also Paragraph E below. */
/* LRW = declared length of RWORK (in user's dimension). */
/* IWORK = int work array of length at least 20 + NEQ. */
/* LIW = declared length of IWORK (in user's dimension). */
/* JAC = name of subroutine for Jacobian matrix. */
/* Use a dummy name. See also Paragraph E below. */
/* JT = Jacobian type indicator. Set JT = 2. */
/* See also Paragraph E below. */
/* Note that the main program must declare arrays Y, RWORK, IWORK, */
/* and possibly ATOL. */
/* C. The output from the first call (or any call) is: */
/* Y = array of computed values of y(t) vector. */
/* T = corresponding value of independent variable (normally TOUT). */
/* ISTATE = 2 if DLSODA was successful, negative otherwise. */
/* -1 means excess work done on this call (perhaps wrong JT). */
/* -2 means excess accuracy requested (tolerances too small). */
/* -3 means illegal input detected (see printed message). */
/* -4 means repeated error test failures (check all inputs). */
/* -5 means repeated convergence failures (perhaps bad Jacobian */
/* supplied or wrong choice of JT or tolerances). */
/* -6 means error weight became zero during problem. (Solution */
/* component i vanished, and ATOL or ATOL(i) = 0.) */
/* -7 means work space insufficient to finish (see messages). */
/* D. To continue the integration after a successful return, simply */
/* reset TOUT and call DLSODA again. No other parameters need be reset. */
/* E. Note: If and when DLSODA regards the problem as stiff, and */
/* switches methods accordingly, it must make use of the NEQ by NEQ */
/* Jacobian matrix, J = df/dy. For the sake of simplicity, the */
/* inputs to DLSODA recommended in Paragraph B above cause DLSODA to */
/* treat J as a full matrix, and to approximate it internally by */
/* difference quotients. Alternatively, J can be treated as a band */
/* matrix (with great potential reduction in the size of the RWORK */
/* array). Also, in either the full or banded case, the user can supply */
/* J in closed form, with a routine whose name is passed as the JAC */
/* argument. These alternatives are described in the paragraphs on */
/* RWORK, JAC, and JT in the full description of the call sequence below. */
/* ----------------------------------------------------------------------- */
/* Example Problem. */
/* The following is a simple example problem, with the coding */
/* needed for its solution by DLSODA. The problem is from chemical */
/* kinetics, and consists of the following three rate equations: */
/* dy1/dt = -.04*y1 + 1.e4*y2*y3 */
/* dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 */
/* dy3/dt = 3.e7*y2**2 */
/* on the interval from t = 0.0 to t = 4.e10, with initial conditions */
/* y1 = 1.0, y2 = y3 = 0. The problem is stiff. */
/* The following coding solves this problem with DLSODA, */
/* printing results at t = .4, 4., ..., 4.e10. It uses */
/* ITOL = 2 and ATOL much smaller for y2 than y1 or y3 because */
/* y2 has much smaller values. */
/* At the end of the run, statistical quantities of interest are */
/* printed (see optional outputs in the full description below). */
/* EXTERNAL FEX */
/* DOUBLE PRECISION ATOL, RTOL, RWORK, T, TOUT, Y */
/* DIMENSION Y(3), ATOL(3), RWORK(70), IWORK(23) */
/* NEQ = 3 */
/* Y(1) = 1. */
/* Y(2) = 0. */
/* Y(3) = 0. */
/* T = 0. */
/* TOUT = .4 */
/* ITOL = 2 */
/* RTOL = 1.D-4 */
/* ATOL(1) = 1.D-6 */
/* ATOL(2) = 1.D-10 */
/* ATOL(3) = 1.D-6 */
/* ITASK = 1 */
/* ISTATE = 1 */
/* IOPT = 0 */
/* LRW = 70 */
/* LIW = 23 */
/* JT = 2 */
/* DO 40 IOUT = 1,12 */
/* CALL DLSODA(FEX,NEQ,Y,T,TOUT,ITOL,RTOL,ATOL,ITASK,ISTATE, */
/* 1 IOPT,RWORK,LRW,IWORK,LIW,JDUM,JT) */
/* WRITE(6,20)T,Y(1),Y(2),Y(3) */
/* 20 FORMAT(' At t =',D12.4,' Y =',3D14.6) */
/* IF (ISTATE .LT. 0) GO TO 80 */
/* 40 TOUT = TOUT*10. */
/* WRITE(6,60)IWORK(11),IWORK(12),IWORK(13),IWORK(19),RWORK(15) */
/* 60 FORMAT(/' No. steps =',I4,' No. f-s =',I4,' No. J-s =',I4/ */
/* 1 ' Method last used =',I2,' Last switch was at t =',D12.4) */
/* STOP */
/* 80 WRITE(6,90)ISTATE */
/* 90 FORMAT(///' Error halt.. ISTATE =',I3) */
/* STOP */
/* END */
/* SUBROUTINE FEX (NEQ, T, Y, YDOT) */
/* DOUBLE PRECISION T, Y, YDOT */
/* DIMENSION Y(3), YDOT(3) */
/* YDOT(1) = -.04*Y(1) + 1.D4*Y(2)*Y(3) */
/* YDOT(3) = 3.D7*Y(2)*Y(2) */
/* YDOT(2) = -YDOT(1) - YDOT(3) */
/* RETURN */
/* END */
/* The output of this program (on a CDC-7600 in single precision) */
/* is as follows: */
/* At t = 4.0000e-01 Y = 9.851712e-01 3.386380e-05 1.479493e-02 */
/* At t = 4.0000e+00 Y = 9.055333e-01 2.240655e-05 9.444430e-02 */
/* At t = 4.0000e+01 Y = 7.158403e-01 9.186334e-06 2.841505e-01 */
/* At t = 4.0000e+02 Y = 4.505250e-01 3.222964e-06 5.494717e-01 */
/* At t = 4.0000e+03 Y = 1.831975e-01 8.941774e-07 8.168016e-01 */
/* At t = 4.0000e+04 Y = 3.898730e-02 1.621940e-07 9.610125e-01 */
/* At t = 4.0000e+05 Y = 4.936363e-03 1.984221e-08 9.950636e-01 */
/* At t = 4.0000e+06 Y = 5.161831e-04 2.065786e-09 9.994838e-01 */
/* At t = 4.0000e+07 Y = 5.179817e-05 2.072032e-10 9.999482e-01 */
/* At t = 4.0000e+08 Y = 5.283401e-06 2.113371e-11 9.999947e-01 */
/* At t = 4.0000e+09 Y = 4.659031e-07 1.863613e-12 9.999995e-01 */
/* At t = 4.0000e+10 Y = 1.404280e-08 5.617126e-14 1.000000e+00 */
/* No. steps = 361 No. f-s = 693 No. J-s = 64 */
/* Method last used = 2 Last switch was at t = 6.0092e-03 */
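/* ----------------------------------------------------------------------- */
/* Illustrative sketch only (not part of the original prologue): driven */
/* from this CUDA port, the same problem might look roughly as follows */
/* inside a kernel. The functor names fex and jdum, the common pointer, */
/* and the trailing port-specific arguments (debug, comp_ode, flattenODE, */
/* offsetODE, costanti, myjac, myjacoffset) are assumed to be prepared by */
/* the caller; all arrays are 0-based in this C translation. */
/* int neq = 3, itol = 2, itask = 1, istate = 1, iopt = 0; */
/* int lrw = 70, liw = 23, jt = 2, iwork[23]; */
/* double y[3] = { 1., 0., 0. }, t = 0., tout = .4; */
/* double rtol = 1.e-4, atol[3] = { 1.e-6, 1.e-10, 1.e-6 }, rwork[70]; */
/* for (int iout = 0; iout < 12; ++iout) { */
/* dlsoda_(fex, &neq, y, &t, &tout, &itol, &rtol, atol, &itask, */
/* &istate, &iopt, rwork, &lrw, iwork, &liw, jdum, &jt, common, */
/* debug, comp_ode, flattenODE, offsetODE, costanti, myjac, */
/* myjacoffset); */
/* if (istate < 0) break; */
/* tout *= 10.; */
/* } */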
/* ----------------------------------------------------------------------- */
/* Full description of user interface to DLSODA. */
/* The user interface to DLSODA consists of the following parts. */
/* 1. The call sequence to Subroutine DLSODA, which is a driver */
/* routine for the solver. This includes descriptions of both */
/* the call sequence arguments and of user-supplied routines. */
/* following these descriptions is a description of */
/* optional inputs available through the call sequence, and then */
/* a description of optional outputs (in the work arrays). */
/* 2. Descriptions of other routines in the DLSODA package that may be */
/* (optionally) called by the user. These provide the ability to */
/* alter error message handling, save and restore the internal */
/* Common, and obtain specified derivatives of the solution y(t). */
/* 3. Descriptions of Common blocks to be declared in overlay */
/* or similar environments, or to be saved when doing an interrupt */
/* of the problem and continued solution later. */
/* 4. Description of a subroutine in the DLSODA package, */
/* which the user may replace with his/her own version, if desired. */
/* this relates to the measurement of errors. */
/* ----------------------------------------------------------------------- */
/* Part 1. Call Sequence. */
/* The call sequence parameters used for input only are */
/* F, NEQ, TOUT, ITOL, RTOL, ATOL, ITASK, IOPT, LRW, LIW, JAC, JT, */
/* and those used for both input and output are */
/* Y, T, ISTATE. */
/* The work arrays RWORK and IWORK are also used for conditional and */
/* optional inputs and optional outputs. (The term output here refers */
/* to the return from Subroutine DLSODA to the user's calling program.) */
/* The legality of input parameters will be thoroughly checked on the */
/* initial call for the problem, but not checked thereafter unless a */
/* change in input parameters is flagged by ISTATE = 3 on input. */
/* The descriptions of the call arguments are as follows. */
/* F = the name of the user-supplied subroutine defining the */
/* ODE system. The system must be put in the first-order */
/* form dy/dt = f(t,y), where f is a vector-valued function */
/* of the scalar t and the vector y. Subroutine F is to */
/* compute the function f. It is to have the form */
/* SUBROUTINE F (NEQ, T, Y, YDOT) */
/* DOUBLE PRECISION T, Y(*), YDOT(*) */
/* where NEQ, T, and Y are input, and the array YDOT = f(t,y) */
/* is output. Y and YDOT are arrays of length NEQ. */
/* Subroutine F should not alter Y(1),...,Y(NEQ). */
/* F must be declared External in the calling program. */
/* Subroutine F may access user-defined quantities in */
/* NEQ(2),... and/or in Y(NEQ(1)+1),... if NEQ is an array */
/* (dimensioned in F) and/or Y has length exceeding NEQ(1). */
/* See the descriptions of NEQ and Y below. */
/* If quantities computed in the F routine are needed */
/* externally to DLSODA, an extra call to F should be made */
/* for this purpose, for consistent and accurate results. */
/* If only the derivative dy/dt is needed, use DINTDY instead. */
/* NEQ = the size of the ODE system (number of first order */
/* ordinary differential equations). Used only for input. */
/* NEQ may be decreased, but not increased, during the problem. */
/* If NEQ is decreased (with ISTATE = 3 on input), the */
/* remaining components of Y should be left undisturbed, if */
/* these are to be accessed in F and/or JAC. */
/* Normally, NEQ is a scalar, and it is generally referred to */
/* as a scalar in this user interface description. However, */
/* NEQ may be an array, with NEQ(1) set to the system size. */
/* (The DLSODA package accesses only NEQ(1).) In either case, */
/* this parameter is passed as the NEQ argument in all calls */
/* to F and JAC. Hence, if it is an array, locations */
/* NEQ(2),... may be used to store other int data and pass */
/* it to F and/or JAC. Subroutines F and/or JAC must include */
/* NEQ in a Dimension statement in that case. */
/* Y = a real array for the vector of dependent variables, of */
/* length NEQ or more. Used for both input and output on the */
/* first call (ISTATE = 1), and only for output on other calls. */
/* On the first call, Y must contain the vector of initial */
/* values. On output, Y contains the computed solution vector, */
/* evaluated at T. If desired, the Y array may be used */
/* for other purposes between calls to the solver. */
/* This array is passed as the Y argument in all calls to */
/* F and JAC. Hence its length may exceed NEQ, and locations */
/* Y(NEQ+1),... may be used to store other real data and */
/* pass it to F and/or JAC. (The DLSODA package accesses only */
/* Y(1),...,Y(NEQ).) */
/* T = the independent variable. On input, T is used only on the */
/* first call, as the initial point of the integration. */
/* on output, after each call, T is the value at which a */
/* computed solution Y is evaluated (usually the same as TOUT). */
/* on an error return, T is the farthest point reached. */
/* TOUT = the next value of t at which a computed solution is desired. */
/* Used only for input. */
/* When starting the problem (ISTATE = 1), TOUT may be equal */
/* to T for one call, then should .ne. T for the next call. */
/* For the initial t, an input value of TOUT .ne. T is used */
/* in order to determine the direction of the integration */
/* (i.e. the algebraic sign of the step sizes) and the rough */
/* scale of the problem. Integration in either direction */
/* (forward or backward in t) is permitted. */
/* If ITASK = 2 or 5 (one-step modes), TOUT is ignored after */
/* the first call (i.e. the first call with TOUT .ne. T). */
/* Otherwise, TOUT is required on every call. */
/* If ITASK = 1, 3, or 4, the values of TOUT need not be */
/* monotone, but a value of TOUT which backs up is limited */
/* to the current internal T interval, whose endpoints are */
/* TCUR - HU and TCUR (see optional outputs, below, for */
/* TCUR and HU). */
/* ITOL = an indicator for the type of error control. See */
/* description below under ATOL. Used only for input. */
/* RTOL = a relative error tolerance parameter, either a scalar or */
/* an array of length NEQ. See description below under ATOL. */
/* Input only. */
/* ATOL = an absolute error tolerance parameter, either a scalar or */
/* an array of length NEQ. Input only. */
/* The input parameters ITOL, RTOL, and ATOL determine */
/* the error control performed by the solver. The solver will */
/* control the vector E = (E(i)) of estimated local errors */
/* in y, according to an inequality of the form */
/* max-norm of ( E(i)/EWT(i) ) .le. 1, */
/* where EWT = (EWT(i)) is a vector of positive error weights. */
/* The values of RTOL and ATOL should all be non-negative. */
/* The following table gives the types (scalar/array) of */
/* RTOL and ATOL, and the corresponding form of EWT(i). */
/* ITOL RTOL ATOL EWT(i) */
/* 1 scalar scalar RTOL*ABS(Y(i)) + ATOL */
/* 2 scalar array RTOL*ABS(Y(i)) + ATOL(i) */
/* 3 array scalar RTOL(i)*ABS(Y(i)) + ATOL */
/* 4 array array RTOL(i)*ABS(Y(i)) + ATOL(i) */
/* When either of these parameters is a scalar, it need not */
/* be dimensioned in the user's calling program. */
/* If none of the above choices (with ITOL, RTOL, and ATOL */
/* fixed throughout the problem) is suitable, more general */
/* error controls can be obtained by substituting a */
/* user-supplied routine for the setting of EWT. */
/* See Part 4 below. */
/* If global errors are to be estimated by making a repeated */
/* run on the same problem with smaller tolerances, then all */
/* components of RTOL and ATOL (i.e. of EWT) should be scaled */
/* down uniformly. */
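/* For instance, with ITOL = 2, RTOL = 1.E-4 and ATOL = (1.E-6, 1.E-10, */
/* 1.E-6) as in the example problem above, the weight on component 2 is */
/* EWT(2) = 1.E-4*ABS(Y(2)) + 1.E-10, so the step is accepted only if */
/* ABS(E(2)) .le. EWT(2) (and similarly for the other components). */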
/* ITASK = an index specifying the task to be performed. */
/* Input only. ITASK has the following values and meanings. */
/* 1 means normal computation of output values of y(t) at */
/* t = TOUT (by overshooting and interpolating). */
/* 2 means take one step only and return. */
/* 3 means stop at the first internal mesh point at or */
/* beyond t = TOUT and return. */
/* 4 means normal computation of output values of y(t) at */
/* t = TOUT but without overshooting t = TCRIT. */
/* TCRIT must be input as RWORK(1). TCRIT may be equal to */
/* or beyond TOUT, but not behind it in the direction of */
/* integration. This option is useful if the problem */
/* has a singularity at or beyond t = TCRIT. */
/* 5 means take one step, without passing TCRIT, and return. */
/* TCRIT must be input as RWORK(1). */
/* Note: If ITASK = 4 or 5 and the solver reaches TCRIT */
/* (within roundoff), it will return T = TCRIT (exactly) to */
/* indicate this (unless ITASK = 4 and TOUT comes before TCRIT, */
/* in which case answers at t = TOUT are returned first). */
/* ISTATE = an index used for input and output to specify */
/* the state of the calculation. */
/* On input, the values of ISTATE are as follows. */
/* 1 means this is the first call for the problem */
/* (initializations will be done). See note below. */
/* 2 means this is not the first call, and the calculation */
/* is to continue normally, with no change in any input */
/* parameters except possibly TOUT and ITASK. */
/* (If ITOL, RTOL, and/or ATOL are changed between calls */
/* with ISTATE = 2, the new values will be used but not */
/* tested for legality.) */
/* 3 means this is not the first call, and the */
/* calculation is to continue normally, but with */
/* a change in input parameters other than */
/* TOUT and ITASK. Changes are allowed in */
/* NEQ, ITOL, RTOL, ATOL, IOPT, LRW, LIW, JT, ML, MU, */
/* and any optional inputs except H0, MXORDN, and MXORDS. */
/* (See IWORK description for ML and MU.) */
/* Note: A preliminary call with TOUT = T is not counted */
/* as a first call here, as no initialization or checking of */
/* input is done. (Such a call is sometimes useful for the */
/* purpose of outputting the initial conditions.) */
/* Thus the first call for which TOUT .ne. T requires */
/* ISTATE = 1 on input. */
/* On output, ISTATE has the following values and meanings. */
/* 1 means nothing was done; TOUT = T and ISTATE = 1 on input. */
/* 2 means the integration was performed successfully. */
/* -1 means an excessive amount of work (more than MXSTEP */
/* steps) was done on this call, before completing the */
/* requested task, but the integration was otherwise */
/* successful as far as T. (MXSTEP is an optional input */
/* and is normally 500.) To continue, the user may */
/* simply reset ISTATE to a value .gt. 1 and call again */
/* (the excess work step counter will be reset to 0). */
/* In addition, the user may increase MXSTEP to avoid */
/* this error return (see below on optional inputs). */
/* -2 means too much accuracy was requested for the precision */
/* of the machine being used. This was detected before */
/* completing the requested task, but the integration */
/* was successful as far as T. To continue, the tolerance */
/* parameters must be reset, and ISTATE must be set */
/* to 3. The optional output TOLSF may be used for this */
/* purpose. (Note: If this condition is detected before */
/* taking any steps, then an illegal input return */
/* (ISTATE = -3) occurs instead.) */
/* -3 means illegal input was detected, before taking any */
/* integration steps. See written message for details. */
/* Note: If the solver detects an infinite loop of calls */
/* to the solver with illegal input, it will cause */
/* the run to stop. */
/* -4 means there were repeated error test failures on */
/* one attempted step, before completing the requested */
/* task, but the integration was successful as far as T. */
/* The problem may have a singularity, or the input */
/* may be inappropriate. */
/* -5 means there were repeated convergence test failures on */
/* one attempted step, before completing the requested */
/* task, but the integration was successful as far as T. */
/* This may be caused by an inaccurate Jacobian matrix, */
/* if one is being used. */
/* -6 means EWT(i) became zero for some i during the */
/* integration. Pure relative error control (ATOL(i)=0.0) */
/* was requested on a variable which has now vanished. */
/* The integration was successful as far as T. */
/* -7 means the length of RWORK and/or IWORK was too small to */
/* proceed, but the integration was successful as far as T. */
/* This happens when DLSODA chooses to switch methods */
/* but LRW and/or LIW is too small for the new method. */
/* Note: Since the normal output value of ISTATE is 2, */
/* it does not need to be reset for normal continuation. */
/* Also, since a negative input value of ISTATE will be */
/* regarded as illegal, a negative output value requires the */
/* user to change it, and possibly other inputs, before */
/* calling the solver again. */
/* IOPT = an int flag to specify whether or not any optional */
/* inputs are being used on this call. Input only. */
/* The optional inputs are listed separately below. */
/* IOPT = 0 means no optional inputs are being used. */
/* default values will be used in all cases. */
/* IOPT = 1 means one or more optional inputs are being used. */
/* RWORK = a real array (double precision) for work space, and (in the */
/* first 20 words) for conditional and optional inputs and */
/* optional outputs. */
/* As DLSODA switches automatically between stiff and nonstiff */
/* methods, the required length of RWORK can change during the */
/* problem. Thus the RWORK array passed to DLSODA can either */
/* have a static (fixed) length large enough for both methods, */
/* or have a dynamic (changing) length altered by the calling */
/* program in response to output from DLSODA. */
/* --- Fixed Length Case --- */
/* If the RWORK length is to be fixed, it should be at least */
/* MAX (LRN, LRS), */
/* where LRN and LRS are the RWORK lengths required when the */
/* current method is nonstiff or stiff, respectively. */
/* The separate RWORK length requirements LRN and LRS are */
/* as follows: */
/* IF NEQ is constant and the maximum method orders have */
/* their default values, then */
/* LRN = 20 + 16*NEQ, */
/* LRS = 22 + 9*NEQ + NEQ**2 if JT = 1 or 2, */
/* LRS = 22 + 10*NEQ + (2*ML+MU)*NEQ if JT = 4 or 5. */
/* Under any other conditions, LRN and LRS are given by: */
/* LRN = 20 + NYH*(MXORDN+1) + 3*NEQ, */
/* LRS = 20 + NYH*(MXORDS+1) + 3*NEQ + LMAT, */
/* where */
/* NYH = the initial value of NEQ, */
/* MXORDN = 12, unless a smaller value is given as an */
/* optional input, */
/* MXORDS = 5, unless a smaller value is given as an */
/* optional input, */
/* LMAT = length of matrix work space: */
/* LMAT = NEQ**2 + 2 if JT = 1 or 2, */
/* LMAT = (2*ML + MU + 1)*NEQ + 2 if JT = 4 or 5. */
/* --- Dynamic Length Case --- */
/* If the length of RWORK is to be dynamic, then it should */
/* be at least LRN or LRS, as defined above, depending on the */
/* current method. Initially, it must be at least LRN (since */
/* DLSODA starts with the nonstiff method). On any return */
/* from DLSODA, the optional output MCUR indicates the current */
/* method. If MCUR differs from the value it had on the */
/* previous return, or if there has only been one call to */
/* DLSODA and MCUR is now 2, then DLSODA has switched */
/* methods during the last call, and the length of RWORK */
/* should be reset (to LRN if MCUR = 1, or to LRS if */
/* MCUR = 2). (An increase in the RWORK length is required */
/* if DLSODA returned ISTATE = -7, but not otherwise.) */
/* After resetting the length, call DLSODA with ISTATE = 3 */
/* to signal that change. */
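/* For example, for the 3-equation problem above with JT = 2 and the */
/* default maximum orders, LRN = 20 + 16*3 = 68 and */
/* LRS = 22 + 9*3 + 3**2 = 58, so a fixed RWORK length of */
/* MAX(68,58) = 68 suffices (the summary above uses the slightly larger */
/* bound 22 + NEQ*MAX(16,NEQ+9) = 70, and LIW = 20 + NEQ = 23). */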
/* LRW = the length of the array RWORK, as declared by the user. */
/* (This will be checked by the solver.) */
/* IWORK = an int array for work space. */
/* As DLSODA switches automatically between stiff and nonstiff */
/* methods, the required length of IWORK can change during */
/* the problem, between */
/* LIS = 20 + NEQ and LIN = 20, */
/* respectively. Thus the IWORK array passed to DLSODA can */
/* either have a fixed length of at least 20 + NEQ, or have a */
/* dynamic length of at least LIN or LIS, depending on the */
/* current method. The comments on dynamic length under */
/* RWORK above apply here. Initially, this length need */
/* only be at least LIN = 20. */
/* The first few words of IWORK are used for conditional and */
/* optional inputs and optional outputs. */
/* The following 2 words in IWORK are conditional inputs: */
/* IWORK(1) = ML these are the lower and upper */
/* IWORK(2) = MU half-bandwidths, respectively, of the */
/* banded Jacobian, excluding the main diagonal. */
/* The band is defined by the matrix locations */
/* (i,j) with i-ML .le. j .le. i+MU. ML and MU */
/* must satisfy 0 .le. ML,MU .le. NEQ-1. */
/* These are required if JT is 4 or 5, and */
/* ignored otherwise. ML and MU may in fact be */
/* the band parameters for a matrix to which */
/* df/dy is only approximately equal. */
/* LIW = the length of the array IWORK, as declared by the user. */
/* (This will be checked by the solver.) */
/* Note: The base addresses of the work arrays must not be */
/* altered between calls to DLSODA for the same problem. */
/* The contents of the work arrays must not be altered */
/* between calls, except possibly for the conditional and */
/* optional inputs, and except for the last 3*NEQ words of RWORK. */
/* The latter space is used for internal scratch space, and so is */
/* available for use by the user outside DLSODA between calls, if */
/* desired (but not for use by F or JAC). */
/* JAC = the name of the user-supplied routine to compute the */
/* Jacobian matrix, df/dy, if JT = 1 or 4. The JAC routine */
/* is optional, but if the problem is expected to be stiff much */
/* of the time, you are encouraged to supply JAC, for the sake */
/* of efficiency. (Alternatively, set JT = 2 or 5 to have */
/* DLSODA compute df/dy internally by difference quotients.) */
/* If and when DLSODA uses df/dy, it treats this NEQ by NEQ */
/* matrix either as full (JT = 1 or 2), or as banded (JT = */
/* 4 or 5) with half-bandwidths ML and MU (discussed under */
/* IWORK above). In either case, if JT = 1 or 4, the JAC */
/* routine must compute df/dy as a function of the scalar t */
/* and the vector y. It is to have the form */
/* SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD) */
/* DOUBLE PRECISION T, Y(*), PD(NROWPD,*) */
/* where NEQ, T, Y, ML, MU, and NROWPD are input and the array */
/* PD is to be loaded with partial derivatives (elements of */
/* the Jacobian matrix) on output. PD must be given a first */
/* dimension of NROWPD. T and Y have the same meaning as in */
/* Subroutine F. */
/* In the full matrix case (JT = 1), ML and MU are */
/* ignored, and the Jacobian is to be loaded into PD in */
/* columnwise manner, with df(i)/dy(j) loaded into PD(i,j). */
/* In the band matrix case (JT = 4), the elements */
/* within the band are to be loaded into PD in columnwise */
/* manner, with diagonal lines of df/dy loaded into the rows */
/* of PD. Thus df(i)/dy(j) is to be loaded into PD(i-j+MU+1,j). */
/* ML and MU are the half-bandwidth parameters (see IWORK). */
/* The locations in PD in the two triangular areas which */
/* correspond to nonexistent matrix elements can be ignored */
/* or loaded arbitrarily, as they are overwritten by DLSODA. */
/* JAC need not provide df/dy exactly. A crude */
/* approximation (possibly with a smaller bandwidth) will do. */
/* In either case, PD is preset to zero by the solver, */
/* so that only the nonzero elements need be loaded by JAC. */
/* Each call to JAC is preceded by a call to F with the same */
/* arguments NEQ, T, and Y. Thus to gain some efficiency, */
/* intermediate quantities shared by both calculations may be */
/* saved in a user Common block by F and not recomputed by JAC, */
/* if desired. Also, JAC may alter the Y array, if desired. */
/* JAC must be declared External in the calling program. */
/* Subroutine JAC may access user-defined quantities in */
/* NEQ(2),... and/or in Y(NEQ(1)+1),... if NEQ is an array */
/* (dimensioned in JAC) and/or Y has length exceeding NEQ(1). */
/* See the descriptions of NEQ and Y above. */
/* JT = Jacobian type indicator. Used only for input. */
/* JT specifies how the Jacobian matrix df/dy will be */
/* treated, if and when DLSODA requires this matrix. */
/* JT has the following values and meanings: */
/* 1 means a user-supplied full (NEQ by NEQ) Jacobian. */
/* 2 means an internally generated (difference quotient) full */
/* Jacobian (using NEQ extra calls to F per df/dy value). */
/* 4 means a user-supplied banded Jacobian. */
/* 5 means an internally generated banded Jacobian (using */
/* ML+MU+1 extra calls to F per df/dy evaluation). */
/* If JT = 1 or 4, the user must supply a Subroutine JAC */
/* (the name is arbitrary) as described above under JAC. */
/* If JT = 2 or 5, a dummy argument can be used. */
/* ----------------------------------------------------------------------- */
/* Optional Inputs. */
/* The following is a list of the optional inputs provided for in the */
/* call sequence. (See also Part 2.) For each such input variable, */
/* this table lists its name as used in this documentation, its */
/* location in the call sequence, its meaning, and the default value. */
/* The use of any of these inputs requires IOPT = 1, and in that */
/* case all of these inputs are examined. A value of zero for any */
/* of these optional inputs will cause the default value to be used. */
/* Thus to use a subset of the optional inputs, simply preload */
/* locations 5 to 10 in RWORK and IWORK to 0.0 and 0 respectively, and */
/* then set those of interest to nonzero values. */
/* Name Location Meaning and Default Value */
/* H0 RWORK(5) the step size to be attempted on the first step. */
/* The default value is determined by the solver. */
/* HMAX RWORK(6) the maximum absolute step size allowed. */
/* The default value is infinite. */
/* HMIN RWORK(7) the minimum absolute step size allowed. */
/* The default value is 0. (This lower bound is not */
/* enforced on the final step before reaching TCRIT */
/* when ITASK = 4 or 5.) */
/* IXPR IWORK(5) flag to generate extra printing at method switches. */
/* IXPR = 0 means no extra printing (the default). */
/* IXPR = 1 means print data on each switch. */
/* T, H, and NST will be printed on the same logical */
/* unit as used for error messages. */
/* MXSTEP IWORK(6) maximum number of (internally defined) steps */
/* allowed during one call to the solver. */
/* The default value is 500. */
/* MXHNIL IWORK(7) maximum number of messages printed (per problem) */
/* warning that T + H = T on a step (H = step size). */
/* This must be positive to result in a non-default */
/* value. The default value is 10. */
/* MXORDN IWORK(8) the maximum order to be allowed for the nonstiff */
/* (Adams) method. the default value is 12. */
/* if MXORDN exceeds the default value, it will */
/* be reduced to the default value. */
/* MXORDN is held constant during the problem. */
/* MXORDS IWORK(9) the maximum order to be allowed for the stiff */
/* (BDF) method. The default value is 5. */
/* If MXORDS exceeds the default value, it will */
/* be reduced to the default value. */
/* MXORDS is held constant during the problem. */
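/* Illustrative sketch only (not from the original prologue): in this */
/* 0-based C translation, RWORK(k) and IWORK(k) correspond to rwork[k-1] */
/* and iwork[k-1], so enabling the optional inputs and raising MXSTEP to, */
/* say, 2000 could look like: */
/* iopt = 1; */
/* for (int k = 4; k <= 9; ++k) { rwork[k] = 0.; iwork[k] = 0; } */
/* iwork[5] = 2000; */
/* (iwork[5] is MXSTEP, i.e. IWORK(6) in the Fortran numbering above). */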
/* ----------------------------------------------------------------------- */
/* Optional Outputs. */
/* As optional additional output from DLSODA, the variables listed */
/* below are quantities related to the performance of DLSODA */
/* which are available to the user. These are communicated by way of */
/* the work arrays, but also have internal mnemonic names as shown. */
/* except where stated otherwise, all of these outputs are defined */
/* on any successful return from DLSODA, and on any return with */
/* ISTATE = -1, -2, -4, -5, or -6. On an illegal input return */
/* (ISTATE = -3), they will be unchanged from their existing values */
/* (if any), except possibly for TOLSF, LENRW, and LENIW. */
/* On any error return, outputs relevant to the error will be defined, */
/* as noted below. */
/* Name Location Meaning */
/* HU RWORK(11) the step size in t last used (successfully). */
/* HCUR RWORK(12) the step size to be attempted on the next step. */
/* TCUR RWORK(13) the current value of the independent variable */
/* which the solver has actually reached, i.e. the */
/* current internal mesh point in t. On output, TCUR */
/* will always be at least as far as the argument */
/* T, but may be farther (if interpolation was done). */
/* TOLSF RWORK(14) a tolerance scale factor, greater than 1.0, */
/* computed when a request for too much accuracy was */
/* detected (ISTATE = -3 if detected at the start of */
/* the problem, ISTATE = -2 otherwise). If ITOL is */
/* left unaltered but RTOL and ATOL are uniformly */
/* scaled up by a factor of TOLSF for the next call, */
/* then the solver is deemed likely to succeed. */
/* (The user may also ignore TOLSF and alter the */
/* tolerance parameters in any other way appropriate.) */
/* TSW RWORK(15) the value of t at the time of the last method */
/* switch, if any. */
/* NST IWORK(11) the number of steps taken for the problem so far. */
/* NFE IWORK(12) the number of f evaluations for the problem so far. */
/* NJE IWORK(13) the number of Jacobian evaluations (and of matrix */
/* LU decompositions) for the problem so far. */
/* NQU IWORK(14) the method order last used (successfully). */
/* NQCUR IWORK(15) the order to be attempted on the next step. */
/* IMXER IWORK(16) the index of the component of largest magnitude in */
/* the weighted local error vector ( E(i)/EWT(i) ), */
/* on an error return with ISTATE = -4 or -5. */
/* LENRW IWORK(17) the length of RWORK actually required, assuming */
/* that the length of RWORK is to be fixed for the */
/* rest of the problem, and that switching may occur. */
/* This is defined on normal returns and on an illegal */
/* input return for insufficient storage. */
/* LENIW IWORK(18) the length of IWORK actually required, assuming */
/* that the length of IWORK is to be fixed for the */
/* rest of the problem, and that switching may occur. */
/* This is defined on normal returns and on an illegal */
/* input return for insufficient storage. */
/* MUSED IWORK(19) the method indicator for the last successful step: */
/* 1 means Adams (nonstiff), 2 means BDF (stiff). */
/* MCUR IWORK(20) the current method indicator: */
/* 1 means Adams (nonstiff), 2 means BDF (stiff). */
/* This is the method to be attempted */
/* on the next step. Thus it differs from MUSED */
/* only if a method switch has just been made. */
/* The following two arrays are segments of the RWORK array which */
/* may also be of interest to the user as optional outputs. */
/* For each array, the table below gives its internal name, */
/* its base address in RWORK, and its description. */
/* Name Base Address Description */
/* YH 21 the Nordsieck history array, of size NYH by */
/* (NQCUR + 1), where NYH is the initial value */
/* of NEQ. For j = 0,1,...,NQCUR, column j+1 */
/* of YH contains HCUR**j/factorial(j) times */
/* the j-th derivative of the interpolating */
/* polynomial currently representing the solution, */
/* evaluated at T = TCUR. */
/* ACOR LACOR array of size NEQ used for the accumulated */
/* (from Common corrections on each step, scaled on output */
/* as noted) to represent the estimated local error in y */
/* on the last step. This is the vector E in */
/* the description of the error control. It is */
/* defined only on a successful return from */
/* DLSODA. The base address LACOR is obtained by */
/* including in the user's program the */
/* following 2 lines: */
/* COMMON /DLS001/ RLS(218), ILS(37) */
/* LACOR = ILS(22) */
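/* In this 0-based C translation the Fortran indices above are shifted by */
/* one (IWORK(k) is iwork[k-1]), as can be seen where LENRW and LENIW are */
/* stored below; assuming the same shift for the remaining outputs, NST is */
/* read as iwork[10], NFE as iwork[11], HU as rwork[10], TSW as rwork[14]. */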
/* ----------------------------------------------------------------------- */
/* Part 2. Other Routines Callable. */
/* The following are optional calls which the user may make to */
/* gain additional capabilities in conjunction with DLSODA. */
/* (The routines XSETUN and XSETF are designed to conform to the */
/* SLATEC error handling package.) */
/* Form of Call Function */
/* CALL XSETUN(LUN) set the logical unit number, LUN, for */
/* output of messages from DLSODA, if */
/* the default is not desired. */
/* The default value of LUN is 6. */
/* CALL XSETF(MFLAG) set a flag to control the printing of */
/* messages by DLSODA. */
/* MFLAG = 0 means do not print. (Danger: */
/* This risks losing valuable information.) */
/* MFLAG = 1 means print (the default). */
/* Either of the above calls may be made at */
/* any time and will take effect immediately. */
/* CALL DSRCMA(RSAV,ISAV,JOB) saves and restores the contents of */
/* the internal Common blocks used by */
/* DLSODA (see Part 3 below). */
/* RSAV must be a real array of length 240 */
/* or more, and ISAV must be an int */
/* array of length 46 or more. */
/* JOB=1 means save Common into RSAV/ISAV. */
/* JOB=2 means restore Common from RSAV/ISAV. */
/* DSRCMA is useful if one is */
/* interrupting a run and restarting */
/* later, or alternating between two or */
/* more problems solved with DLSODA. */
/* CALL DINTDY(,,,,,) provide derivatives of y, of various */
/* (see below) orders, at a specified point t, if */
/* desired. It may be called only after */
/* a successful return from DLSODA. */
/* The detailed instructions for using DINTDY are as follows. */
/* The form of the call is: */
/* CALL DINTDY (T, K, RWORK(21), NYH, DKY, IFLAG) */
/* The input parameters are: */
/* T = value of independent variable where answers are desired */
/* (normally the same as the T last returned by DLSODA). */
/* For valid results, T must lie between TCUR - HU and TCUR. */
/* (See optional outputs for TCUR and HU.) */
/* K = int order of the derivative desired. K must satisfy */
/* 0 .le. K .le. NQCUR, where NQCUR is the current order */
/* (see optional outputs). The capability corresponding */
/* to K = 0, i.e. computing y(T), is already provided */
/* by DLSODA directly. Since NQCUR .ge. 1, the first */
/* derivative dy/dt is always available with DINTDY. */
/* RWORK(21) = the base address of the history array YH. */
/* NYH = column length of YH, equal to the initial value of NEQ. */
/* The output parameters are: */
/* DKY = a real array of length NEQ containing the computed value */
/* of the K-th derivative of y(t). */
/* IFLAG = int flag, returned as 0 if K and T were legal, */
/* -1 if K was illegal, and -2 if T was illegal. */
/* On an error return, a message is also written. */
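/* Illustrative sketch only (not from the original prologue): in this C */
/* translation the equivalent call, for the first derivative at a time */
/* twant within the last step, mirrors the internal calls further below */
/* (dky and iflag are hypothetical locals): */
/* double dky[3]; int iflag; */
/* dintdy_(&twant, 1, &rwork[20], &nyh, dky, &iflag, common); */
/* where &rwork[20] is RWORK(21), the base of the YH history array. */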
/* ----------------------------------------------------------------------- */
/* Part 3. Common Blocks. */
/* If DLSODA is to be used in an overlay situation, the user */
/* must declare, in the primary overlay, the variables in: */
/* (1) the call sequence to DLSODA, and */
/* (2) the two internal Common blocks */
/* /DLS001/ of length 255 (218 double precision words */
/* followed by 37 int words), */
/* /DLSA01/ of length 31 (22 double precision words */
/* followed by 9 int words). */
/* If DLSODA is used on a system in which the contents of internal */
/* Common blocks are not preserved between calls, the user should */
/* declare the above Common blocks in the calling program to insure */
/* that their contents are preserved. */
/* If the solution of a given problem by DLSODA is to be interrupted */
/* and then later continued, such as when restarting an interrupted run */
/* or alternating between two or more problems, the user should save, */
/* following the return from the last DLSODA call prior to the */
/* interruption, the contents of the call sequence variables and the */
/* internal Common blocks, and later restore these values before the */
/* next DLSODA call for that problem. To save and restore the Common */
/* blocks, use Subroutine DSRCMA (see Part 2 above). */
/* ----------------------------------------------------------------------- */
/* Part 4. Optionally Replaceable Solver Routines. */
/* Below is a description of a routine in the DLSODA package which */
/* relates to the measurement of errors, and can be */
/* replaced by a user-supplied version, if desired. However, since such */
/* a replacement may have a major impact on performance, it should be */
/* done only when absolutely necessary, and only with great caution. */
/* (Note: The means by which the package version of a routine is */
/* superseded by the user's version may be system-dependent.) */
/* (a) DEWSET. */
/* The following subroutine is called just before each internal */
/* integration step, and sets the array of error weights, EWT, as */
/* described under ITOL/RTOL/ATOL above: */
/* Subroutine DEWSET (NEQ, ITOL, RTOL, ATOL, YCUR, EWT) */
/* where NEQ, ITOL, RTOL, and ATOL are as in the DLSODA call sequence, */
/* YCUR contains the current dependent variable vector, and */
/* EWT is the array of weights set by DEWSET. */
/* If the user supplies this subroutine, it must return in EWT(i) */
/* (i = 1,...,NEQ) a positive quantity suitable for comparing errors */
/* in y(i) to. The EWT array returned by DEWSET is passed to the */
/* DMNORM routine, and also used by DLSODA in the computation */
/* of the optional output IMXER, and the increments for difference */
/* quotient Jacobians. */
/* In the user-supplied version of DEWSET, it may be desirable to use */
/* the current values of derivatives of y. Derivatives up to order NQ */
/* are available from the history array YH, described above under */
/* optional outputs. In DEWSET, YH is identical to the YCUR array, */
/* extended to NQ + 1 columns with a column length of NYH and scale */
/* factors of H**j/factorial(j). On the first call for the problem, */
/* given by NST = 0, NQ is 1 and H is temporarily set to 1.0. */
/* NYH is the initial value of NEQ. The quantities NQ, H, and NST */
/* can be obtained by including in DEWSET the statements: */
/* DOUBLE PRECISION RLS */
/* COMMON /DLS001/ RLS(218),ILS(37) */
/* NQ = ILS(33) */
/* NST = ILS(34) */
/* H = RLS(212) */
/* Thus, for example, the current value of dy/dt can be obtained as */
/* YCUR(NYH+i)/H (i=1,...,NEQ) (and the division by H is */
/* unnecessary when NST = 0). */
/* ----------------------------------------------------------------------- */
/* ***REVISION HISTORY (YYYYMMDD) */
/* 19811102 DATE WRITTEN */
/* 19820126 Fixed bug in tests of work space lengths; */
/* minor corrections in main prologue and comments. */
/* 19870330 Major update: corrected comments throughout; */
/* removed TRET from Common; rewrote EWSET with 4 loops; */
/* fixed t test in INTDY; added Cray directives in STODA; */
/* in STODA, fixed DELP init. and logic around dprja_ call; */
/* combined routines to save/restore Common; */
/* passed LEVEL = 0 in error message calls (except run abort). */
/* 19970225 Fixed lines setting JSTART = -2 in Subroutine LSODA. */
/* 20010425 Major update: convert source lines to upper case; */
/* added *DECK lines; changed from 1 to * in dummy dimensions; */
/* changed names R1MACH/D1MACH to RUMACH/DUMACH; */
/* renamed routines for uniqueness across single/double prec.; */
/* converted intrinsic names to generic form; */
/* removed ILLIN and NTREP (data loaded) from Common; */
/* removed all 'own' variables from Common; */
/* changed error messages to quoted strings; */
/* replaced XERRWV/XERRWD with 1993 revised version; */
/* converted prologues, comments, error messages to mixed case; */
/* numerous corrections to prologues and internal comments. */
/* 20010507 Converted single precision source to double precision. */
/* 20010613 Revised excess accuracy test (to match rest of ODEPACK). */
/* 20010808 Fixed bug in DPRJA (matrix in DBNORM call). */
/* 20020502 Corrected declarations in descriptions of user routines. */
/* 20031105 Restored 'own' variables to Common blocks, to enable */
/* interrupt/restart feature. */
/* 20031112 Added SAVE statements for data-loaded constants. */
/* ----------------------------------------------------------------------- */
/* Other routines in the DLSODA package. */
/* In addition to Subroutine DLSODA, the DLSODA package includes the */
/* following subroutines and function routines: */
/* DINTDY computes an interpolated value of the y vector at t = TOUT. */
/* DSTODA is the core integrator, which does one step of the */
/* integration and the associated error control. */
/* DCFODE sets all method coefficients and test constants. */
/* DPRJA computes and preprocesses the Jacobian matrix J = df/dy */
/* and the Newton iteration matrix P = I - h*l0*J. */
/* DSOLSY manages solution of linear system in chord iteration. */
/* DEWSET sets the error weight vector EWT before each step. */
/* DMNORM computes the weighted max-norm of a vector. */
/* DFNORM computes the norm of a full matrix consistent with the */
/* weighted max-norm on vectors. */
/* DBNORM computes the norm of a band matrix consistent with the */
/* weighted max-norm on vectors. */
/* DSRCMA is a user-callable routine to save and restore */
/* the contents of the internal Common blocks. */
/* DGEFA and DGESL are routines from LINPACK for solving full */
/* systems of linear algebraic equations. */
/* DGBFA and DGBSL are routines from LINPACK for solving banded */
/* linear systems. */
/* DUMACH computes the unit roundoff in a machine-independent manner. */
/* XERRWD, XSETUN, XSETF, IXSAV, and IUMACH handle the printing of all */
/* error messages and warnings. XERRWD is machine-dependent. */
/* Note: DMNORM, DFNORM, DBNORM, DUMACH, IXSAV, and IUMACH are */
/* function routines. All the others are subroutines. */
/* ----------------------------------------------------------------------- */
/* ----------------------------------------------------------------------- */
/* The following two internal Common blocks contain */
/* (a) variables which are local to any subroutine but whose values must */
/* be preserved between calls to the routine ("own" variables), and */
/* (b) variables which are communicated between subroutines. */
/* The block DLS001 is declared in subroutines DLSODA, DINTDY, DSTODA, */
/* DPRJA, and DSOLSY. */
/* The block DLSA01 is declared in subroutines DLSODA, DSTODA, and DPRJA. */
/* Groups of variables are replaced by dummy arrays in the Common */
/* declarations in routines where those variables are not used. */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--neq;
//--y;
//--rtol;
//--atol;
//--rwork;
// --iwork;
/* Function Body */
/* ----------------------------------------------------------------------- */
/* Block A. */
/* This code block is executed on every call. */
/* It tests ISTATE and ITASK for legality and branches appropriately. */
/* If ISTATE .gt. 1 but the flag INIT shows that initialization has */
/* not yet been done, an error return occurs. */
/* If ISTATE = 1 and TOUT = T, return immediately. */
/* ----------------------------------------------------------------------- */
if (*istate < 1 || *istate > 3) {
goto L601;
}
if (*itask < 1 || *itask > 5) {
goto L602;
}
if (*istate == 1) {
goto L10;
}
if (init == 0) {
goto L603;
}
if (*istate == 2) {
goto L200;
}
goto L20;
L10:
init = 0;
if (*tout == *t) {
return 0;
}
/* ----------------------------------------------------------------------- */
/* Block B. */
/* The next code block is executed for the initial call (ISTATE = 1), */
/* or for a continuation call with parameter changes (ISTATE = 3). */
/* It contains checking of all inputs and various initializations. */
/* First check legality of the non-optional inputs NEQ, ITOL, IOPT, */
/* JT, ML, and MU. */
/* ----------------------------------------------------------------------- */
L20:
if (neq[0] <= 0) { //fixed
goto L604;
}
if (*istate == 1) {
goto L25;
}
if (neq[0] > n) { //fixed
goto L605;
}
L25:
n = neq[0]; //fixed
if (*itol < 1 || *itol > 4) {
goto L606;
}
if (*iopt < 0 || *iopt > 1) {
goto L607;
}
if (*jt == 3 || *jt < 1 || *jt > 5) {
goto L608;
}
jtyp = *jt;
if (*jt <= 2) {
goto L30;
}
ml = iwork[0];
mu = iwork[1];
if (ml < 0 || ml >= n) {
goto L609;
}
if (mu < 0 || mu >= n) {
goto L610;
}
L30:
/* Next process and check the optional inputs. -------------------------- */
if (*iopt == 1) {
goto L40;
}
ixpr = 0;
mxstep = mxstp0;
mxhnil = mxhnl0;
hmxi = 0.;
hmin = 0.;
if (*istate != 1) {
goto L60;
}
h0 = 0.;
mxordn = mord[0];
mxords = mord[1];
goto L60;
L40:
ixpr = iwork[4];
if (ixpr < 0 || ixpr > 1) {
goto L611;
}
mxstep = iwork[5];
if (mxstep < 0) {
goto L612;
}
if (mxstep == 0) {
mxstep = mxstp0;
}
mxhnil = iwork[6];
if (mxhnil < 0) {
goto L613;
}
if (mxhnil == 0) {
mxhnil = mxhnl0;
}
if (*istate != 1) {
goto L50;
}
h0 = rwork[4];
mxordn = iwork[7];
if (mxordn < 0) {
goto L628;
}
if (mxordn == 0) {
mxordn = 100;
}
mxordn = min(mxordn,mord[0]);
mxords = iwork[8];
if (mxords < 0) {
goto L629;
}
if (mxords == 0) {
mxords = 100;
}
mxords = min(mxords,mord[1]);
if ((*tout - *t) * h0 < 0.) {
goto L614;
}
L50:
hmax = rwork[5];
if (hmax < 0.) {
goto L615;
}
hmxi = 0.;
if (hmax > 0.) {
hmxi = 1. / hmax;
}
hmin = rwork[6];
if (hmin < 0.) {
goto L616;
}
/* ----------------------------------------------------------------------- */
/* Set work array pointers and check lengths LRW and LIW. */
/* If ISTATE = 1, METH is initialized to 1 here to facilitate the */
/* checking of work space lengths. */
/* Pointers to segments of RWORK and IWORK are named by prefixing L to */
/* the name of the segment. E.g., the segment YH starts at RWORK(LYH). */
/* Segments of RWORK (in order) are denoted YH, WM, EWT, SAVF, ACOR. */
/* If the lengths provided are insufficient for the current method, */
/* an error return occurs. This is treated as illegal input on the */
/* first call, but as a problem interruption with ISTATE = -7 on a */
/* continuation call. If the lengths are sufficient for the current */
/* method but not for both methods, a warning message is sent. */
/* ----------------------------------------------------------------------- */
L60:
if (*istate == 1) {
meth = 1;
}
if (*istate == 1) {
nyh = n;
}
lyh = 21;
len1n = (mxordn + 1) * nyh + 20;
len1s = (mxords + 1) * nyh + 20;
lwm = len1s + 1;
if (*jt <= 2) {
lenwm = n * n + 2;
}
if (*jt >= 4) {
lenwm = ((ml << 1) + mu + 1) * n + 2;
}
len1s += lenwm;
len1c = len1n;
if (meth == 2) {
len1c = len1s;
}
len1 = max(len1n,len1s);
len2 = n * 3;
lenrw = len1 + len2;
lenrwc = len1c + len2;
iwork[16] = lenrw;
liwm = 1;
leniw = n + 20;
leniwc = 20;
if (meth == 2) {
leniwc = leniw;
}
iwork[17] = leniw;
if (*istate == 1 && *lrw < lenrwc) {
goto L617;
}
if (*istate == 1 && *liw < leniwc) {
goto L618;
}
if (*istate == 3 && *lrw < lenrwc) {
goto L550;
}
if (*istate == 3 && *liw < leniwc) {
goto L555;
}
lewt = len1 + 1;
insufr = 0;
if (*lrw >= lenrw) {
goto L65;
}
insufr = 2;
lewt = len1c + 1;
L65:
lsavf = lewt + n;
lacor = lsavf + n;
insufi = 0;
if (*liw >= leniw) {
goto L70;
}
insufi = 2;
L70:
/* Check RTOL and ATOL for legality. ------------------------------------ */
rtoli = rtol[0];
atoli = atol[0];
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
// for (i__ = 0; i__ < i__1; ++i__) {
if (*itol >= 3) {
rtoli = rtol[i__ -1];
}
if (*itol == 2 || *itol == 4) {
atoli = atol[i__ -1];
}
if (rtoli < 0.) {
goto L619;
}
if (atoli < 0.) {
goto L620;
}
/* L75: */
}
if (*istate == 1) {
goto L100;
}
/* If ISTATE = 3, set flag to signal parameter changes to DSTODA. ------- */
jstart = -1;
if (n == nyh) {
goto L200;
}
/* NEQ was reduced. Zero part of YH to avoid undefined references. ----- */
i1 = lyh + l * nyh;
i2 = lyh + (maxord + 1) * nyh - 1;
if (i1 > i2) {
goto L200;
}
i__1 = i2;
for (i__ = i1; i__ <= i__1; ++i__) {
/* L95: */
rwork[i__ -1] = 0.;
}
goto L200;
/* ----------------------------------------------------------------------- */
/* Block C. */
/* The next block is for the initial call only (ISTATE = 1). */
/* It contains all remaining initializations, the initial call to F, */
/* and the calculation of the initial step size. */
/* The error weights in EWT are inverted after being loaded. */
/* ----------------------------------------------------------------------- */
L100:
uround = dumach_(common);
tn = *t;
tsw = *t;
maxord = mxordn;
if (*itask != 4 && *itask != 5) {
goto L110;
}
tcrit = rwork[0];
if ((tcrit - *tout) * (*tout - *t) < 0.) {
goto L625;
}
if (h0 != 0. && (*t + h0 - tcrit) * h0 > 0.) {
h0 = tcrit - *t;
}
L110:
jstart = 0;
nhnil = 0;
nst = 0;
nje = 0;
nslast = 0;
hu = 0.;
nqu = 0;
mused = 0;
miter = 0;
ccmax = .3;
maxcor = 3;
msbp = 20;
mxncf = 10;
/* Initial call to F. (LF0 points to YH(*,2).) ------------------------- */
lf0 = lyh + nyh;
f(neq, t, y, &rwork[lf0 -1], comp_ode, flattenODE, offsetODE, costanti); //fixed neq y
nfe = 1;
/* Load the initial value vector in YH. --------------------------------- */
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L115: */
rwork[i__ + lyh - 1 -1] = y[i__ - 1];
}
/* Load and invert the EWT array. (H is temporarily set to 1.0.) ------- */
nq = 1;
h__ = 1.;
dewset_(&n, itol, rtol, atol, &rwork[lyh -1], &rwork[lewt -1], common);
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
if (rwork[i__ + lewt - 1 -1] <= 0.) {
goto L621;
}
/* L120: */
rwork[i__ + lewt - 1 -1] = 1. / rwork[i__ + lewt - 1 -1];
}
/* ----------------------------------------------------------------------- */
/* The coding below computes the step size, H0, to be attempted on the */
/* first step, unless the user has supplied a value for this. */
/* First check that TOUT - T differs significantly from zero. */
/* A scalar tolerance quantity TOL is computed, as MAX(RTOL(i)) */
/* if this is positive, or MAX(ATOL(i)/ABS(Y(i))) otherwise, adjusted */
/* so as to be between 100*UROUND and 1.0E-3. */
/* Then the computed value H0 is given by: */
/* H0**(-2) = 1./(TOL * w0**2) + TOL * (norm(F))**2 */
/* where w0 = MAX ( ABS(T), ABS(TOUT) ), */
/* F = the initial value of the vector f(t,y), and */
/* norm() = the weighted vector norm used throughout, given by */
/* the DMNORM function routine, and weighted by the */
/* tolerances initially loaded into the EWT array. */
/* The sign of H0 is inferred from the initial values of TOUT and T. */
/* ABS(H0) is made .le. ABS(TOUT-T) in any case. */
/* ----------------------------------------------------------------------- */
if (h0 != 0.) {
goto L180;
}
tdist = (d__1 = *tout - *t, fabs(d__1));
/* Computing MAX */
d__1 = fabs(*t), d__2 = fabs(*tout);
w0 = max(d__1,d__2);
if (tdist < uround * 2. * w0) {
goto L622;
}
tol = rtol[0];
if (*itol <= 2) {
goto L140;
}
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L130: */
/* Computing MAX */
d__1 = tol, d__2 = rtol[i__ -1];
tol = max(d__1,d__2);
}
L140:
if (tol > 0.) {
goto L160;
}
atoli = atol[0];
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
if (*itol == 2 || *itol == 4) {
atoli = atol[i__ -1];
}
ayi = (d__1 = y[i__ - 1], fabs(d__1));
if (ayi != 0.) {
/* Computing MAX */
d__1 = tol, d__2 = atoli / ayi;
tol = max(d__1,d__2);
}
/* L150: */
}
L160:
/* Computing MAX */
d__1 = tol, d__2 = uround * 100.;
tol = max(d__1,d__2);
tol = min(tol,.001);
sum = dmnorm_(&n, &rwork[lf0 -1], &rwork[lewt -1], common);
/* Computing 2nd power */
d__1 = sum;
sum = 1. / (tol * w0 * w0) + tol * (d__1 * d__1);
h0 = 1. / sqrt(sum);
h0 = min(h0,tdist);
d__1 = *tout - *t;
h0 = d_sign(&h0, &d__1);
/* Adjust H0 if necessary to meet HMAX bound. --------------------------- */
L180:
rh = fabs(h0) * hmxi;
if (rh > 1.) {
h0 /= rh;
}
/* Load H with H0 and scale YH(*,2) by H0. ------------------------------ */
h__ = h0;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L190: */
rwork[i__ + lf0 - 1 -1] = h0 * rwork[i__ + lf0 - 1 -1];
}
goto L270;
/* ----------------------------------------------------------------------- */
/* Block D. */
/* The next code block is for continuation calls only (ISTATE = 2 or 3) */
/* and is to check stop conditions before taking a step. */
/* ----------------------------------------------------------------------- */
L200:
nslast = nst;
switch (*itask) {
case 1: goto L210;
case 2: goto L250;
case 3: goto L220;
case 4: goto L230;
case 5: goto L240;
}
L210:
if ((tn - *tout) * h__ < 0.) {
goto L250;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common); //fixed y
if (iflag != 0) {
goto L627;
}
*t = *tout;
goto L420;
L220:
tp = tn - hu * (uround * 100. + 1.);
if ((tp - *tout) * h__ > 0.) {
goto L623;
}
if ((tn - *tout) * h__ < 0.) {
goto L250;
}
*t = tn;
goto L400;
L230:
tcrit = rwork[0];
if ((tn - tcrit) * h__ > 0.) {
goto L624;
}
if ((tcrit - *tout) * h__ < 0.) {
goto L625;
}
if ((tn - *tout) * h__ < 0.) {
goto L245;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common); //fixed y
if (iflag != 0) {
goto L627;
}
*t = *tout;
goto L420;
L240:
tcrit = rwork[0];
if ((tn - tcrit) * h__ > 0.) {
goto L624;
}
L245:
hmx = fabs(tn) + fabs(h__);
ihit = (d__1 = tn - tcrit, fabs(d__1)) <= uround * 100. * hmx;
if (ihit) {
*t = tcrit;
}
if (ihit) {
goto L400;
}
tnext = tn + h__ * (uround * 4. + 1.);
if ((tnext - tcrit) * h__ <= 0.) {
goto L250;
}
h__ = (tcrit - tn) * (1. - uround * 4.);
if (*istate == 2 && jstart >= 0) {
jstart = -2;
}
/* ----------------------------------------------------------------------- */
/* Block E. */
/* The next block is normally executed for all calls and contains */
/* the call to the one-step core integrator DSTODA. */
/* This is a looping point for the integration steps. */
/* First check for too many steps being taken, update EWT (if not at */
/* start of problem), check for too much accuracy being requested, and */
/* check for H below the roundoff level in T. */
/* ----------------------------------------------------------------------- */
L250:
if (meth == mused) {
goto L255;
}
if (insufr == 1) {
goto L550;
}
if (insufi == 1) {
goto L555;
}
L255:
if (nst - nslast >= mxstep) {
goto L500;
}
dewset_(&n, itol, rtol, atol, &rwork[lyh -1], &rwork[lewt -1], common);
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
if (rwork[i__ + lewt - 1 -1] <= 0.) {
goto L510;
}
/* L260: */
rwork[i__ + lewt - 1 -1] = 1. / rwork[i__ + lewt - 1 -1];
}
L270:
tolsf = uround * dmnorm_(&n, &rwork[lyh -1], &rwork[lewt -1], common);
if (tolsf <= 1.) {
goto L280;
}
tolsf *= 2.;
if (nst == 0) {
goto L626;
}
goto L520;
L280:
if (tn + h__ != tn) {
goto L290;
}
++nhnil;
if (nhnil > mxhnil) {
goto L290;
}
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Warning..Internal T (=R1) and H (=R2) are\n");
#endif
//xerrwd_(msg, &c__50, 101, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " such that in the machine, T + H = T on the next step \n");
#endif
//xerrwd_(msg, &c__60, 101, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " (H = step size). Solver will continue anyway.\n");
#endif
//xerrwd_(msg, &c__50, 101, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &h__, (ftnlen)60);
if (nhnil < mxhnil) {
goto L290;
}
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Above warning has been issued I1 times. \n");
#endif
//xerrwd_(msg, &c__50, 102, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " It will not be issued again for this problem.\n");
#endif
//xerrwd_(msg, &c__50, 102, &c__0, 1, &mxhnil, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
L290:
/* ----------------------------------------------------------------------- */
/* CALL DSTODA(NEQ,Y,YH,NYH,YH,EWT,SAVF,ACOR,WM,IWM,F,JAC,DPRJA,DSOLSY) */
/* ----------------------------------------------------------------------- */
dstoda_(neq, y, &rwork[lyh -1], &nyh, &rwork[lyh -1], &rwork[lewt -1], &rwork[lsavf -1], &rwork[lacor -1], &rwork[lwm -1], &iwork[liwm -1], f, jac, common, comp_ode, flattenODE, offsetODE, costanti, myjac, myjacoffset); //fixed neq y
kgo = 1 - kflag;
switch (kgo) {
case 1: goto L300;
case 2: goto L530;
case 3: goto L540;
}
/* ----------------------------------------------------------------------- */
/* Block F. */
/* The following block handles the case of a successful return from the */
/* core integrator (KFLAG = 0). */
/* If a method switch was just made, record TSW, reset MAXORD, */
/* set JSTART to -1 to signal DSTODA to complete the switch, */
/* and do extra printing of data if IXPR = 1. */
/* Then, in any case, check for stop conditions. */
/* ----------------------------------------------------------------------- */
L300:
init = 1;
if (meth == mused) {
goto L310;
}
tsw = tn;
maxord = mxordn;
if (meth == 2) {
maxord = mxords;
}
if (meth == 2) {
rwork[lwm -1] = sqrt(uround);
}
insufr = min(insufr,1);
insufi = min(insufi,1);
jstart = -1;
if (ixpr == 0) {
goto L310;
}
if (meth == 2) {
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- A switch to the BDF (stiff) method has occurred\n");
#endif
//xerrwd_(msg, &c__60, 105, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
}
if (meth == 1) {
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- A switch to the Adams (nonstiff) method has occ\n");
#endif
//xerrwd_(msg, &c__60, 106, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
}
#ifdef EMULATION_MODE
fprintf(stderr, " at T = R1, tentative step size H = R2, step NST = I1 \n");
#endif
//xerrwd_(msg, &c__60, 107, &c__0, 1, &nst, &c__0, &c__2, &tn, &h__, (ftnlen)60);
L310:
switch (*itask) {
case 1: goto L320;
case 2: goto L400;
case 3: goto L330;
case 4: goto L340;
case 5: goto L350;
}
/* ITASK = 1. If TOUT has been reached, interpolate. ------------------- */
L320:
if ((tn - *tout) * h__ < 0.) {
goto L250;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common);
*t = *tout;
goto L420;
/* ITASK = 3. Jump to exit if TOUT was reached. ------------------------ */
L330:
if ((tn - *tout) * h__ >= 0.) {
goto L400;
}
goto L250;
/* ITASK = 4. See if TOUT or TCRIT was reached. Adjust H if necessary. */
L340:
if ((tn - *tout) * h__ < 0.) {
goto L345;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common);
*t = *tout;
goto L420;
L345:
hmx = fabs(tn) + fabs(h__);
ihit = (d__1 = tn - tcrit, fabs(d__1)) <= uround * 100. * hmx;
if (ihit) {
goto L400;
}
tnext = tn + h__ * (uround * 4. + 1.);
if ((tnext - tcrit) * h__ <= 0.) {
goto L250;
}
h__ = (tcrit - tn) * (1. - uround * 4.);
if (jstart >= 0) {
jstart = -2;
}
goto L250;
/* ITASK = 5. See if TCRIT was reached and jump to exit. --------------- */
L350:
hmx = fabs(tn) + fabs(h__);
ihit = (d__1 = tn - tcrit, fabs(d__1)) <= uround * 100. * hmx;
/* ----------------------------------------------------------------------- */
/* Block G. */
/* The following block handles all successful returns from DLSODA. */
/* If ITASK .ne. 1, Y is loaded from YH and T is set accordingly. */
/* ISTATE is set to 2, and the optional outputs are loaded into the */
/* work arrays before returning. */
/* ----------------------------------------------------------------------- */
L400:
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L410: */
y[i__ - 1] = rwork[i__ + lyh - 1 -1]; //fixed y
}
*t = tn;
if (*itask != 4 && *itask != 5) {
goto L420;
}
if (ihit) {
*t = tcrit;
}
L420:
*istate = 2;
rwork[10] = hu;
rwork[11] = h__;
rwork[12] = tn;
rwork[14] = tsw;
iwork[10] = nst;
iwork[11] = nfe;
iwork[12] = nje;
iwork[13] = nqu;
iwork[14] = nq;
iwork[18] = mused;
iwork[19] = meth;
return 0;
/* ----------------------------------------------------------------------- */
/* Block H. */
/* The following block handles all unsuccessful returns other than */
/* those for illegal input. First the error message routine is called. */
/* If there was an error test or convergence test failure, IMXER is set. */
/* Then Y is loaded from YH and T is set to TN. */
/* The optional outputs are loaded into the work arrays before returning. */
/* ----------------------------------------------------------------------- */
/* The maximum number of steps was taken before reaching TOUT. ---------- */
L500:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At current T (=R1), MXSTEP (=I1) steps \n");
#endif
//xerrwd_(msg, &c__50, &c__201, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " taken on this call before reaching TOUT \n");
#endif
//xerrwd_(msg, &c__50, &c__201, &c__0, 1, &mxstep, &c__0, 1, &tn, &c_b62, (ftnlen)60);
*istate = -1;
goto L580;
/* EWT(i) .le. 0.0 for some i (not at start of problem). ---------------- */
L510:
ewti = rwork[lewt + i__ - 1 -1];
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T (=R1), EWT(%d) has become R2 .le. 0.\n",ewti);
#endif
//xerrwd_(msg, &c__50, &c__202, &c__0, 1, &i__, &c__0, &c__2, &tn, &ewti, (ftnlen)60);
*istate = -6;
goto L580;
/* Too much accuracy requested for machine precision. ------------------- */
L520:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T (=R1), too much accuracy requested \n");
#endif
//xerrwd_(msg, &c__50, &c__203, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " for precision of machine.. See TOLSF (=R2) \n");
#endif
//xerrwd_(msg, &c__50, &c__203, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &tolsf, (ftnlen)60);
rwork[13] = tolsf;
*istate = -2;
goto L580;
/* KFLAG = -1. Error test failed repeatedly or with ABS(H) = HMIN. ----- */
L530:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T(=R1) and step size H(=R2), the error\n");
#endif
//xerrwd_(msg, &c__50, &c__204, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " test failed repeatedly or with ABS(H) = HMIN\n");
#endif
//xerrwd_(msg, &c__50, &c__204, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &h__, (ftnlen)60);
*istate = -4;
goto L560;
/* KFLAG = -2. Convergence failed repeatedly or with ABS(H) = HMIN. ---- */
L540:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T (=R1) and step size H (=R2), the \n");
#endif
//xerrwd_(msg, &c__50, &c__205, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " corrector convergence failed repeatedly \n");
#endif
//xerrwd_(msg, &c__50, &c__205, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " or with ABS(H) = HMIN \n");
#endif
//xerrwd_(msg, &c__30, &c__205, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &h__, (ftnlen)60);
*istate = -5;
goto L560;
/* RWORK length too small to proceed. ----------------------------------- */
L550:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At current T(=R1), RWORK length too small\n");
#endif
//xerrwd_(msg, &c__50, &c__206, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " to proceed. The integration was otherwise successful.\n");
#endif
//xerrwd_(msg, &c__60, &c__206, &c__0, &c__0, &c__0, &c__0, 1, &tn, &c_b62, (ftnlen)60);
*istate = -7;
goto L580;
/* IWORK length too small to proceed. ----------------------------------- */
L555:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At current T(=R1), IWORK length too small\n");
#endif
//xerrwd_(msg, &c__50, &c__207, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " to proceed. The integration was otherwise successful.\n");
#endif
//xerrwd_(msg, &c__60, &c__207, &c__0, &c__0, &c__0, &c__0, 1, &tn, &c_b62, (ftnlen)60);
*istate = -7;
goto L580;
/* Compute IMXER if relevant. ------------------------------------------- */
L560:
big = 0.;
imxer = 1;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
size = (d__1 = rwork[i__ + lacor - 1 -1] * rwork[i__ + lewt - 1 -1], fabs(d__1));
if (big >= size) {
goto L570;
}
big = size;
imxer = i__;
L570:
;
}
iwork[15] = imxer;
/* Set Y vector, T, and optional outputs. ------------------------------- */
L580:
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L590: */
y[i__ - 1] = rwork[i__ + lyh - 1 -1]; //fixed y
}
*t = tn;
rwork[10] = hu;
rwork[11] = h__;
rwork[12] = tn;
rwork[14] = tsw;
iwork[10] = nst;
iwork[11] = nfe;
iwork[12] = nje;
iwork[13] = nqu;
iwork[14] = nq;
iwork[18] = mused;
iwork[19] = meth;
return 0;
/* ----------------------------------------------------------------------- */
/* Block I. */
/* The following block handles all error returns due to illegal input */
/* (ISTATE = -3), as detected before calling the core integrator. */
/* First the error message routine is called. If the illegal input */
/* is a negative ISTATE, the run is aborted (apparent infinite loop). */
/* ----------------------------------------------------------------------- */
L601:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ISTATE (=I1) illegal.\n");
#endif
//xerrwd_(msg, &c__30, 1, &c__0, 1, istate, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
if (*istate < 0) {
goto L800;
}
debug[indice] = 601;
goto L700;
L602:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__2, &c__0, 1, itask, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 602;
goto L700;
L603:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ISTATE .gt. 1 but DLSODA not initialized.\n");
#endif
//xerrwd_(msg, &c__50, &c__3, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 603;
goto L700;
L604:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- NEQ (=I1) .lt. 1 \n");
#endif
//xerrwd_(msg, &c__30, &c__4, &c__0, 1, &neq[1], &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 604;
goto L700;
L605:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ISTATE = 3 and NEQ increased (I1 to I2). \n");
#endif
//xerrwd_(msg, &c__50, &c__5, &c__0, &c__2, &n, &neq[1], &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 605;
goto L700;
L606:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITOL (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__6, &c__0, 1, itol, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 606;
goto L700;
L607:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- IOPT (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__7, &c__0, 1, iopt, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 607;
goto L700;
L608:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- JT (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__8, &c__0, 1, jt, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 608;
goto L700;
L609:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ML (=I1) illegal: .lt.0 or .ge.NEQ (=I2) \n");
#endif
//xerrwd_(msg, &c__50, &c__9, &c__0, &c__2, &ml, &neq[1], &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 609;
goto L700;
L610:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MU (=I1) illegal: .lt.0 or .ge.NEQ (=I2) \n");
#endif
//xerrwd_(msg, &c__50, 10, &c__0, &c__2, &mu, &neq[1], &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 610;
goto L700;
L611:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- IXPR (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, 11, &c__0, 1, &ixpr, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 611;
goto L700;
L612:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXSTEP (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, 12, &c__0, 1, &mxstep, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 612;
goto L700;
L613:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXHNIL (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, 13, &c__0, 1, &mxhnil, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 613;
goto L700;
L614:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- TOUT (=R1) behind T (=R2) \n");
#endif
//xerrwd_(msg, &c__40, 14, &c__0, &c__0, &c__0, &c__0, &c__2, tout, t, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " Integration direction is given by H0 (=R1) \n");
#endif
//xerrwd_(msg, &c__50, 14, &c__0, &c__0, &c__0, &c__0, 1, &h0, &c_b62, (ftnlen)60);
debug[indice] = 614;
goto L700;
L615:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- HMAX (=R1) .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__30, 15, &c__0, &c__0, &c__0, &c__0, 1, &hmax, &c_b62, (ftnlen)60);
debug[indice] = 615;
goto L700;
L616:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- HMIN (=R1) .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__30, 16, &c__0, &c__0, &c__0, &c__0, 1, &hmin, &c_b62, (ftnlen)60);
debug[indice] = 616;
goto L700;
L617:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- RWORK length needed, LENRW (=I1), exceeds LRW (=I2)\n");
#endif
//xerrwd_(msg, &c__60, 17, &c__0, &c__2, &lenrw, lrw, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 617;
goto L700;
L618:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- IWORK length needed, LENIW (=I1), exceeds LIW (=I2)\n");
#endif
//xerrwd_(msg, &c__60, 18, &c__0, &c__2, &leniw, liw, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 618;
goto L700;
L619:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- RTOL(I1) is R1 .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__40, 19, &c__0, 1, &i__, &c__0, 1, &rtoli, &c_b62, (ftnlen)60);
debug[indice] = 619;
goto L700;
L620:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ATOL(I1) is R1 .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__40, &c__20, &c__0, 1, &i__, &c__0, 1, &atoli, &c_b62, (ftnlen)60);
debug[indice] = 620;
goto L700;
L621:
ewti = rwork[lewt + i__ - 1 -1];
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- EWT(I1) is R1 .le. 0.0 \n");
#endif
//xerrwd_(msg, &c__40, &c__21, &c__0, 1, &i__, &c__0, 1, &ewti, &c_b62, (ftnlen)60);
debug[indice] = 621;
goto L700;
L622:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- TOUT(=R1) too close to T(=R2) to start integration.\n");
#endif
//xerrwd_(msg, &c__60, &c__22, &c__0, &c__0, &c__0, &c__0, &c__2, tout, t, (ftnlen)60);
debug[indice] = 622;
goto L700;
L623:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK = I1 and TOUT (=R1) behind TCUR - HU (= R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__23, &c__0, 1, itask, &c__0, &c__2, tout, &tp,(ftnlen)60);
debug[indice] = 623;
goto L700;
L624:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK = 4 or 5 and TCRIT (=R1) behind TCUR (=R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__24, &c__0, &c__0, &c__0, &c__0, &c__2, &tcrit, &tn, (ftnlen)60);
debug[indice] = 624;
goto L700;
L625:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK = 4 or 5 and TCRIT (=R1) behind TOUT (=R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__25, &c__0, &c__0, &c__0, &c__0, &c__2, &tcrit, tout, (ftnlen)60);
debug[indice] = 625;
goto L700;
L626:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At start of problem, too much accuracy \n");
#endif
//xerrwd_(msg, &c__50, &c__26, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " requested for precision of machine.. See TOLSF (=R1) \n");
#endif
//xerrwd_(msg, &c__60, &c__26, &c__0, &c__0, &c__0, &c__0, 1, &tolsf, &c_b62, (ftnlen)60);
rwork[13] = tolsf;
debug[indice] = 626;
goto L700;
L627:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Trouble in DINTDY. ITASK = I1, TOUT = R1\n");
#endif
//xerrwd_(msg, &c__50, &c__27, &c__0, 1, itask, &c__0, 1, tout, &c_b62, (ftnlen)60);
debug[indice] = 627;
goto L700;
L628:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXORDN (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, &c__28, &c__0, 1, &mxordn, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 628;
goto L700;
L629:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXORDS (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, &c__29, &c__0, 1, &mxords, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 629;
L700:
// debug[indice] = 123;
*istate = -3;
return 0;
L800:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Run aborted.. apparent infinite loop. \n");
#endif
//xerrwd_(msg, &c__50, &c__303, &c__2, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice]-=1;
return 0;
/* ----------------------- End of Subroutine DLSODA ---------------------- */
} /* dlsoda_ */
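/* ----------------------------------------------------------------------- */
/* Illustrative sketch only (not called by the solver): the initial step */
/* size heuristic documented in Block C of DLSODA above, written as a */
/* standalone helper. The name h0_sketch and the fnorm argument (the */
/* EWT-weighted norm of f(t,y) that DLSODA obtains from DMNORM) are */
/* hypothetical; the solver computes the same quantity inline. */
/* ----------------------------------------------------------------------- */
static __device__ double h0_sketch(double t, double tout, double tol, double fnorm)
{
	/* H0**(-2) = 1/(TOL*w0**2) + TOL*(norm(F))**2, with w0 = MAX(|T|,|TOUT|) */
	double w0 = fabs(t) > fabs(tout) ? fabs(t) : fabs(tout);
	double sum = 1. / (tol * w0 * w0) + tol * fnorm * fnorm;
	double h0 = 1. / sqrt(sum);
	double tdist = fabs(tout - t);
	if (h0 > tdist) {
		h0 = tdist;                 /* ABS(H0) is kept .le. ABS(TOUT-T) */
	}
	return (tout >= t) ? h0 : -h0;  /* sign follows the integration direction */
}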
/* DECK DINTDY */
/* Subroutine */
__device__ int dintdy_(double *t, int k, double *yh, int *NOT_nyh, double *dky, int *iflag, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int yh_dim1 = 0;
int yh_offset = 0;
int i__1 = 0;
int i__2 = 0;
double d__1 = 0.;
/* Builtin functions */
/* Local variables */
double c__ = 0.;
int i__ = 0;
int j = 0;
double r__ = 0;
double s = 0;
int ic = 0;
int jb = 0;
int jj = 0;
double tp = 0;
int jb2 = 0;
int jj1 = 0;
int jp1 = 0;
/* ***BEGIN PROLOGUE DINTDY */
/* ***SUBSIDIARY */
/* ***PURPOSE Interpolate solution derivatives. */
/* ***TYPE DOUBLE PRECISION (SINTDY-S, DINTDY-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* DINTDY computes interpolated values of the K-th derivative of the */
/* dependent variable vector y, and stores it in DKY. This routine */
/* is called within the package with K = 0 and T = TOUT, but may */
/* also be called by the user for any K up to the current order. */
/* (See detailed instructions in the usage documentation.) */
/* The computed values in DKY are gotten by interpolation using the */
/* Nordsieck history array YH. This array corresponds uniquely to a */
/* vector-valued polynomial of degree NQCUR or less, and DKY is set */
/* to the K-th derivative of this polynomial at T. */
/* The formula for DKY is: */
/*       DKY(i)  =  sum (j = K to q) of  c(j,K) * (T - tn)**(j-K) * h**(-j) * YH(i,j+1)  */
/* where c(j,K) = j*(j-1)*...*(j-K+1), q = NQCUR, tn = TCUR, h = HCUR. */
/* The quantities nq = NQCUR, l = nq+1, N = NEQ, tn, and h are */
/* communicated by COMMON. The above sum is done in reverse order. */
/* IFLAG is returned negative if either K or T is out of bounds. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED XERRWD */
/* ***COMMON BLOCKS DLS001 */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* 010418 Reduced size of Common block /DLS001/. (ACH) */
/* 031105 Restored 'own' variables to Common block /DLS001/, to */
/* enable interrupt/restart feature. (ACH) */
/* 050427 Corrected roundoff decrement in TP. (ACH) */
/* ***END PROLOGUE DINTDY */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DINTDY */
/* Parameter adjustments */
yh_dim1 = *NOT_nyh;
yh_offset = 1 + yh_dim1;
//yh -= yh_offset;
//--dky;
/* Function Body */
*iflag = 0;
if (k < 0 || k > nq) {
goto L80;
}
d__1 = fabs(tn) + fabs(hu);
tp = tn - hu - uround * 100. * d_sign(&d__1, &hu);
if ((*t - tp) * (*t - tn) > 0.) {
goto L90;
}
s = (*t - tn) / h__;
ic = 1;
if (k == 0) {
goto L15;
}
jj1 = l - k;
i__1 = nq;
for (jj = jj1; jj <= i__1; ++jj) {
/* L10: */
ic *= jj;
}
L15:
c__ = (double) ic;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L20: */
dky[i__ -1] = c__ * yh[i__ + l * yh_dim1 -yh_offset];
}
if (k == nq) {
goto L55;
}
jb2 = nq - k;
i__1 = jb2;
for (jb = 1; jb <= i__1; ++jb) {
j = nq - jb;
jp1 = j + 1;
ic = 1;
if (k == 0) {
goto L35;
}
jj1 = jp1 - k;
i__2 = j;
for (jj = jj1; jj <= i__2; ++jj) {
/* L30: */
ic *= jj;
}
L35:
c__ = (double) ic;
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L40: */
dky[i__ -1] = c__ * yh[i__ + jp1 * yh_dim1 -yh_offset] + s * dky[i__ -1];
}
/* L50: */
}
if (k == 0) {
return 0;
}
L55:
i__1 = -(k);
r__ = pow(h__, (double)i__1);
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L60: */
dky[i__ -1] = r__ * dky[i__ -1];
}
return 0;
L80:
#ifdef EMULATION_MODE
fprintf(stderr, "DINTDY- K (=I1) illegal \n");
#endif
//xerrwd_(msg, &c__30, &c__51, &c__0, 1, k, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)80);
*iflag = -1;
return 0;
L90:
#ifdef EMULATION_MODE
fprintf(stderr, "DINTDY- T (=R1) illegal \n");
#endif
//xerrwd_(msg, &c__30, &c__52, &c__0, &c__0, &c__0, &c__0, 1, t, &c_b62,(ftnlen)80);
#ifdef EMULATION_MODE
fprintf(stderr, " T not in interval TCUR - HU (= R1) to TCUR (=R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__52, &c__0, &c__0, &c__0, &c__0, &c__2, &tp, &tn, (ftnlen)80);
*iflag = -2;
return 0;
/* ----------------------- END OF SUBROUTINE DINTDY ---------------------- */
} /* dintdy_ */
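/* ----------------------------------------------------------------------- */
/* Illustrative sketch only (not called by the solver): the K = 0 case of */
/* the DINTDY formula above, i.e. Horner evaluation of the Nordsieck */
/* history polynomial at T. yh is assumed column-major with leading */
/* dimension nyh, column j+1 holding h**j * y(j)(tn)/factorial(j), as */
/* described in the DSTODA prologue below. The name nordsieck_eval0 is */
/* hypothetical. */
/* ----------------------------------------------------------------------- */
static __device__ void nordsieck_eval0(const double *yh, int nyh, int n, int nq,
	double tn, double h, double t, double *dky)
{
	double s = (t - tn) / h;            /* normalized evaluation point */
	int i, j;
	for (i = 0; i < n; ++i) {
		dky[i] = yh[i + nq * nyh];      /* start from the highest column YH(:,NQ+1) */
	}
	for (j = nq - 1; j >= 0; --j) {     /* Horner: dky = YH(:,j+1) + s*dky */
		for (i = 0; i < n; ++i) {
			dky[i] = yh[i + j * nyh] + s * dky[i];
		}
	}
}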
/* DECK DSTODA */
/* Subroutine */
#ifdef use_export
export
#endif
__device__ int dstoda_(int *neq, double *y, double *yh, int *NOT_nyh, double *yh1, double *ewt, double *savf, double *acor, double *wm, int *iwm, myFex f, myJex jac, struct cuLsodaCommonBlock *common, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, double* costanti, param_t* myjac, unsigned int* myjacoffset)
{
/* Initialized data */
/* System generated locals */
int yh_dim1 = 0;
int yh_offset = 0;
int i__1 = 0;
int i__2 = 0;
double d__1 = 0.;
double d__2 = 0.;
double d__3 = 0.;
/* Builtin functions */
/* Local variables */
int i__ = 0;
int j = 0;
int m = 0;
double r__ = 0.;
int i1 = 0;
int jb = 0;
double rh = 0.;
double rm = 0.;
double dm1 = 0.;
double dm2 = 0.;
int lm1 = 0;
int lm2 = 0;
double rh1 = 0.;
double rh2 = 0.;
double del = 0.;
double ddn = 0.;
int ncf = 0;
double pdh = 0.;
double dsm = 0.;
double dup = 0.;
double exm1 = 0.;
double exm2 = 0.;
int nqm1 = 0;
int nqm2 = 0;
double dcon = 0.;
double delp = 0.;
int lm1p1 = 0;
int lm2p1 = 0;
double exdn = 0.;
double rhdn = 0.;
int iret = 0;
double told = 0.;
double rhsm = 0.;
int newq = 0;
double exsm = 0.;
double rhup = 0.;
double rate = 0.;
double exup = 0.;
double rh1it = 0.;
double alpha = 0.;
int iredo = 0;
double pnorm = 0.;
/* Parameter adjustments */
//--neq; //fixed
// --y;
yh_dim1 = *NOT_nyh;
yh_offset = 1 + yh_dim1;
//yh -= yh_offset;
//--yh1;
//--ewt;
//--savf;
//--acor;
//--wm;
//--iwm;
/* Function Body */
/* ----------------------------------------------------------------------- */
/* DSTODA performs one step of the integration of an initial value */
/* problem for a system of ordinary differential equations. */
/* Note: DSTODA is independent of the value of the iteration method */
/* indicator MITER, when this is .ne. 0, and hence is independent */
/* of the type of chord method used, or the Jacobian structure. */
/* Communication with DSTODA is done with the following variables: */
/* Y = an array of length .ge. N used as the Y argument in */
/* all calls to F and JAC. */
/* NEQ = int array containing problem size in NEQ(1), and */
/* passed as the NEQ argument in all calls to F and JAC. */
/* YH = an NYH by LMAX array containing the dependent variables */
/* and their approximate scaled derivatives, where */
/* LMAX = MAXORD + 1. YH(i,j+1) contains the approximate */
/* j-th derivative of y(i), scaled by H**j/factorial(j) */
/* (j = 0,1,...,NQ). On entry for the first step, the first */
/* two columns of YH must be set from the initial values. */
/* NYH = a constant int .ge. N, the first dimension of YH. */
/* YH1 = a one-dimensional array occupying the same space as YH. */
/* EWT = an array of length N containing multiplicative weights */
/* for local error measurements. Local errors in y(i) are */
/* compared to 1.0/EWT(i) in various error tests. */
/* SAVF = an array of working storage, of length N. */
/* ACOR = a work array of length N, used for the accumulated */
/* corrections. On a successful return, ACOR(i) contains */
/* the estimated one-step local error in y(i). */
/* WM,IWM = real and int work arrays associated with matrix */
/* operations in chord iteration (MITER .ne. 0). */
/* dprja_ = name of routine to evaluate and preprocess Jacobian matrix */
/* and P = I - H*EL0*Jac, if a chord method is being used. */
/* It also returns an estimate of norm(Jac) in PDNORM. */
/* dsolsy_ = name of routine to solve linear system in chord iteration. */
/* CCMAX = maximum relative change in H*EL0 before dprja_ is called. */
/* H = the step size to be attempted on the next step. */
/* H is altered by the error control algorithm during the */
/* problem. H can be either positive or negative, but its */
/* sign must remain constant throughout the problem. */
/* HMIN = the minimum absolute value of the step size H to be used. */
/* HMXI = inverse of the maximum absolute value of H to be used. */
/* HMXI = 0.0 is allowed and corresponds to an infinite HMAX. */
/* HMIN and HMXI may be changed at any time, but will not */
/* take effect until the next change of H is considered. */
/* TN = the independent variable. TN is updated on each step taken. */
/* JSTART = an int used for input only, with the following */
/* values and meanings: */
/* 0 perform the first step. */
/* .gt.0 take a new step continuing from the last. */
/* -1 take the next step with a new value of H, */
/* N, METH, MITER, and/or matrix parameters. */
/* -2 take the next step with a new value of H, */
/* but with other inputs unchanged. */
/* On return, JSTART is set to 1 to facilitate continuation. */
/* KFLAG = a completion code with the following meanings: */
/* 0 the step was successful. */
/* -1 the requested error could not be achieved. */
/* -2 corrector convergence could not be achieved. */
/* -3 fatal error in dprja_ or dsolsy_. */
/* A return with KFLAG = -1 or -2 means either */
/* ABS(H) = HMIN or 10 consecutive failures occurred. */
/* On a return with KFLAG negative, the values of TN and */
/* the YH array are as of the beginning of the last */
/* step, and H is the last step size attempted. */
/* MAXORD = the maximum order of integration method to be allowed. */
/* MAXCOR = the maximum number of corrector iterations allowed. */
/* MSBP = maximum number of steps between dprja_ calls (MITER .gt. 0). */
/* MXNCF = maximum number of convergence failures allowed. */
/* METH = current method. */
/* METH = 1 means Adams method (nonstiff) */
/* METH = 2 means BDF method (stiff) */
/* METH may be reset by DSTODA. */
/* MITER = corrector iteration method. */
/* MITER = 0 means functional iteration. */
/* MITER = JT .gt. 0 means a chord iteration corresponding */
/* to Jacobian type JT. (The DLSODA/DLSODAR argument JT is */
/* communicated here as JTYP, but is not used in DSTODA */
/* except to load MITER following a method switch.) */
/* MITER may be reset by DSTODA. */
/* N = the number of first-order differential equations. */
/* ----------------------------------------------------------------------- */
kflag = 0;
told = tn;
ncf = 0;
ierpj = 0;
iersl = 0;
jcur = 0;
icf = 0;
delp = 0.;
if (jstart > 0) {
goto L200;
}
if (jstart == -1) {
goto L100;
}
if (jstart == -2) {
goto L160;
}
/* ----------------------------------------------------------------------- */
/* On the first call, the order is set to 1, and other variables are */
/* initialized. RMAX is the maximum ratio by which H can be increased */
/* in a single step. It is initially 1.E4 to compensate for the small */
/* initial H, but then is normally equal to 10. If a failure */
/* occurs (in corrector convergence or error test), RMAX is set at 2 */
/* for the next increase. */
/* DCFODE is called to get the needed coefficients for both methods. */
/* ----------------------------------------------------------------------- */
lmax = maxord + 1;
nq = 1;
l = 2;
ialth = 2;
rmax = 1e4;
rc = 0.;
el0 = 1.;
crate = .7;
hold = h__;
nslp = 0;
ipup = miter;
iret = 3;
/* Initialize switching parameters. METH = 1 is assumed initially. ----- */
icount = 20;
irflag = 0;
pdest = 0.;
pdlast = 0.;
ratio = 5.;
dcfode_(2, elco, tesco, common);
for (i__ = 1; i__ <= 5; ++i__) {
/* L10: */
cm2[i__ - 1] = tesco[i__ * 3 - 2] * elco[i__ + 1 + i__ * 13 - 14];
}
dcfode_(1, elco, tesco, common);
for (i__ = 1; i__ <= 12; ++i__) {
/* L20: */
cm1[i__ - 1] = tesco[i__ * 3 - 2] * elco[i__ + 1 + i__ * 13 - 14];
}
goto L150;
/* ----------------------------------------------------------------------- */
/* The following block handles preliminaries needed when JSTART = -1. */
/* IPUP is set to MITER to force a matrix update. */
/* If an order increase is about to be considered (IALTH = 1), */
/* IALTH is reset to 2 to postpone consideration one more step. */
/* If the caller has changed METH, DCFODE is called to reset */
/* the coefficients of the method. */
/* If H is to be changed, YH must be rescaled. */
/* If H or METH is being changed, IALTH is reset to L = NQ + 1 */
/* to prevent further changes in H for that many steps. */
/* ----------------------------------------------------------------------- */
L100:
ipup = miter;
lmax = maxord + 1;
if (ialth == 1) {
ialth = 2;
}
if (meth == mused) {
goto L160;
}
dcfode_(meth, elco, tesco, common);
ialth = l;
iret = 1;
/* ----------------------------------------------------------------------- */
/* The el vector and related constants are reset */
/* whenever the order NQ is changed, or at the start of the problem. */
/* ----------------------------------------------------------------------- */
L150:
i__1 = l;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L155: */
el[i__ - 1] = elco[i__ + nq * 13 - 14];
}
nqnyh = nq * *NOT_nyh;
rc = rc * el[0] / el0;
el0 = el[0];
conit = .5 / (double)(nq + 2);
switch (iret) {
case 1: goto L160;
case 2: goto L170;
case 3: goto L200;
}
/* ----------------------------------------------------------------------- */
/* If H is being changed, the H ratio RH is checked against */
/* RMAX, HMIN, and HMXI, and the YH array rescaled. IALTH is set to */
/* L = NQ + 1 to prevent a change of H for that many steps, unless */
/* forced by a convergence or error test failure. */
/* ----------------------------------------------------------------------- */
L160:
if (h__ == hold) {
goto L200;
}
rh = h__ / hold;
h__ = hold;
iredo = 3;
goto L175;
L170:
/* Computing MAX */
d__1 = rh, d__2 = hmin / fabs(h__);
rh = max(d__1,d__2);
L175:
rh = min(rh,rmax);
/* Computing MAX */
d__1 = 1., d__2 = fabs(h__) * hmxi * rh;
rh /= max(d__1,d__2);
/* ----------------------------------------------------------------------- */
/* If METH = 1, also restrict the new step size by the stability region. */
/* If this reduces H, set IRFLAG to 1 so that if there are roundoff */
/* problems later, we can assume that is the cause of the trouble. */
/* ----------------------------------------------------------------------- */
if (meth == 2) {
goto L178;
}
irflag = 0;
/* Computing MAX */
d__1 = fabs(h__) * pdlast;
pdh = max(d__1,1e-6);
if (rh * pdh * 1.00001 < sm1[nq - 1]) {
goto L178;
}
rh = sm1[nq - 1] / pdh;
irflag = 1;
L178:
r__ = 1.;
i__1 = l;
for (j = 2; j <= i__1; ++j) {
r__ *= rh;
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L180: */
yh[i__ + j * yh_dim1 -yh_offset] *= r__;
}
}
h__ *= rh;
rc *= rh;
ialth = l;
if (iredo == 0) {
goto L690;
}
/* ----------------------------------------------------------------------- */
/* This section computes the predicted values by effectively */
/* multiplying the YH array by the Pascal triangle matrix. */
/* RC is the ratio of new to old values of the coefficient H*EL(1). */
/* When RC differs from 1 by more than CCMAX, IPUP is set to MITER */
/* to force dprja_ to be called, if a Jacobian is involved. */
/* In any case, dprja_ is called at least every MSBP steps. */
/* ----------------------------------------------------------------------- */
L200:
if ((d__1 = rc - 1., fabs(d__1)) > ccmax) {
ipup = miter;
}
if (nst >= nslp + msbp) {
ipup = miter;
}
tn += h__;
i1 = nqnyh + 1;
i__2 = nq;
for (jb = 1; jb <= i__2; ++jb) {
i1 -= *NOT_nyh;
/* DIR$ IVDEP */
i__1 = nqnyh;
for (i__ = i1; i__ <= i__1; ++i__) {
/* L210: */
yh1[i__ -1] += yh1[i__ + *NOT_nyh -1];
}
/* L215: */
}
pnorm = dmnorm_(&n, yh1, ewt, common);
/* ----------------------------------------------------------------------- */
/* Up to MAXCOR corrector iterations are taken. A convergence test is */
/* made on the RMS-norm of each correction, weighted by the error */
/* weight vector EWT. The sum of the corrections is accumulated in the */
/* vector ACOR(i). The YH array is not altered in the corrector loop. */
/* ----------------------------------------------------------------------- */
L220:
m = 0;
rate = 0.;
del = 0.;
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L230: */
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset]; //fixed y
}
f(neq, &tn, y, savf, comp_ode, flattenODE, offsetODE,costanti); //fixed neq y
++nfe;
if (ipup <= 0) {
goto L250;
}
/* ----------------------------------------------------------------------- */
/* If indicated, the matrix P = I - H*EL(1)*J is reevaluated and */
/* preprocessed before starting the corrector iteration. IPUP is set */
/* to 0 as an indicator that this has been done. */
/* ----------------------------------------------------------------------- */
dprja_(neq, y, &yh[yh_offset -yh_offset], NOT_nyh, ewt, acor, savf, wm, iwm, f, jac, common, comp_ode, flattenODE, offsetODE, myjac, myjacoffset, costanti ); //fixed neq y
ipup = 0;
rc = 1.;
nslp = nst;
crate = .7;
if (ierpj != 0) {
goto L430;
}
L250:
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L260: */
acor[i__ -1] = 0.;
}
L270:
if (miter != 0) {
goto L350;
}
/* ----------------------------------------------------------------------- */
/* In the case of functional iteration, update Y directly from */
/* the result of the last function evaluation. */
/* ----------------------------------------------------------------------- */
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
savf[i__ -1] = h__ * savf[i__ -1] - yh[i__ + (yh_dim1 << 1) -yh_offset];
/* L290: */
y[i__ - 1] = savf[i__ -1] - acor[i__ -1]; //fixed y
}
del = dmnorm_(&n, y, ewt, common);
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset] + el[0] * savf[i__ -1];
/* L300: */
acor[i__ -1] = savf[i__ -1];
}
goto L400;
/* ----------------------------------------------------------------------- */
/* In the case of the chord method, compute the corrector error, */
/* and solve the linear system with that as right-hand side and */
/* P as coefficient matrix. */
/* ----------------------------------------------------------------------- */
L350:
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L360: */
y[i__ - 1] = h__ * savf[i__ -1] - (yh[i__ + (yh_dim1 << 1) -yh_offset] + acor[i__ -1]);
}
dsolsy_(wm, iwm, y, savf, common);
if (iersl < 0) {
goto L430;
}
if (iersl > 0) {
goto L410;
}
del = dmnorm_(&n, y, ewt, common);
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
acor[i__ -1] += y[i__ - 1];
/* L380: */
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset] + el[0] * acor[i__ -1];
}
/* ----------------------------------------------------------------------- */
/* Test for convergence. If M .gt. 0, an estimate of the convergence */
/* rate constant is stored in CRATE, and this is used in the test. */
/* We first check for a change of iterates that is the size of */
/* roundoff error. If this occurs, the iteration has converged, and a */
/* new rate estimate is not formed. */
/* In all other cases, force at least two iterations to form a */
/* local Lipschitz constant estimate for Adams methods. */
/* On convergence, form PDEST = local maximum Lipschitz constant */
/* estimate. PDLAST is the most recent nonzero estimate. */
/* ----------------------------------------------------------------------- */
L400:
if (del <= pnorm * 100. * uround) {
goto L450;
}
if (m == 0 && meth == 1) {
goto L405;
}
if (m == 0) {
goto L402;
}
rm = 1024.;
if (del <= delp * 1024.) {
rm = del / delp;
}
rate = max(rate,rm);
/* Computing MAX */
d__1 = crate * .2;
crate = max(d__1,rm);
L402:
/* Computing MIN */
d__1 = 1., d__2 = crate * 1.5;
dcon = del * min(d__1,d__2) / (tesco[nq * 3 - 2] * conit);
if (dcon > 1.) {
goto L405;
}
/* Computing MAX */
d__2 = pdest, d__3 = rate / (d__1 = h__ * el[0], fabs(d__1));
pdest = max(d__2,d__3);
if (pdest != 0.) {
pdlast = pdest;
}
goto L450;
L405:
++m;
if (m == maxcor) {
goto L410;
}
if (m >= 2 && del > delp * 2.) {
goto L410;
}
delp = del;
f(neq, &tn, y, savf, comp_ode, flattenODE, offsetODE, costanti); //fixed neq y
++nfe;
goto L270;
/* ----------------------------------------------------------------------- */
/* The corrector iteration failed to converge. */
/* If MITER .ne. 0 and the Jacobian is out of date, dprja_ is called for */
/* the next try. Otherwise the YH array is retracted to its values */
/* before prediction, and H is reduced, if possible. If H cannot be */
/* reduced or MXNCF failures have occurred, exit with KFLAG = -2. */
/* ----------------------------------------------------------------------- */
L410:
if (miter == 0 || jcur == 1) {
goto L430;
}
icf = 1;
ipup = miter;
goto L220;
L430:
icf = 2;
++ncf;
rmax = 2.;
tn = told;
i1 = nqnyh + 1;
i__2 = nq;
for (jb = 1; jb <= i__2; ++jb) {
i1 -= *NOT_nyh;
/* DIR$ IVDEP */
i__1 = nqnyh;
for (i__ = i1; i__ <= i__1; ++i__) {
/* L440: */
yh1[i__ -1] -= yh1[i__ + *NOT_nyh -1];
}
/* L445: */
}
if (ierpj < 0 || iersl < 0) {
goto L680;
}
if (fabs(h__) <= hmin * 1.00001) {
goto L670;
}
if (ncf == mxncf) {
goto L670;
}
rh = .25;
ipup = miter;
iredo = 1;
goto L170;
/* ----------------------------------------------------------------------- */
/* The corrector has converged. JCUR is set to 0 */
/* to signal that the Jacobian involved may need updating later. */
/* The local error test is made and control passes to statement 500 */
/* if it fails. */
/* ----------------------------------------------------------------------- */
L450:
jcur = 0;
if (m == 0) {
dsm = del / tesco[nq * 3 - 2];
}
if (m > 0) {
dsm = dmnorm_(&n, acor, ewt, common) / tesco[nq * 3 - 2];
}
if (dsm > 1.) {
goto L500;
}
/* ----------------------------------------------------------------------- */
/* After a successful step, update the YH array. */
/* Decrease ICOUNT by 1, and if it is -1, consider switching methods. */
/* If a method switch is made, reset various parameters, */
/* rescale the YH array, and exit. If there is no switch, */
/* consider changing H if IALTH = 1. Otherwise decrease IALTH by 1. */
/* If IALTH is then 1 and NQ .lt. MAXORD, then ACOR is saved for */
/* use in a possible order increase on the next step. */
/* If a change in H is considered, an increase or decrease in order */
/* by one is considered also. A change in H is made only if it is by a */
/* factor of at least 1.1. If not, IALTH is set to 3 to prevent */
/* testing for that many steps. */
/* ----------------------------------------------------------------------- */
kflag = 0;
iredo = 0;
++nst;
hu = h__;
nqu = nq;
mused = meth;
i__2 = l;
for (j = 1; j <= i__2; ++j) {
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L460: */
yh[i__ + j * yh_dim1 -yh_offset] += el[j - 1] * acor[i__ -1];
}
}
--icount;
if (icount >= 0) {
goto L488;
}
if (meth == 2) {
goto L480;
}
/* ----------------------------------------------------------------------- */
/* We are currently using an Adams method. Consider switching to BDF. */
/* If the current order is greater than 5, assume the problem is */
/* not stiff, and skip this section. */
/* If the Lipschitz constant and error estimate are not polluted */
/* by roundoff, go to 470 and perform the usual test. */
/* Otherwise, switch to the BDF methods if the last step was */
/* restricted to insure stability (irflag = 1), and stay with Adams */
/* method if not. When switching to BDF with polluted error estimates, */
/* in the absence of other information, double the step size. */
/* When the estimates are OK, we make the usual test by computing */
/* the step size we could have (ideally) used on this step, */
/* with the current (Adams) method, and also that for the BDF. */
/* If NQ .gt. MXORDS, we consider changing to order MXORDS on switching. */
/* Compare the two step sizes to decide whether to switch. */
/* The step size advantage must be at least RATIO = 5 to switch. */
/* ----------------------------------------------------------------------- */
if (nq > 5) {
goto L488;
}
if (dsm > pnorm * 100. * uround && pdest != 0.) {
goto L470;
}
if (irflag == 0) {
goto L488;
}
rh2 = 2.;
nqm2 = min(nq,mxords);
goto L478;
L470:
exsm = 1. / (double)l;
rh1 = 1. / (pow(dsm, exsm) * 1.2 + 1.2e-6);
rh1it = rh1 * 2.;
pdh = pdlast * fabs(h__);
if (pdh * rh1 > 1e-5) {
rh1it = sm1[nq - 1] / pdh;
}
rh1 = min(rh1,rh1it);
if (nq <= mxords) {
goto L474;
}
nqm2 = mxords;
lm2 = mxords + 1;
exm2 = 1. / (double)lm2;
lm2p1 = lm2 + 1;
dm2 = dmnorm_(&n, &yh[lm2p1 * yh_dim1 + 1 -yh_offset], ewt, common) / cm2[mxords - 1];
rh2 = 1. / (pow(dm2, exm2) * 1.2 + 1.2e-6);
goto L476;
L474:
dm2 = dsm * (cm1[nq - 1] / cm2[nq - 1]);
rh2 = 1. / (pow(dm2, exsm) * 1.2 + 1.2e-6);
nqm2 = nq;
L476:
if (rh2 < ratio * rh1) {
goto L488;
}
/* THE SWITCH TEST PASSED. RESET RELEVANT QUANTITIES FOR BDF. ---------- */
L478:
rh = rh2;
icount = 20;
meth = 2;
miter = jtyp;
pdlast = 0.;
nq = nqm2;
l = nq + 1;
goto L170;
/* ----------------------------------------------------------------------- */
/* We are currently using a BDF method. Consider switching to Adams. */
/* Compute the step size we could have (ideally) used on this step, */
/* with the current (BDF) method, and also that for the Adams. */
/* If NQ .gt. MXORDN, we consider changing to order MXORDN on switching. */
/* Compare the two step sizes to decide whether to switch. */
/* The step size advantage must be at least 5/RATIO = 1 to switch. */
/* If the step size for Adams would be so small as to cause */
/* roundoff pollution, we stay with BDF. */
/* ----------------------------------------------------------------------- */
L480:
exsm = 1. / (double)l;
if (mxordn >= nq) {
goto L484;
}
nqm1 = mxordn;
lm1 = mxordn + 1;
exm1 = 1. / (double)lm1;
lm1p1 = lm1 + 1;
dm1 = dmnorm_(&n, &yh[lm1p1 * yh_dim1 + 1 -yh_offset], ewt, common) / cm1[mxordn - 1];
rh1 = 1. / (pow(dm1, exm1) * 1.2 + 1.2e-6);
goto L486;
L484:
dm1 = dsm * (cm2[nq - 1] / cm1[nq - 1]);
rh1 = 1. / (pow(dm1, exsm) * 1.2 + 1.2e-6);
nqm1 = nq;
exm1 = exsm;
L486:
rh1it = rh1 * 2.;
pdh = pdnorm * fabs(h__);
if (pdh * rh1 > 1e-5) {
rh1it = sm1[nqm1 - 1] / pdh;
}
rh1 = min(rh1,rh1it);
rh2 = 1. / (pow(dsm, exsm) * 1.2 + 1.2e-6);
if (rh1 * ratio < rh2 * 5.) {
goto L488;
}
alpha = max(.001,rh1);
dm1 = pow(alpha, exm1) * dm1;
if (dm1 <= uround * 1e3 * pnorm) {
goto L488;
}
/* The switch test passed. Reset relevant quantities for Adams. -------- */
rh = rh1;
icount = 20;
meth = 1;
miter = 0;
pdlast = 0.;
nq = nqm1;
l = nq + 1;
goto L170;
/* No method switch is being made. Do the usual step/order selection. -- */
L488:
--ialth;
if (ialth == 0) {
goto L520;
}
if (ialth > 1) {
// debug[indice] = -1;
goto L700;
}
if (l == lmax) {
goto L700;
}
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L490: */
yh[i__ + lmax * yh_dim1 -yh_offset] = acor[i__ -1];
}
goto L700;
/* ----------------------------------------------------------------------- */
/* The error test failed. KFLAG keeps track of multiple failures. */
/* Restore TN and the YH array to their previous values, and prepare */
/* to try the step again. Compute the optimum step size for this or */
/* one lower order. After 2 or more failures, H is forced to decrease */
/* by a factor of 0.2 or less. */
/* ----------------------------------------------------------------------- */
L500:
--kflag;
tn = told;
i1 = nqnyh + 1;
i__1 = nq;
for (jb = 1; jb <= i__1; ++jb) {
i1 -= *NOT_nyh;
/* DIR$ IVDEP */
i__2 = nqnyh;
for (i__ = i1; i__ <= i__2; ++i__) {
/* L510: */
yh1[i__ -1] -= yh1[i__ + *NOT_nyh -1];
}
/* L515: */
}
rmax = 2.;
if (fabs(h__) <= hmin * 1.00001) {
goto L660;
}
if (kflag <= -3) {
goto L640;
}
iredo = 2;
rhup = 0.;
goto L540;
/* ----------------------------------------------------------------------- */
/* Regardless of the success or failure of the step, factors */
/* RHDN, RHSM, and RHUP are computed, by which H could be multiplied */
/* at order NQ - 1, order NQ, or order NQ + 1, respectively. */
/* In the case of failure, RHUP = 0.0 to avoid an order increase. */
/* The largest of these is determined and the new order chosen */
/* accordingly. If the order is to be increased, we compute one */
/* additional scaled derivative. */
/* ----------------------------------------------------------------------- */
L520:
rhup = 0.;
if (l == lmax) {
goto L540;
}
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L530: */
savf[i__ -1] = acor[i__ -1] - yh[i__ + lmax * yh_dim1 -yh_offset];
}
dup = dmnorm_(&n, savf, ewt, common) / tesco[nq * 3 - 1];
exup = (double)1. / (double)(l + 1);
rhup = (double)1. / (pow(dup, exup) * (double)1.4 + (double)1.4e-6);
L540:
exsm = (double)1. / l;
rhsm = (double)1. / (pow(dsm, exsm) * (double)1.2 + (double)1.2e-6);
rhdn = 0.;
if (nq == 1) {
goto L550;
}
ddn = dmnorm_(&n, &yh[l * yh_dim1 + 1 -yh_offset], ewt, common) / tesco[nq * 3 - 3];
exdn = (double)1. / (double)nq;
rhdn = (double)1. / (pow(ddn, exdn) * (double)1.3 + (double)1.3e-6);
/* If METH = 1, limit RH according to the stability region also. -------- */
L550:
if (meth == 2) {
goto L560;
}
/* Computing MAX */
d__1 = fabs(h__) * pdlast;
pdh = max(d__1,1e-6);
if (l < lmax) {
/* Computing MIN */
d__1 = rhup, d__2 = sm1[l - 1] / pdh;
rhup = min(d__1,d__2);
}
/* Computing MIN */
d__1 = rhsm, d__2 = sm1[nq - 1] / pdh;
rhsm = min(d__1,d__2);
if (nq > 1) {
/* Computing MIN */
d__1 = rhdn, d__2 = sm1[nq - 2] / pdh;
rhdn = min(d__1,d__2);
}
pdest = 0.;
L560:
if (rhsm >= rhup) {
goto L570;
}
if (rhup > rhdn) {
goto L590;
}
goto L580;
L570:
if (rhsm < rhdn) {
goto L580;
}
newq = nq;
rh = rhsm;
goto L620;
L580:
newq = nq - 1;
rh = rhdn;
if (kflag < 0 && rh > 1.) {
rh = 1.;
}
goto L620;
L590:
newq = l;
rh = rhup;
if (rh < 1.1) {
goto L610;
}
r__ = el[l - 1] / (double)l;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L600: */
yh[i__ + (newq + 1) * yh_dim1 -yh_offset] = acor[i__ -1] * r__;
}
goto L630;
L610:
ialth = 3;
goto L700;
/* If METH = 1 and H is restricted by stability, bypass 10 percent test. */
L620:
if (meth == 2) {
goto L622;
}
if (rh * pdh * 1.00001 >= sm1[newq - 1]) {
goto L625;
}
L622:
if (kflag == 0 && rh < 1.1) {
goto L610;
}
L625:
if (kflag <= -2) {
rh = min(rh,.2);
}
/* ----------------------------------------------------------------------- */
/* If there is a change of order, reset NQ, L, and the coefficients. */
/* In any case H is reset according to RH and the YH array is rescaled. */
/* Then exit from 690 if the step was OK, or redo the step otherwise. */
/* ----------------------------------------------------------------------- */
if (newq == nq) {
goto L170;
}
L630:
nq = newq;
l = nq + 1;
iret = 2;
goto L150;
/* ----------------------------------------------------------------------- */
/* Control reaches this section if 3 or more failures have occurred. */
/* If 10 failures have occurred, exit with KFLAG = -1. */
/* It is assumed that the derivatives that have accumulated in the */
/* YH array have errors of the wrong order. Hence the first */
/* derivative is recomputed, and the order is set to 1. Then */
/* H is reduced by a factor of 10, and the step is retried, */
/* until it succeeds or H reaches HMIN. */
/* ----------------------------------------------------------------------- */
L640:
if (kflag == -10) {
goto L660;
}
rh = .1;
/* Computing MAX */
d__1 = hmin / fabs(h__);
rh = max(d__1,rh);
h__ *= rh;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L645: */
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset];
}
f(neq, &tn, y, savf, comp_ode, flattenODE, offsetODE, costanti); //fixed neq
++nfe;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L650: */
yh[i__ + (yh_dim1 << 1) -yh_offset] = h__ * savf[i__ -1];
}
ipup = miter;
ialth = 5;
if (nq == 1) {
goto L200;
}
nq = 1;
l = 2;
iret = 3;
goto L150;
/* ----------------------------------------------------------------------- */
/* All returns are made through this section. H is saved in HOLD */
/* to allow the caller to change H on the next step. */
/* ----------------------------------------------------------------------- */
L660:
kflag = -1;
goto L720;
L670:
kflag = -2;
goto L720;
L680:
kflag = -3;
goto L720;
L690:
rmax = 10.;
L700:
r__ = 1. / tesco[nqu * 3 - 2];
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L710: */
acor[i__ -1] *= r__;
}
L720:
hold = h__;
jstart = 1;
return 0;
/* ----------------------- End of Subroutine DSTODA ---------------------- */
} /* dstoda_ */
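/* ----------------------------------------------------------------------- */
/* Illustrative sketch only (not called by the solver): the common shape of */
/* the step-size ratios RHSM, RHDN and RHUP computed in DSTODA above, */
/*     RH = 1 / (D**(1/L) * BIAS + BIAS*1.0E-6), */
/* where D is a scaled error estimate. DSTODA uses L = NQ+1, BIAS = 1.2 */
/* for the current order (DSM), L = NQ, BIAS = 1.3 for one order lower */
/* (DDN), and L = NQ+2, BIAS = 1.4 for one order higher (DUP). The name */
/* step_ratio_sketch is hypothetical. */
/* ----------------------------------------------------------------------- */
static __device__ double step_ratio_sketch(double d, int l, double bias)
{
	/* a larger bias makes the candidate order less likely to be chosen; */
	/* the small additive floor guards against d == 0                    */
	return 1. / (pow(d, 1. / (double)l) * bias + bias * 1e-6);
}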
/* DECK DCFODE */
/* Subroutine */
__device__ int dcfode_(int PARAM_meth, double *DCFODE_elco, double *DCFODE_tesco, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
/* Local variables */
int i__ = 0;
int ib = 0;
double pc[12];
for (int bubb = 0; bubb < 12; bubb ++)
{
pc[bubb] = 0.;
}
int DCFODE_nq = 0;
double fnq = 0.;
int nqm1 = 0;
int nqp1 = 0;
double ragq = 0.;
double pint = 0.;
double xpin = 0.;
double fnqm1 = 0.;
double agamq = 0.;
double rqfac = 0.;
double tsign = 0.;
double rq1fac = 0.;
/* ***BEGIN PROLOGUE DCFODE */
/* ***SUBSIDIARY */
/* ***PURPOSE Set ODE integrator coefficients. */
/* ***TYPE DOUBLE PRECISION (SCFODE-S, DCFODE-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* DCFODE is called by the integrator routine to set coefficients */
/* needed there. The coefficients for the current method, as */
/* given by the value of METH, are set for all orders and saved. */
/* The maximum order assumed here is 12 if METH = 1 and 5 if METH = 2. */
/* (A smaller value of the maximum order is also allowed.) */
/* DCFODE is called once at the beginning of the problem, */
/* and is not called again unless and until METH is changed. */
/* The ELCO array contains the basic method coefficients. */
/* The coefficients el(i), 1 .le. i .le. nq+1, for the method of */
/* order nq are stored in ELCO(i,nq). They are given by a generating */
/* polynomial, i.e., */
/* l(x) = el(1) + el(2)*x + ... + el(nq+1)*x**nq. */
/* For the implicit Adams methods, l(x) is given by */
/* dl/dx = (x+1)*(x+2)*...*(x+nq-1)/factorial(nq-1), l(-1) = 0. */
/* For the BDF methods, l(x) is given by */
/* l(x) = (x+1)*(x+2)* ... *(x+nq)/K, */
/* where K = factorial(nq)*(1 + 1/2 + ... + 1/nq). */
/* The TESCO array contains test constants used for the */
/* local error test and the selection of step size and/or order. */
/* At order nq, TESCO(k,nq) is used for the selection of step */
/* size at order nq - 1 if k = 1, at order nq if k = 2, and at order */
/* nq + 1 if k = 3. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* ***END PROLOGUE DCFODE */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DCFODE */
/* Parameter adjustments */
//DCFODE_tesco -= 4;
//DCFODE_elco -= 14;
/* Function Body */
switch (PARAM_meth) {
case 1: goto L100;
case 2: goto L200;
}
L100:
DCFODE_elco[14 -14] = 1.;
DCFODE_elco[15 -14] = 1.;
DCFODE_tesco[4 -4] = 0.;
DCFODE_tesco[5 -4] = 2.;
DCFODE_tesco[7 -4] = 1.;
DCFODE_tesco[39 -4] = 0.;
pc[0] = (double)1.;
rqfac =(double) 1.;
for (DCFODE_nq = 2; DCFODE_nq <= 12; ++DCFODE_nq) {
/* ----------------------------------------------------------------------- */
/* The PC array will contain the coefficients of the polynomial */
/* p(x) = (x+1)*(x+2)*...*(x+nq-1). */
/* Initially, p(x) = 1. */
/* ----------------------------------------------------------------------- */
rq1fac = rqfac;
rqfac /= (double)DCFODE_nq;
nqm1 = DCFODE_nq - 1;
fnqm1 = (double) nqm1;
nqp1 = DCFODE_nq + 1;
/* Form coefficients of p(x)*(x+nq-1). ---------------------------------- */
pc[DCFODE_nq - 1] = 0.;
i__1 = nqm1;
for (ib = 1; ib <= i__1; ++ib) {
i__ = nqp1 - ib;
/* L110: */
pc[i__ - 1] = pc[i__ - 2] + fnqm1 * pc[i__ - 1];
}
pc[0] = fnqm1 * pc[0];
/* Compute integral, -1 to 0, of p(x) and x*p(x). ----------------------- */
pint = pc[0];
xpin = pc[0] / 2.;
tsign = 1.;
i__1 = DCFODE_nq;
for (i__ = 2; i__ <= i__1; ++i__) {
tsign = -tsign;
pint += tsign * pc[i__ - 1] / (double)i__;
/* L120: */
xpin += tsign * pc[i__ - 1] / (double)(i__ + 1);
}
/* Store coefficients in ELCO and TESCO. -------------------------------- */
DCFODE_elco[DCFODE_nq * 13 + 1 -14] = pint * rq1fac;
DCFODE_elco[DCFODE_nq * 13 + 2 -14] = 1.;
i__1 = DCFODE_nq;
for (i__ = 2; i__ <= i__1; ++i__) {
/* L130: */
DCFODE_elco[i__ + 1 + DCFODE_nq * 13 -14] = rq1fac * pc[i__ - 1] / (double)i__;
}
agamq = rqfac * xpin;
ragq = 1. / agamq;
DCFODE_tesco[DCFODE_nq * 3 + 2 -4] = ragq;
if (DCFODE_nq < 12) {
DCFODE_tesco[nqp1 * 3 + 1 -4] = ragq * rqfac / (double)nqp1;
}
DCFODE_tesco[nqm1 * 3 + 3 -4] = ragq;
/* L140: */
}
return 0;
L200:
pc[0] = 1.;
rq1fac = 1.;
for (DCFODE_nq = 1; DCFODE_nq <= 5; ++DCFODE_nq) {
/* ----------------------------------------------------------------------- */
/* The PC array will contain the coefficients of the polynomial */
/* p(x) = (x+1)*(x+2)*...*(x+nq). */
/* Initially, p(x) = 1. */
/* ----------------------------------------------------------------------- */
fnq = (double) DCFODE_nq;
nqp1 = DCFODE_nq + 1;
/* Form coefficients of p(x)*(x+nq). ------------------------------------ */
pc[nqp1 - 1] = 0.;
i__1 = DCFODE_nq;
for (ib = 1; ib <= i__1; ++ib) {
i__ = DCFODE_nq + 2 - ib;
/* L210: */
pc[i__ - 1] = pc[i__ - 2] + fnq * pc[i__ - 1];
}
pc[0] = fnq * pc[0];
/* Store coefficients in ELCO and TESCO. -------------------------------- */
i__1 = nqp1;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L220: */
DCFODE_elco[i__ + DCFODE_nq * 13 -14] = pc[i__ - 1] / pc[1];
}
DCFODE_elco[DCFODE_nq * 13 + 2 -14] = 1.;
DCFODE_tesco[DCFODE_nq * 3 + 1 -4] = rq1fac;
DCFODE_tesco[DCFODE_nq * 3 + 2 -4] = ((double)nqp1) / DCFODE_elco[DCFODE_nq * 13 + 1 -14];
DCFODE_tesco[DCFODE_nq * 3 + 3 -4] = ((double)(DCFODE_nq + 2)) / DCFODE_elco[DCFODE_nq * 13 + 1 -14];
rq1fac /= fnq;
/* L230: */
}
return 0;
/* ----------------------- END OF SUBROUTINE DCFODE ---------------------- */
} /* dcfode_ */
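/* Indexing note (added comment, not part of the original ODEPACK source):
   the f2c pointer shifts "DCFODE_elco -= 14" and "DCFODE_tesco -= 4" are
   commented out above and folded into every subscript instead.  With 1-based
   Fortran indices, ELCO(i,nq) (dimensioned 13 x 12) and TESCO(k,nq)
   (dimensioned 3 x 12) live at the flat 0-based positions
       DCFODE_elco [i + 13*nq - 14]
       DCFODE_tesco[k +  3*nq -  4]
   which is why, for example, TESCO(2,nq), the constant used for step size
   selection at the current order nq, appears above as
   DCFODE_tesco[DCFODE_nq * 3 + 2 - 4]. */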
/* DECK DPRJA */
/* Subroutine */
#ifdef use_export
export
#endif
__device__ int dprja_(int *neq, double *y, double *yh, int *NOT_nyh, double *ewt, double *ftem, double *savf, double *wm, int *iwm, myFex f, myJex jac, struct cuLsodaCommonBlock *common, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, param_t* myjac, unsigned int* myjacoffset, double* costanti)
{
/* System generated locals */
int yh_dim1, yh_offset, i__1, i__2, i__3, i__4;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__ = 0;
int j;
double r__;
int i1, i2, j1;
double r0;
int ii = 0;
int jj = 0;
int ml = 0;
int mu = 0;
double yi = 0.;
double yj = 0.;
double hl0;
int ml3 = 0;
int np1 = 0;
double fac;
int mba = 0;
int ier = 0;
double con = 0.;
double yjj;
int meb1 = 0;
int lenp = 0;
double srur;
int mband = 0;
int meband = 0;
/* ----------------------------------------------------------------------- */
/* DPRJA is called by DSTODA to compute and process the matrix */
/* P = I - H*EL(1)*J , where J is an approximation to the Jacobian. */
/* Here J is computed by the user-supplied routine JAC if */
/* MITER = 1 or 4 or by finite differencing if MITER = 2 or 5. */
/* J, scaled by -H*EL(1), is stored in WM. Then the norm of J (the */
/* matrix norm consistent with the weighted max-norm on vectors given */
/* by DMNORM) is computed, and J is overwritten by P. P is then */
/* subjected to LU decomposition in preparation for later solution */
/* of linear systems with P as coefficient matrix. This is done */
/* by DGEFA if MITER = 1 or 2, and by DGBFA if MITER = 4 or 5. */
/* In addition to variables described previously, communication */
/* with DPRJA uses the following: */
/* Y = array containing predicted values on entry. */
/* FTEM = work array of length N (ACOR in DSTODA). */
/* SAVF = array containing f evaluated at predicted y. */
/* WM = real work space for matrices. On output it contains the */
/* LU decomposition of P. */
/* Storage of matrix elements starts at WM(3). */
/* WM also contains the following matrix-related data: */
/* WM(1) = SQRT(UROUND), used in numerical Jacobian increments. */
/* IWM = int work space containing pivot information, starting at */
/* IWM(21). IWM also contains the band parameters */
/* ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. */
/* EL0 = EL(1) (input). */
/* PDNORM= norm of Jacobian matrix. (Output). */
/* IERPJ = output error flag, = 0 if no trouble, .gt. 0 if */
/* P matrix found to be singular. */
/* JCUR = output flag = 1 to indicate that the Jacobian matrix */
/* (or approximation) is now current. */
/* This routine also uses the Common variables EL0, H, TN, UROUND, */
/* MITER, N, NFE, and NJE. */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
// --neq;
//--y;
yh_dim1 = *NOT_nyh;
yh_offset = 1 + yh_dim1;
//yh -= yh_offset;
//--ewt;
//--ftem;
//--savf;
//--wm;
//--iwm;
/* Function Body */
++nje;
ierpj = 0;
jcur = 1;
hl0 = h__ * el0;
switch (miter) {
case 1: goto L100;
case 2: goto L200;
case 3: goto L300;
case 4: goto L400;
case 5: goto L500;
}
/* If MITER = 1, call JAC and multiply by scalar. ----------------------- */
L100:
lenp = n * n;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L110: */
wm[i__ + 2 -1] = 0.;
}
jac(neq, &tn, y, 0, 0, &wm[3 -1], n, myjac, myjacoffset, costanti); //fixed neq
con = -hl0;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L120: */
wm[i__ + 2 -1] *= con;
}
goto L240;
/* If MITER = 2, make N calls to F to approximate J. -------------------- */
L200:
fac = dmnorm_(&n, savf, ewt, common);
r0 = fabs(h__) * 1e3 * uround * ((double)n) * fac;
if (r0 == 0.) {
r0 = 1.;
}
srur = wm[0];
j1 = 2;
i__1 = n;
for (j = 1; j <= i__1; ++j) {
yj = y[j - 1];
/* Computing MAX */
d__1 = srur * fabs(yj), d__2 = r0 / ewt[j -1];
r__ = max(d__1,d__2);
y[j - 1] += r__;
fac = -hl0 / r__;
f(neq, &tn, y, ftem, comp_ode, flattenODE, offsetODE, costanti); //fixed neq
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L220: */
wm[i__ + j1 -1] = (ftem[i__ -1] - savf[i__ -1]) * fac;
}
y[j -1] = yj;
j1 += n;
/* L230: */
}
nfe += n;
L240:
/* Compute norm of Jacobian. -------------------------------------------- */
pdnorm = dfnorm_(&n, &wm[3 -1], ewt, common) / fabs(hl0);
/* Add identity matrix. ------------------------------------------------- */
j = 3;
np1 = n + 1;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
wm[j -1] += 1.;
/* L250: */
j += np1;
}
/* Do LU decomposition on P. -------------------------------------------- */
dgefa_(&wm[3 -1], &n, &n, &iwm[21 -1], &ier, common);
if (ier != 0) {
ierpj = 1;
}
return 0;
/* Dummy block only, since MITER is never 3 in this routine. ------------ */
L300:
return 0;
/* If MITER = 4, call JAC and multiply by scalar. ----------------------- */
L400:
ml = iwm[0];
mu = iwm[1];
ml3 = ml + 3;
mband = ml + mu + 1;
meband = mband + ml;
lenp = meband * n;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L410: */
wm[i__ + 2 -1] = 0.;
}
jac(neq, &tn, y, ml, mu, &wm[ml3 -1], meband, myjac, myjacoffset, costanti); //fixed neq
con = -hl0;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L420: */
wm[i__ + 2 -1] *= con;
}
goto L570;
/* If MITER = 5, make MBAND calls to F to approximate J. ---------------- */
L500:
ml = iwm[0];
mu = iwm[1];
mband = ml + mu + 1;
mba = min(mband,n);
meband = mband + ml;
meb1 = meband - 1;
srur = wm[0];
fac = dmnorm_(&n, savf, ewt, common);
r0 = fabs(h__) * 1e3 * uround * n * fac;
if (r0 == 0.) {
r0 = 1.;
}
i__1 = mba;
for (j = 1; j <= i__1; ++j) {
i__2 = n;
i__3 = mband;
for (i__ = j; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) {
yi = y[i__ -1];
/* Computing MAX */
d__1 = srur * fabs(yi), d__2 = r0 / ewt[i__ -1];
r__ = max(d__1,d__2);
/* L530: */
y[i__ - 1] += r__;
}
f(neq, &tn, y, ftem, comp_ode, flattenODE, offsetODE, costanti); //fixed neq
i__3 = n;
i__2 = mband;
for (jj = j; i__2 < 0 ? jj >= i__3 : jj <= i__3; jj += i__2) {
y[jj - 1] = yh[jj + yh_dim1 -yh_offset];
yjj = y[jj - 1];
/* Computing MAX */
d__1 = srur * fabs(yjj), d__2 = r0 / ewt[jj -1];
r__ = max(d__1,d__2);
fac = -hl0 / r__;
/* Computing MAX */
i__4 = jj - mu;
i1 = max(i__4,1);
/* Computing MIN */
i__4 = jj + ml;
i2 = min(i__4,n);
ii = jj * meb1 - ml + 2;
i__4 = i2;
for (i__ = i1; i__ <= i__4; ++i__) {
/* L540: */
wm[ii + i__ -1] = (ftem[i__ -1] - savf[i__ -1]) * fac;
}
/* L550: */
}
/* L560: */
}
nfe += mba;
L570:
/* Compute norm of Jacobian. -------------------------------------------- */
pdnorm = dbnorm_(&n, &wm[ml + 3 -1], &meband, &ml, &mu, ewt, common) / fabs(hl0);
/* Add identity matrix. ------------------------------------------------- */
ii = mband + 2;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
wm[ii -1] += 1.;
/* L580: */
ii += meband;
}
/* Do LU decomposition of P. -------------------------------------------- */
dgbfa_(&wm[3 -1], &meband, &n, &ml, &mu, &iwm[21 -1], &ier, common);
if (ier != 0) {
ierpj = 1;
}
return 0;
/* ----------------------- End of Subroutine DPRJA ----------------------- */
} /* dprja_ */
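/* Finite-difference sketch (added comment, illustrative only): for MITER = 2
   the block at label L200 above approximates one Jacobian column per call to
   F.  In scalar form, for column j,
       r = max(srur*fabs(y[j-1]), r0/ewt[j-1]);      increment for column j
       y[j-1] += r;
       f(neq, &tn, y, ftem, ...);                    f at the perturbed y
       wm[i + j1 - 1] = (ftem[i-1] - savf[i-1]) * (-hl0 / r);
   so J(i,j) is approximated by (f_i(y + r*e_j) - f_i(y)) / r and is already
   scaled by -h*el(1); the identity is added afterwards at label L240 before
   the LU factorization by dgefa_. */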
/* DECK DSOLSY */
/* Subroutine */
__device__ int dsolsy_(double *wm, int *iwm, double *x, double *tem, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
/* Local variables */
int i__ = 0;
double r__ = 0.;
double di = 0.;
int ml = 0;
int mu = 0;
double hl0 = 0.;
double phl0 = 0.;
int meband = 0;
/* ***BEGIN PROLOGUE DSOLSY */
/* ***SUBSIDIARY */
/* ***PURPOSE ODEPACK linear system solver. */
/* ***TYPE DOUBLE PRECISION (SSOLSY-S, DSOLSY-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* This routine manages the solution of the linear system arising from */
/* a chord iteration. It is called if MITER .ne. 0. */
/* If MITER is 1 or 2, it calls DGESL to accomplish this. */
/* If MITER = 3 it updates the coefficient h*EL0 in the diagonal */
/* matrix, and then computes the solution. */
/* If MITER is 4 or 5, it calls DGBSL. */
/* Communication with DSOLSY uses the following variables: */
/* WM = real work space containing the inverse diagonal matrix if */
/* MITER = 3 and the LU decomposition of the matrix otherwise. */
/* Storage of matrix elements starts at WM(3). */
/* WM also contains the following matrix-related data: */
/* WM(1) = SQRT(UROUND) (not used here), */
/* WM(2) = HL0, the previous value of h*EL0, used if MITER = 3. */
/* IWM = int work space containing pivot information, starting at */
/* IWM(21), if MITER is 1, 2, 4, or 5. IWM also contains band */
/* parameters ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. */
/* X = the right-hand side vector on input, and the solution vector */
/* on output, of length N. */
/* TEM = vector of work space of length N, not used in this version. */
/* IERSL = output flag (in COMMON). IERSL = 0 if no trouble occurred. */
/* IERSL = 1 if a singular matrix arose with MITER = 3. */
/* This routine also uses the COMMON variables EL0, H, MITER, and N. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED DGBSL, DGESL */
/* ***COMMON BLOCKS DLS001 */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* 010418 Reduced size of Common block /DLS001/. (ACH) */
/* 031105 Restored 'own' variables to Common block /DLS001/, to */
/* enable interrupt/restart feature. (ACH) */
/* ***END PROLOGUE DSOLSY */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DSOLSY */
/* Parameter adjustments */
//--tem;
//--x;
//--iwm;
//--wm;
/* Function Body */
iersl = 0;
switch (miter) {
case 1: goto L100;
case 2: goto L100;
case 3: goto L300;
case 4: goto L400;
case 5: goto L400;
}
L100:
dgesl_(&wm[3 -1], &n, &n, &iwm[21 -1], x, 0, common);
return 0;
L300:
phl0 = wm[1];
hl0 = h__ * el0;
wm[1] = hl0;
if (hl0 == phl0) {
goto L330;
}
r__ = hl0 / phl0;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
di = 1. - r__ * (1. - 1. / wm[i__ + 2 -1]);
if (fabs(di) == 0.) {
goto L390;
}
/* L320: */
wm[i__ + 2 -1] = 1. / di;
}
L330:
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L340: */
x[i__ -1] = wm[i__ + 2 -1] * x[i__ -1];
}
return 0;
L390:
iersl = 1;
return 0;
L400:
ml = iwm[0];
mu = iwm[1];
meband = (ml << 1) + mu + 1;
dgbsl_(&wm[3 -1], &meband, &n, &ml, &mu, &iwm[21 -1], x, 0, common);
return 0;
/* ----------------------- END OF SUBROUTINE DSOLSY ---------------------- */
} /* dsolsy_ */
/* DECK DEWSET */
/* Subroutine */
__device__ int dewset_(int *PARAM_n, int *itol, double *rtol, double *atol, double *ycur, double *ewt, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
double d__1 = 0.;
/* Local variables */
int i__ = 0;
/* ***BEGIN PROLOGUE DEWSET */
/* ***SUBSIDIARY */
/* ***PURPOSE Set error weight vector. */
/* ***TYPE DOUBLE PRECISION (SEWSET-S, DEWSET-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* This subroutine sets the error weight vector EWT according to */
/* EWT(i) = RTOL(i)*ABS(YCUR(i)) + ATOL(i), i = 1,...,N, */
/* with the subscript on RTOL and/or ATOL possibly replaced by 1 above, */
/* depending on the value of ITOL. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* ***END PROLOGUE DEWSET */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DEWSET */
/* Parameter adjustments */
//--ewt;
//--ycur;
//--rtol;
//--atol;
/* Function Body */
switch (*itol) {
case 1: goto L10;
case 2: goto L20;
case 3: goto L30;
case 4: goto L40;
}
L10:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L15: */
ewt[i__ -1] = rtol[0] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[0];
}
return 0;
L20:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L25: */
ewt[i__ -1] = rtol[0] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[i__ -1];
}
return 0;
L30:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L35: */
ewt[i__ -1] = rtol[i__ - 1] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[0];
}
return 0;
L40:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L45: */
ewt[i__ -1] = rtol[i__ - 1] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[i__ -1];
}
return 0;
/* ----------------------- END OF SUBROUTINE DEWSET ---------------------- */
} /* dewset_ */
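/* Summary (added comment): the four ITOL branches above differ only in
   whether RTOL and ATOL are scalars or per-component arrays (0-based C
   indexing shown):
       ITOL = 1 :  ewt[i] = rtol[0]*fabs(ycur[i]) + atol[0]
       ITOL = 2 :  ewt[i] = rtol[0]*fabs(ycur[i]) + atol[i]
       ITOL = 3 :  ewt[i] = rtol[i]*fabs(ycur[i]) + atol[0]
       ITOL = 4 :  ewt[i] = rtol[i]*fabs(ycur[i]) + atol[i]
   Every EWT(i) must come out positive, since the caller later inverts the
   weights before they are used in the weighted norms. */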
/* DECK DMNORM */
__device__ double dmnorm_(int *PARAM_n, double *v, double *w, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
double ret_val = 0.;
double d__1 = 0.;
double d__2 = 0.;
double d__3 = 0.;
/* Local variables */
int i__ = 0;
double vm = 0.;
/* ----------------------------------------------------------------------- */
/* This function routine computes the weighted max-norm */
/* of the vector of length N contained in the array V, with weights */
/* contained in the array w of length N: */
/* DMNORM = MAX(i=1,...,N) ABS(V(i))*W(i) */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--w;
//--v;
/* Function Body */
vm = 0.;
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L10: */
/* Computing MAX */
d__2 = vm, d__3 = (d__1 = v[i__ -1], fabs(d__1)) * w[i__ -1];
vm = max(d__2,d__3);
}
ret_val = vm;
return ret_val;
/* ----------------------- End of Function DMNORM ------------------------ */
} /* dmnorm_ */
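/* Usage note (added comment): in the calls made from this file (e.g. DPRJA)
   the weight array W is the EWT vector after the caller has inverted it, so
   the returned value is
       max over i of  |v(i)| / (RTOL*|y(i)| + ATOL)
   i.e. a result of 1 means V sits exactly at the requested tolerance,
   componentwise.  This is also the weighted norm the solver uses for its
   local error tests. */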
/* DECK DFNORM */
__device__ double dfnorm_(int *PARAM_n, double *a, double *w, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
double ret_val = 0.;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__ = 0;
int j = 0;
double an = 0.;
double sum = 0.;
/* ----------------------------------------------------------------------- */
/* This function computes the norm of a full N by N matrix, */
/* stored in the array A, that is consistent with the weighted max-norm */
/* on vectors, with weights stored in the array W: */
/* DFNORM = MAX(i=1,...,N) ( W(i) * Sum(j=1,...,N) ABS(a(i,j))/W(j) ) */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--w;
a_dim1 = *PARAM_n;
a_offset = 1 + a_dim1;
//a -= a_offset;
/* Function Body */
an = 0.;
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
sum = 0.;
i__2 = *PARAM_n;
for (j = 1; j <= i__2; ++j) {
/* L10: */
sum += (d__1 = a[i__ + j * a_dim1 -a_offset], fabs(d__1)) / w[j -1];
}
/* Computing MAX */
d__1 = an, d__2 = sum * w[i__ -1];
an = max(d__1,d__2);
/* L20: */
}
ret_val = an;
return ret_val;
/* ----------------------- End of Function DFNORM ------------------------ */
} /* dfnorm_ */
/* DECK DBNORM */
__device__ double dbnorm_(int *PARAM_n, double *a, int *nra, int *ml, int *mu, double *w, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
double ret_val = 0.;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__ = 0;
int j = 0;
double an = 0.;
double sum = 0.;
int i1 = 0;
int jhi = 0;
int jlo = 0;
/* ----------------------------------------------------------------------- */
/* This function computes the norm of a banded N by N matrix, */
/* stored in the array A, that is consistent with the weighted max-norm */
/* on vectors, with weights stored in the array W. */
/* ML and MU are the lower and upper half-bandwidths of the matrix. */
/* NRA is the first dimension of the A array, NRA .ge. ML+MU+1. */
/* In terms of the matrix elements a(i,j), the norm is given by: */
/* DBNORM = MAX(i=1,...,N) ( W(i) * Sum(j=1,...,N) ABS(a(i,j))/W(j) ) */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--w;
a_dim1 = *nra;
a_offset = 1 + a_dim1;
//a -= a_offset;
/* Function Body */
an = 0.;
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
sum = 0.;
i1 = i__ + *mu + 1;
/* Computing MAX */
i__2 = i__ - *ml;
jlo = max(i__2,1);
/* Computing MIN */
i__2 = i__ + *mu;
jhi = min(i__2,*PARAM_n);
i__2 = jhi;
for (j = jlo; j <= i__2; ++j) {
/* L10: */
sum += (d__1 = a[i1 - j + j * a_dim1 -a_offset], fabs(d__1)) / w[j -1];
}
/* Computing MAX */
d__1 = an, d__2 = sum * w[i__ -1];
an = max(d__1,d__2);
/* L20: */
}
ret_val = an;
return ret_val;
/* ----------------------- End of Function DBNORM ------------------------ */
} /* dbnorm_ */
/* DECK DSRCMA */
/* Subroutine */
//__device__ int dsrcma_(double *rsav, int *isav, int *job, struct cuLsodaCommonBlock *common)
//{
// /* Initialized data */
//
// int lenrls = 218;
// int lenils = 37;
// int lenrla = 22;
// int lenila = 9;
//
// /* System generated locals */
// int i__1 = 0;
//
// /* Local variables */
// int i__ = 0;
//
// /* ----------------------------------------------------------------------- */
// /* This routine saves or restores (depending on JOB) the contents of */
// /* the Common blocks DLS001, DLSA01, which are used */
// /* internally by one or more ODEPACK solvers. */
//
// /* RSAV = real array of length 240 or more. */
// /* ISAV = int array of length 46 or more. */
// /* JOB = flag indicating to save or restore the Common blocks: */
// /* JOB = 1 if Common is to be saved (written to RSAV/ISAV) */
// /* JOB = 2 if Common is to be restored (read from RSAV/ISAV) */
// /* A call with JOB = 2 presumes a prior call with JOB = 1. */
// /* ----------------------------------------------------------------------- */
// /* Parameter adjustments */
// //--isav;
// //--rsav;
//
// /* Function Body */
//
// if (*job == 2) {
// goto L100;
// }
// i__1 = lenrls;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L10: */
// rsav[i__ -1] = rls[i__ - 1];
// }
// i__1 = lenrla;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L15: */
// rsav[lenrls + i__ -1] = rlsa[i__ - 1];
// }
//
// i__1 = lenils;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L20: */
// isav[i__ -1] = ils[i__ - 1];
// }
// i__1 = lenila;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L25: */
// isav[lenils + i__ -1] = ilsa[i__ - 1];
// }
//
// return 0;
//
//L100:
// i__1 = lenrls;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L110: */
// rls[i__ - 1] = rsav[i__ -1];
// }
// i__1 = lenrla;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L115: */
// rlsa[i__ - 1] = rsav[lenrls + i__ -1];
// }
//
// i__1 = lenils;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L120: */
// ils[i__ - 1] = isav[i__ -1];
// }
// i__1 = lenila;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L125: */
// ilsa[i__ - 1] = isav[lenils + i__ -1];
// }
//
// return 0;
// /* ----------------------- End of Subroutine DSRCMA ---------------------- */
//} /* dsrcma_ */
/* DECK DGEFA */
/* Subroutine */
__device__ int dgefa_(double *a, int *lda, int *PARAM_n, int *ipvt, int *info, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
int i__3 = 0;
/* Local variables */
int j = 0;
int k = 0;
int DGEFA_l = 0;
double t = 0.;
int kp1 = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGEFA */
/* ***PURPOSE Factor a matrix using Gaussian elimination. */
/* ***CATEGORY D2A1 */
/* ***TYPE DOUBLE PRECISION (SGEFA-S, DGEFA-D, CGEFA-C) */
/* ***KEYWORDS GENERAL MATRIX, LINEAR ALGEBRA, LINPACK, */
/* MATRIX FACTORIZATION */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGEFA factors a double precision matrix by Gaussian elimination. */
/* DGEFA is usually called by DGECO, but it can be called */
/* directly with a saving in time if RCOND is not needed. */
/* (Time for DGECO) = (1 + 9/N)*(Time for DGEFA) . */
/* On Entry */
/* A DOUBLE PRECISION(LDA, N) */
/* the matrix to be factored. */
/* LDA int */
/* the leading dimension of the array A . */
/* N int */
/* the order of the matrix A . */
/* On Return */
/* A an upper triangular matrix and the multipliers */
/* which were used to obtain it. */
/* The factorization can be written A = L*U where */
/* L is a product of permutation and unit lower */
/* triangular matrices and U is upper triangular. */
/* IPVT int(N) */
/* an int vector of pivot indices. */
/* INFO int */
/* = 0 normal value. */
/* = K if U(K,K) .EQ. 0.0 . This is not an error */
/* condition for this subroutine, but it does */
/* indicate that DGESL or DGEDI will divide by zero */
/* if called. Use RCOND in DGECO for a reliable */
/* indication of singularity. */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DSCAL, IDAMAX */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGEFA */
/* GAUSSIAN ELIMINATION WITH PARTIAL PIVOTING */
/* ***FIRST EXECUTABLE STATEMENT DGEFA */
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1;
//a -= a_offset;
//--ipvt;
/* Function Body */
*info = 0;
nm1 = *PARAM_n - 1;
if (nm1 < 1) {
goto L70;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
kp1 = k + 1;
/* FIND L = PIVOT INDEX */
i__2 = *PARAM_n - k + 1;
DGEFA_l = idamax_(&i__2, &a[k + k * a_dim1 -a_offset], 1, common) + k - 1;
ipvt[k -1] = DGEFA_l;
/* ZERO PIVOT IMPLIES THIS COLUMN ALREADY TRIANGULARIZED */
if (a[DGEFA_l + k * a_dim1 -a_offset] == 0.) {
goto L40;
}
/* INTERCHANGE IF NECESSARY */
if (DGEFA_l == k) {
goto L10;
}
t = a[DGEFA_l + k * a_dim1 -a_offset];
a[DGEFA_l + k * a_dim1 -a_offset] = a[k + k * a_dim1 -a_offset];
a[k + k * a_dim1 -a_offset] = t;
L10:
/* COMPUTE MULTIPLIERS */
t = -1. / a[k + k * a_dim1 -a_offset];
i__2 = *PARAM_n - k;
dscal_(&i__2, &t, &a[k + 1 + k * a_dim1 -a_offset], 1, common);
/* ROW ELIMINATION WITH COLUMN INDEXING */
i__2 = *PARAM_n;
for (j = kp1; j <= i__2; ++j) {
t = a[DGEFA_l + j * a_dim1 -a_offset];
if (DGEFA_l == k) {
goto L20;
}
a[DGEFA_l + j * a_dim1 -a_offset] = a[k + j * a_dim1 -a_offset];
a[k + j * a_dim1 -a_offset] = t;
L20:
i__3 = *PARAM_n - k;
daxpy_(&i__3, &t, &a[k + 1 + k * a_dim1 -a_offset], 1, &a[k + 1 + j * a_dim1 -a_offset], 1, common);
/* L30: */
}
goto L50;
L40:
*info = k;
L50:
/* L60: */
;
}
L70:
ipvt[*PARAM_n -1] = *PARAM_n;
if (a[*PARAM_n + *PARAM_n * a_dim1 -a_offset] == 0.) {
*info = *PARAM_n;
}
return 0;
} /* dgefa_ */
/* DECK DGESL */
/* Subroutine */
__device__ int dgesl_(double *a, int *lda, int *PARAM_n, int *ipvt, double *b, int job, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
/* Local variables */
int k = 0;
	int DGESL_l = 0;
double t = 0.;
int kb = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGESL */
/* ***PURPOSE Solve the real system A*X=B or TRANS(A)*X=B using the */
/* factors computed by DGECO or DGEFA. */
/* ***CATEGORY D2A1 */
/* ***TYPE DOUBLE PRECISION (SGESL-S, DGESL-D, CGESL-C) */
/* ***KEYWORDS LINEAR ALGEBRA, LINPACK, MATRIX, SOLVE */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGESL solves the double precision system */
/* A * X = B or TRANS(A) * X = B */
/* using the factors computed by DGECO or DGEFA. */
/* On Entry */
/* A DOUBLE PRECISION(LDA, N) */
/* the output from DGECO or DGEFA. */
/* LDA int */
/* the leading dimension of the array A . */
/* N int */
/* the order of the matrix A . */
/* IPVT int(N) */
/* the pivot vector from DGECO or DGEFA. */
/* B DOUBLE PRECISION(N) */
/* the right hand side vector. */
/* JOB int */
/* = 0 to solve A*X = B , */
/* = nonzero to solve TRANS(A)*X = B where */
/* TRANS(A) is the transpose. */
/* On Return */
/* B the solution vector X . */
/* Error Condition */
/* A division by zero will occur if the input factor contains a */
/* zero on the diagonal. Technically this indicates singularity */
/* but it is often caused by improper arguments or improper */
/* setting of LDA . It will not occur if the subroutines are */
/* called correctly and if DGECO has set RCOND .GT. 0.0 */
/* or DGEFA has set INFO .EQ. 0 . */
/* To compute INVERSE(A) * C where C is a matrix */
/* with P columns */
/* CALL DGECO(A,LDA,N,IPVT,RCOND,Z) */
/* IF (RCOND is too small) GO TO ... */
/* DO 10 J = 1, P */
/* CALL DGESL(A,LDA,N,IPVT,C(1,J),0) */
/* 10 CONTINUE */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DDOT */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGESL */
/* ***FIRST EXECUTABLE STATEMENT DGESL */
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1;
//a -= a_offset;
//--ipvt;
//--b;
/* Function Body */
nm1 = *PARAM_n - 1;
if (job != 0) {
goto L50;
}
/* JOB = 0 , SOLVE A * X = B */
/* FIRST SOLVE L*Y = B */
if (nm1 < 1) {
goto L30;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
DGESL_l = ipvt[k -1];
t = b[DGESL_l -1];
if (DGESL_l == k) {
goto L10;
}
b[DGESL_l -1] = b[k -1];
b[k -1] = t;
L10:
i__2 = *PARAM_n - k;
daxpy_(&i__2, &t, &a[k + 1 + k * a_dim1 -a_offset], 1, &b[k + 1 -1], 1, common);
/* L20: */
}
L30:
/* NOW SOLVE U*X = Y */
i__1 = *PARAM_n;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n + 1 - kb;
b[k -1] /= a[k + k * a_dim1 -a_offset];
t = -b[k -1];
i__2 = k - 1;
daxpy_(&i__2, &t, &a[k * a_dim1 + 1 -a_offset], 1, b, 1, common);
/* L40: */
}
goto L100;
L50:
/* JOB = NONZERO, SOLVE TRANS(A) * X = B */
/* FIRST SOLVE TRANS(U)*Y = B */
i__1 = *PARAM_n;
for (k = 1; k <= i__1; ++k) {
i__2 = k - 1;
t = ddot_(&i__2, &a[k * a_dim1 + 1 -a_offset], 1, b, 1, common);
b[k -1] = (b[k -1] - t) / a[k + k * a_dim1 -a_offset];
/* L60: */
}
/* NOW SOLVE TRANS(L)*X = Y */
if (nm1 < 1) {
goto L90;
}
i__1 = nm1;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n - kb;
i__2 = *PARAM_n - k;
b[k -1] += ddot_(&i__2, &a[k + 1 + k * a_dim1 -a_offset], 1, &b[k + 1 -1], 1, common);
DGESL_l = ipvt[k -1];
if (DGESL_l == k) {
goto L70;
}
t = b[DGESL_l -1];
b[DGESL_l -1] = b[k -1];
b[k -1] = t;
L70:
/* L80: */
;
}
L90:
L100:
return 0;
} /* dgesl_ */
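/* Pairing sketch (added comment; mirrors the use in DPRJA and DSOLSY above):
   factor once with DGEFA, then solve any number of right-hand sides with
   DGESL using the same pivot vector.  For the dense MITER = 1,2 case:
       int ier;
       dgefa_(&wm[3 - 1], &n, &n, &iwm[21 - 1], &ier, common);
       if (ier == 0)
           dgesl_(&wm[3 - 1], &n, &n, &iwm[21 - 1], x, 0, common);
   where wm[2..] holds the N x N matrix P and iwm[20..] the pivots, exactly
   as laid out by the callers in this file. */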
/* DECK DGBFA */
/* Subroutine */
__device__ int dgbfa_(double *abd, int *lda, int *PARAM_n, int *ml, int *mu, int *ipvt, int *info, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int abd_dim1 = 0;
int abd_offset = 0;
int i__1 = 0;
int i__2 = 0;
int i__3 = 0;
int i__4 = 0;
/* Local variables */
int i__ = 0;
int j = 0;
int k = 0;
int DGBFA_l = 0;
int m = 0;
double t = 0.;
int i0 = 0;
int j0 = 0;
int j1 = 0;
int lm = 0;
int mm = 0;
int ju = 0;
int jz = 0;
int kp1 = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGBFA */
/* ***PURPOSE Factor a band matrix using Gaussian elimination. */
/* ***CATEGORY D2A2 */
/* ***TYPE DOUBLE PRECISION (SGBFA-S, DGBFA-D, CGBFA-C) */
/* ***KEYWORDS BANDED, LINEAR ALGEBRA, LINPACK, MATRIX FACTORIZATION */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGBFA factors a double precision band matrix by elimination. */
/* DGBFA is usually called by DGBCO, but it can be called */
/* directly with a saving in time if RCOND is not needed. */
/* On Entry */
/* ABD DOUBLE PRECISION(LDA, N) */
/* contains the matrix in band storage. The columns */
/* of the matrix are stored in the columns of ABD and */
/* the diagonals of the matrix are stored in rows */
/* ML+1 through 2*ML+MU+1 of ABD . */
/* See the comments below for details. */
/* LDA int */
/* the leading dimension of the array ABD . */
/* LDA must be .GE. 2*ML + MU + 1 . */
/* N int */
/* the order of the original matrix. */
/* ML int */
/* number of diagonals below the main diagonal. */
/* 0 .LE. ML .LT. N . */
/* MU int */
/* number of diagonals above the main diagonal. */
/* 0 .LE. MU .LT. N . */
/* More efficient if ML .LE. MU . */
/* On Return */
/* ABD an upper triangular matrix in band storage and */
/* the multipliers which were used to obtain it. */
/* The factorization can be written A = L*U where */
/* L is a product of permutation and unit lower */
/* triangular matrices and U is upper triangular. */
/* IPVT int(N) */
/* an int vector of pivot indices. */
/* INFO int */
/* = 0 normal value. */
/* = K if U(K,K) .EQ. 0.0 . This is not an error */
/* condition for this subroutine, but it does */
/* indicate that DGBSL will divide by zero if */
/* called. Use RCOND in DGBCO for a reliable */
/* indication of singularity. */
/* Band Storage */
/* If A is a band matrix, the following program segment */
/* will set up the input. */
/* ML = (band width below the diagonal) */
/* MU = (band width above the diagonal) */
/* M = ML + MU + 1 */
/* DO 20 J = 1, N */
/* I1 = MAX(1, J-MU) */
/* I2 = MIN(N, J+ML) */
/* DO 10 I = I1, I2 */
/* K = I - J + M */
/* ABD(K,J) = A(I,J) */
/* 10 CONTINUE */
/* 20 CONTINUE */
/* This uses rows ML+1 through 2*ML+MU+1 of ABD . */
/* In addition, the first ML rows in ABD are used for */
/* elements generated during the triangularization. */
/* The total number of rows needed in ABD is 2*ML+MU+1 . */
/* The ML+MU by ML+MU upper left triangle and the */
/* ML by ML lower right triangle are not referenced. */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DSCAL, IDAMAX */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890531 Changed all specific intrinsics to generic. (WRB) */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGBFA */
/* ***FIRST EXECUTABLE STATEMENT DGBFA */
/* Parameter adjustments */
abd_dim1 = *lda;
abd_offset = 1 + abd_dim1;
//abd -= abd_offset;
//--ipvt;
/* Function Body */
m = *ml + *mu + 1;
*info = 0;
/* ZERO INITIAL FILL-IN COLUMNS */
j0 = *mu + 2;
j1 = min(*PARAM_n,m) - 1;
if (j1 < j0) {
goto L30;
}
i__1 = j1;
for (jz = j0; jz <= i__1; ++jz) {
i0 = m + 1 - jz;
i__2 = *ml;
for (i__ = i0; i__ <= i__2; ++i__) {
abd[i__ + jz * abd_dim1 -abd_offset] = 0.;
/* L10: */
}
/* L20: */
}
L30:
jz = j1;
ju = 0;
/* GAUSSIAN ELIMINATION WITH PARTIAL PIVOTING */
nm1 = *PARAM_n - 1;
if (nm1 < 1) {
goto L130;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
kp1 = k + 1;
/* ZERO NEXT FILL-IN COLUMN */
++jz;
if (jz > *PARAM_n) {
goto L50;
}
if (*ml < 1) {
goto L50;
}
i__2 = *ml;
for (i__ = 1; i__ <= i__2; ++i__) {
abd[i__ + jz * abd_dim1 -abd_offset] = 0.;
/* L40: */
}
L50:
/* FIND L = PIVOT INDEX */
/* Computing MIN */
i__2 = *ml, i__3 = *PARAM_n - k;
lm = min(i__2,i__3);
i__2 = lm + 1;
DGBFA_l = idamax_(&i__2, &abd[m + k * abd_dim1 -abd_offset], 1, common) + m - 1;
ipvt[k -1] = DGBFA_l + k - m;
/* ZERO PIVOT IMPLIES THIS COLUMN ALREADY TRIANGULARIZED */
if (abd[DGBFA_l + k * abd_dim1 -abd_offset] == 0.) {
goto L100;
}
/* INTERCHANGE IF NECESSARY */
if (DGBFA_l == m) {
goto L60;
}
t = abd[DGBFA_l + k * abd_dim1 -abd_offset];
abd[DGBFA_l + k * abd_dim1 -abd_offset] = abd[m + k * abd_dim1 -abd_offset];
abd[m + k * abd_dim1 -abd_offset] = t;
L60:
/* COMPUTE MULTIPLIERS */
t = -1. / abd[m + k * abd_dim1 -abd_offset];
dscal_(&lm, &t, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, common);
/* ROW ELIMINATION WITH COLUMN INDEXING */
/* Computing MIN */
/* Computing MAX */
i__3 = ju, i__4 = *mu + ipvt[k -1];
i__2 = max(i__3,i__4);
ju = min(i__2,*PARAM_n);
mm = m;
if (ju < kp1) {
goto L90;
}
i__2 = ju;
for (j = kp1; j <= i__2; ++j) {
--DGBFA_l;
--mm;
t = abd[DGBFA_l + j * abd_dim1 -abd_offset];
if (DGBFA_l == mm) {
goto L70;
}
abd[DGBFA_l + j * abd_dim1 -abd_offset] = abd[mm + j * abd_dim1 -abd_offset];
abd[mm + j * abd_dim1 -abd_offset] = t;
L70:
daxpy_(&lm, &t, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, &abd[mm + 1 + j * abd_dim1 -abd_offset], 1, common);
/* L80: */
}
L90:
goto L110;
L100:
*info = k;
L110:
/* L120: */
;
}
L130:
ipvt[*PARAM_n -1] = *PARAM_n;
if (abd[m + *PARAM_n * abd_dim1 -abd_offset] == 0.) {
*info = *PARAM_n;
}
return 0;
} /* dgbfa_ */
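/* Band-storage note (added comment): the Fortran program segment in the
   prologue translates to 0-based C, for a full matrix a(i,j) with
   i, j = 0 .. n-1, as
       m = ml + mu + 1;
       for (j = 0; j < n; ++j)
           for (i = max(0, j - mu); i <= min(n - 1, j + ml); ++i)
               abd[(i - j + m - 1) + j * lda] = a_full[i + j * n];
   with lda >= 2*ml + mu + 1.  Rows 0 .. ml-1 of ABD are scratch space for
   the fill-in generated during the factorization, as noted above.  Here
   a_full is a hypothetical column-major copy of the original matrix, used
   only to illustrate the index mapping K = I - J + M. */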
/* DECK DGBSL */
/* Subroutine */
__device__ int dgbsl_(double *abd, int *lda, int *PARAM_n, int *ml, int *mu, int *ipvt, double *b, int job, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int abd_dim1 = 0;
int abd_offset = 0;
int i__1 = 0;
int i__2 = 0;
int i__3 = 0;
/* Local variables */
int k = 0;
int DGBSL_l = 0;
int m = 0;
double t = 0.;
int kb = 0;
int la = 0;
int lb = 0;
int lm = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGBSL */
/* ***PURPOSE Solve the real band system A*X=B or TRANS(A)*X=B using */
/* the factors computed by DGBCO or DGBFA. */
/* ***CATEGORY D2A2 */
/* ***TYPE DOUBLE PRECISION (SGBSL-S, DGBSL-D, CGBSL-C) */
/* ***KEYWORDS BANDED, LINEAR ALGEBRA, LINPACK, MATRIX, SOLVE */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGBSL solves the double precision band system */
/* A * X = B or TRANS(A) * X = B */
/* using the factors computed by DGBCO or DGBFA. */
/* On Entry */
/* ABD DOUBLE PRECISION(LDA, N) */
/* the output from DGBCO or DGBFA. */
/* LDA int */
/* the leading dimension of the array ABD . */
/* N int */
/* the order of the original matrix. */
/* ML int */
/* number of diagonals below the main diagonal. */
/* MU int */
/* number of diagonals above the main diagonal. */
/* IPVT int(N) */
/* the pivot vector from DGBCO or DGBFA. */
/* B DOUBLE PRECISION(N) */
/* the right hand side vector. */
/* JOB int */
/* = 0 to solve A*X = B , */
/* = nonzero to solve TRANS(A)*X = B , where */
/* TRANS(A) is the transpose. */
/* On Return */
/* B the solution vector X . */
/* Error Condition */
/* A division by zero will occur if the input factor contains a */
/* zero on the diagonal. Technically this indicates singularity */
/* but it is often caused by improper arguments or improper */
/* setting of LDA . It will not occur if the subroutines are */
/* called correctly and if DGBCO has set RCOND .GT. 0.0 */
/* or DGBFA has set INFO .EQ. 0 . */
/* To compute INVERSE(A) * C where C is a matrix */
/* with P columns */
/* CALL DGBCO(ABD,LDA,N,ML,MU,IPVT,RCOND,Z) */
/* IF (RCOND is too small) GO TO ... */
/* DO 10 J = 1, P */
/* CALL DGBSL(ABD,LDA,N,ML,MU,IPVT,C(1,J),0) */
/* 10 CONTINUE */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DDOT */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890531 Changed all specific intrinsics to generic. (WRB) */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGBSL */
/* ***FIRST EXECUTABLE STATEMENT DGBSL */
/* Parameter adjustments */
abd_dim1 = *lda;
abd_offset = 1 + abd_dim1;
//abd -= abd_offset;
//--ipvt;
//--b;
/* Function Body */
m = *mu + *ml + 1;
nm1 = *PARAM_n - 1;
if (job != 0) {
goto L50;
}
/* JOB = 0 , SOLVE A * X = B */
/* FIRST SOLVE L*Y = B */
if (*ml == 0) {
goto L30;
}
if (nm1 < 1) {
goto L30;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
/* Computing MIN */
i__2 = *ml, i__3 = *PARAM_n - k;
lm = min(i__2,i__3);
DGBSL_l = ipvt[k -1];
t = b[DGBSL_l -1];
if (DGBSL_l == k) {
goto L10;
}
b[DGBSL_l -1] = b[k -1];
b[k -1] = t;
L10:
daxpy_(&lm, &t, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, &b[k + 1 -1], 1, common);
/* L20: */
}
L30:
/* NOW SOLVE U*X = Y */
i__1 = *PARAM_n;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n + 1 - kb;
b[k -1] /= abd[m + k * abd_dim1 -abd_offset];
lm = min(k,m) - 1;
la = m - lm;
lb = k - lm;
t = -b[k -1];
daxpy_(&lm, &t, &abd[la + k * abd_dim1 -abd_offset], 1, &b[lb -1], 1, common);
/* L40: */
}
goto L100;
L50:
/* JOB = NONZERO, SOLVE TRANS(A) * X = B */
/* FIRST SOLVE TRANS(U)*Y = B */
i__1 = *PARAM_n;
for (k = 1; k <= i__1; ++k) {
lm = min(k,m) - 1;
la = m - lm;
lb = k - lm;
t = ddot_(&lm, &abd[la + k * abd_dim1 -abd_offset], 1, &b[lb -1], 1, common);
b[k -1] = (b[k -1] - t) / abd[m + k * abd_dim1 -abd_offset];
/* L60: */
}
/* NOW SOLVE TRANS(L)*X = Y */
if (*ml == 0) {
goto L90;
}
if (nm1 < 1) {
goto L90;
}
i__1 = nm1;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n - kb;
/* Computing MIN */
i__2 = *ml, i__3 = *PARAM_n - k;
lm = min(i__2,i__3);
b[k -1] += ddot_(&lm, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, &b[k + 1 -1], 1, common);
DGBSL_l = ipvt[k -1];
if (DGBSL_l == k) {
goto L70;
}
t = b[DGBSL_l -1];
b[DGBSL_l -1] = b[k -1];
b[k -1] = t;
L70:
/* L80: */
;
}
L90:
L100:
return 0;
} /* dgbsl_ */
/* DECK DUMACH */
__device__ double dumach_(struct cuLsodaCommonBlock *common)
{
/* System generated locals */
double ret_val = 0.;
/* Local variables */
double u = 0.;
double comp = 0.;
/* ***BEGIN PROLOGUE DUMACH */
/* ***PURPOSE Compute the unit roundoff of the machine. */
/* ***CATEGORY R1 */
/* ***TYPE DOUBLE PRECISION (RUMACH-S, DUMACH-D) */
/* ***KEYWORDS MACHINE CONSTANTS */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* *Usage: */
/* DOUBLE PRECISION A, DUMACH */
/* A = DUMACH() */
/* *Function Return Values: */
/* A : the unit roundoff of the machine. */
/* *Description: */
/* The unit roundoff is defined as the smallest positive machine */
/* number u such that 1.0 + u .ne. 1.0. This is computed by DUMACH */
/* in a machine-independent manner. */
/* ***REFERENCES (NONE) */
/* ***ROUTINES CALLED DUMSUM */
/* ***REVISION HISTORY (YYYYMMDD) */
/* 19930216 DATE WRITTEN */
/* 19930818 Added SLATEC-format prologue. (FNF) */
/* 20030707 Added DUMSUM to force normal storage of COMP. (ACH) */
/* ***END PROLOGUE DUMACH */
/* ***FIRST EXECUTABLE STATEMENT DUMACH */
u = 1.;
L10:
u *= .5;
dumsum_(1., u, &comp, common);
if (comp != 1.) {
goto L10;
}
ret_val = u * 2.;
return ret_val;
/* ----------------------- End of Function DUMACH ------------------------ */
} /* dumach_ */
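/* Note (added comment): with IEEE-754 doubles the halving loop above stops
   at u = 2^-53 (where 1 + u rounds back to 1), so the returned value 2*u
   equals 2^-52, i.e. DBL_EPSILON from <float.h> (about 2.22e-16).  DUMSUM
   exists only to force COMP to be stored as a 64-bit double, so the
   comparison is not done in an extended-precision register. */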
/* DECK XSETF */
/* Subroutine */
//__device__ int xsetf_(int *mflag, struct cuLsodaCommonBlock *common)
//{
// int junk;
/* ***BEGIN PROLOGUE XSETF */
/* ***PURPOSE Reset the error print control flag. */
/* ***CATEGORY R3A */
/* ***TYPE ALL (XSETF-A) */
/* ***KEYWORDS ERROR CONTROL */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* XSETF sets the error print control flag to MFLAG: */
/* MFLAG=1 means print all messages (the default). */
/* MFLAG=0 means no printing. */
/* ***SEE ALSO XERRWD, XERRWV */
/* ***REFERENCES (NONE) */
/* ***ROUTINES CALLED IXSAV */
/* ***REVISION HISTORY (YYMMDD) */
/* 921118 DATE WRITTEN */
/* 930329 Added SLATEC format prologue. (FNF) */
/* 930407 Corrected SEE ALSO section. (FNF) */
/* 930922 Made user-callable, and other cosmetic changes. (FNF) */
/* ***END PROLOGUE XSETF */
/* Subroutines called by XSETF.. None */
/* Function routine called by XSETF.. IXSAV */
/* ----------------------------------------------------------------------- */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT XSETF */
// if (*mflag == 0 || *mflag == 1) {
// junk = ixsav_(2, mflag, 1, common);
// }
// return 0;
/* ----------------------- End of Subroutine XSETF ----------------------- */
//} /* xsetf_ */
/* DECK XSETUN */
/* Subroutine */
//__device__ int xsetun_(int *lun, struct cuLsodaCommonBlock *common)
//{
// int junk;
/* ***BEGIN PROLOGUE XSETUN */
/* ***PURPOSE Reset the logical unit number for error messages. */
/* ***CATEGORY R3B */
/* ***TYPE ALL (XSETUN-A) */
/* ***KEYWORDS ERROR CONTROL */
/* ***DESCRIPTION */
/* XSETUN sets the logical unit number for error messages to LUN. */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***SEE ALSO XERRWD, XERRWV */
/* ***REFERENCES (NONE) */
/* ***ROUTINES CALLED IXSAV */
/* ***REVISION HISTORY (YYMMDD) */
/* 921118 DATE WRITTEN */
/* 930329 Added SLATEC format prologue. (FNF) */
/* 930407 Corrected SEE ALSO section. (FNF) */
/* 930922 Made user-callable, and other cosmetic changes. (FNF) */
/* ***END PROLOGUE XSETUN */
/* Subroutines called by XSETUN.. None */
/* Function routine called by XSETUN.. IXSAV */
/* ----------------------------------------------------------------------- */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT XSETUN */
// if (*lun > 0) {
// junk = ixsav_(1, lun, 1, common);
// }
// return 0;
/* ----------------------- End of Subroutine XSETUN ---------------------- */
//} /* xsetun_ */
/* DECK IXSAV */
//__device__ int ixsav_(int ipar, int *ivalue, int iset, struct cuLsodaCommonBlock *common)
//{
// /* Initialized data */
//
// int lunit = -1;
// int mesflg = 1;
//
// /* System generated locals */
// int ret_val = 0;
//
// /* Local variables */
//
//
// /* ***BEGIN PROLOGUE IXSAV */
// /* ***SUBSIDIARY */
// /* ***PURPOSE Save and recall error message control parameters. */
// /* ***CATEGORY R3C */
// /* ***TYPE ALL (IXSAV-A) */
// /* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
// /* ***DESCRIPTION */
//
// /* IXSAV saves and recalls one of two error message parameters: */
// /* LUNIT, the logical unit number to which messages are printed, and */
// /* MESFLG, the message print flag. */
// /* This is a modification of the SLATEC library routine J4SAVE. */
//
// /* Saved local variables.. */
// /* LUNIT = Logical unit number for messages. The default is obtained */
// /* by a call to IUMACH (may be machine-dependent). */
// /* MESFLG = Print control flag.. */
// /* 1 means print all messages (the default). */
// /* 0 means no printing. */
//
// /* On input.. */
// /* IPAR = Parameter indicator (1 for LUNIT, 2 for MESFLG). */
// /* IVALUE = The value to be set for the parameter, if ISET = .TRUE. */
// /* ISET = Logical flag to indicate whether to read or write. */
// /* If ISET = .TRUE., the parameter will be given */
// /* the value IVALUE. If ISET = .FALSE., the parameter */
// /* will be unchanged, and IVALUE is a dummy argument. */
//
// /* On return.. */
// /* IXSAV = The (old) value of the parameter. */
//
// /* ***SEE ALSO XERRWD, XERRWV */
// /* ***ROUTINES CALLED IUMACH */
// /* ***REVISION HISTORY (YYMMDD) */
// /* 921118 DATE WRITTEN */
// /* 930329 Modified prologue to SLATEC format. (FNF) */
// /* 930915 Added IUMACH call to get default output unit. (ACH) */
// /* 930922 Minor cosmetic changes. (FNF) */
// /* 010425 Type declaration for IUMACH added. (ACH) */
// /* ***END PROLOGUE IXSAV */
//
// /* Subroutines called by IXSAV.. None */
// /* Function routine called by IXSAV.. IUMACH */
// /* ----------------------------------------------------------------------- */
// /* **End */
// /* ----------------------------------------------------------------------- */
// /* ----------------------------------------------------------------------- */
// /* The following Fortran-77 declaration is to cause the values of the */
// /* listed (local) variables to be saved between calls to this routine. */
// /* ----------------------------------------------------------------------- */
//
// /* ***FIRST EXECUTABLE STATEMENT IXSAV */
// if (ipar == 1) {
// if (lunit == -1) {
// lunit = 6;
// }
// ret_val = lunit;
// if (iset) {
// lunit = *ivalue;
// }
// }
//
// if (ipar == 2) {
// ret_val = mesflg;
// if (iset) {
// mesflg = *ivalue;
// }
// }
//
// return ret_val;
// /* ----------------------- End of Function IXSAV ------------------------- */
//} /* ixsav_ */
/* DECK IDAMAX */
__device__ int idamax_(int *PARAM_n, double *dx, int incx, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int ret_val = 0;
int i__1 = 0;
	double d__1 = 0.;
/* Local variables */
int i__ = 0;
int ix = 0;
double dmax__ = 0.;
double xmag = 0.;
/* ***BEGIN PROLOGUE IDAMAX */
/* ***PURPOSE Find the smallest index of that component of a vector */
/* having the maximum magnitude. */
/* ***CATEGORY D1A2 */
/* ***TYPE DOUBLE PRECISION (ISAMAX-S, IDAMAX-D, ICAMAX-C) */
/* ***KEYWORDS BLAS, LINEAR ALGEBRA, MAXIMUM COMPONENT, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* --Output-- */
/* IDAMAX smallest index (zero if N .LE. 0) */
/* Find smallest index of maximum magnitude of double precision DX. */
/* IDAMAX = first I, I = 1 to N, to maximize ABS(DX(IX+(I-1)*INCX)), */
/* where IX = 1 if INCX .GE. 0, else IX = 1+(1-N)*INCX. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890531 Changed all specific intrinsics to generic. (WRB) */
/* 890531 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900821 Modified to correct problem with a negative increment. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE IDAMAX */
/* ***FIRST EXECUTABLE STATEMENT IDAMAX */
/* Parameter adjustments */
//--dx;
/* Function Body */
ret_val = 0;
if (*PARAM_n <= 0) {
return ret_val;
}
ret_val = 1;
if (*PARAM_n == 1) {
return ret_val;
}
if (incx == 1) {
goto L20;
}
/* Code for increments not equal to 1. */
ix = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
dmax__ = (d__1 = dx[ix -1], fabs(d__1));
ix += incx;
i__1 = *PARAM_n;
for (i__ = 2; i__ <= i__1; ++i__) {
xmag = (d__1 = dx[ix -1], fabs(d__1));
if (xmag > dmax__) {
ret_val = i__;
dmax__ = xmag;
}
ix += incx;
/* L10: */
}
return ret_val;
/* Code for increments equal to 1. */
L20:
dmax__ = fabs(dx[0]);
i__1 = *PARAM_n;
for (i__ = 2; i__ <= i__1; ++i__) {
xmag = (d__1 = dx[i__ -1], fabs(d__1));
if (xmag > dmax__) {
ret_val = i__;
dmax__ = xmag;
}
/* L30: */
}
return ret_val;
} /* idamax_ */
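/* Convention note (added comment): like the reference Fortran BLAS, IDAMAX
   returns a 1-based index (0 only when N <= 0).  That is why DGEFA and DGBFA
   above write, for example,
       DGEFA_l = idamax_(&i__2, &a[k + k * a_dim1 - a_offset], 1, common) + k - 1;
   to turn the position inside the pivot sub-column back into a 1-based row
   index of the full matrix. */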
/* DECK DAXPY */
/* Subroutine */
__device__ int daxpy_(int *PARAM_n, double *da, double *dx, int incx, double *dy, int incy, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
int i__2 = 0;
/* Local variables */
int i__ = 0;
int m = 0;
int ix = 0;
int iy = 0;
int ns = 0;
int mp1 = 0;
/* ***BEGIN PROLOGUE DAXPY */
/* ***PURPOSE Compute a constant times a vector plus a vector. */
/* ***CATEGORY D1A7 */
/* ***TYPE DOUBLE PRECISION (SAXPY-S, DAXPY-D, CAXPY-C) */
/* ***KEYWORDS BLAS, LINEAR ALGEBRA, TRIAD, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DA double precision scalar multiplier */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* DY double precision vector with N elements */
/* INCY storage spacing between elements of DY */
/* --Output-- */
/* DY double precision result (unchanged if N .LE. 0) */
/* Overwrite double precision DY with double precision DA*DX + DY. */
/* For I = 0 to N-1, replace DY(LY+I*INCY) with DA*DX(LX+I*INCX) + */
/* DY(LY+I*INCY), */
/* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is */
/* defined in a similar way using INCY. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 920310 Corrected definition of LX in DESCRIPTION. (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DAXPY */
/* ***FIRST EXECUTABLE STATEMENT DAXPY */
/* Parameter adjustments */
//--dy;
//--dx;
/* Function Body */
if (*PARAM_n <= 0 || *da == 0.) {
return 0;
}
if (incx == incy) {
if ((i__1 = incx - 1) < 0) {
goto L5;
} else if (i__1 == 0) {
goto L20;
} else {
goto L60;
}
}
/* Code for unequal or nonpositive increments. */
L5:
ix = 1;
iy = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
if (incy < 0) {
iy = (-(*PARAM_n) + 1) * incy + 1;
}
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
dy[iy -1] += *da * dx[ix -1];
ix += incx;
iy += incy;
/* L10: */
}
return 0;
/* Code for both increments equal to 1. */
/* Clean-up loop so remaining vector length is a multiple of 4. */
L20:
m = *PARAM_n % 4;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i__ = 1; i__ <= i__1; ++i__) {
dy[i__ -1] += *da * dx[i__ -1];
/* L30: */
}
if (*PARAM_n < 4) {
return 0;
}
L40:
mp1 = m + 1;
i__1 = *PARAM_n;
for (i__ = mp1; i__ <= i__1; i__ += 4) {
dy[i__ -1] += *da * dx[i__ -1];
dy[i__ + 1 -1] += *da * dx[i__ + 1 -1];
dy[i__ + 2 -1] += *da * dx[i__ + 2 -1];
dy[i__ + 3 -1] += *da * dx[i__ + 3 -1];
/* L50: */
}
return 0;
/* Code for equal, positive, non-unit increments. */
L60:
ns = *PARAM_n * incx;
i__1 = ns;
i__2 = incx;
for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
dy[i__ -1] = *da * dx[i__ -1] + dy[i__ -1];
/* L70: */
}
return 0;
} /* daxpy_ */
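/* Stride-handling note (added comment): DAXPY above and DSCAL/DDOT below all
   follow the same reference-BLAS pattern: a general loop for unequal or
   non-positive increments, an unrolled loop for unit stride, and a fused
   loop for equal positive strides.  For a negative increment the 1-based
   start index is
       ix = (-(n) + 1) * incx + 1;        equivalent to 1 + (1-N)*INCX
   so the traversal walks backwards through DX and finishes at dx[0]. */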
/* Subroutine */
__device__ int dumsum_(double a, double b, double *c__, struct cuLsodaCommonBlock *common)
{
/* Routine to force normal storing of A + B, for DUMACH. */
*c__ = a + b;
return 0;
} /* dumsum_ */
/* DECK DSCAL */
/* Subroutine */
__device__ int dscal_(int *PARAM_n, double *da, double *dx, int incx, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
/* Local variables */
int i__ = 0;
int m = 0;
int ix = 0;
int mp1 = 0;
/* ***BEGIN PROLOGUE DSCAL */
/* ***PURPOSE Multiply a vector by a constant. */
/* ***CATEGORY D1A6 */
/* ***TYPE DOUBLE PRECISION (SSCAL-S, DSCAL-D, CSCAL-C) */
/* ***KEYWORDS BLAS, LINEAR ALGEBRA, SCALE, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DA double precision scale factor */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* --Output-- */
/* DX double precision result (unchanged if N.LE.0) */
/* Replace double precision DX by double precision DA*DX. */
/* For I = 0 to N-1, replace DX(IX+I*INCX) with DA * DX(IX+I*INCX), */
/* where IX = 1 if INCX .GE. 0, else IX = 1+(1-N)*INCX. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900821 Modified to correct problem with a negative increment. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DSCAL */
/* ***FIRST EXECUTABLE STATEMENT DSCAL */
/* Parameter adjustments */
//--dx;
/* Function Body */
if (*PARAM_n <= 0) {
return 0;
}
if (incx == 1) {
goto L20;
}
/* Code for increment not equal to 1. */
ix = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
dx[ix -1] = *da * dx[ix -1];
ix += incx;
/* L10: */
}
return 0;
/* Code for increment equal to 1. */
/* Clean-up loop so remaining vector length is a multiple of 5. */
L20:
m = *PARAM_n % 5;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i__ = 1; i__ <= i__1; ++i__) {
dx[i__ -1] = *da * dx[i__ -1];
/* L30: */
}
if (*PARAM_n < 5) {
return 0;
}
L40:
mp1 = m + 1;
i__1 = *PARAM_n;
for (i__ = mp1; i__ <= i__1; i__ += 5) {
dx[i__ -1] = *da * dx[i__ -1];
dx[i__ + 1 -1] = *da * dx[i__ + 1 -1];
dx[i__ + 2 -1] = *da * dx[i__ + 2 -1];
dx[i__ + 3 -1] = *da * dx[i__ + 3 -1];
dx[i__ + 4 -1] = *da * dx[i__ + 4 -1];
/* L50: */
}
return 0;
} /* dscal_ */
/* DECK DDOT */
__device__ double ddot_(int *PARAM_n, double *dx, int incx, double *dy, int incy, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
int i__2 = 0;
double ret_val = 0.;
/* Local variables */
int i__ = 0;
int m = 0;
int ix = 0;
int iy = 0;
int ns = 0;
int mp1 = 0;
/* ***BEGIN PROLOGUE DDOT */
/* ***PURPOSE Compute the inner product of two vectors. */
/* ***CATEGORY D1A4 */
/* ***TYPE DOUBLE PRECISION (SDOT-S, DDOT-D, CDOTU-C) */
/* ***KEYWORDS BLAS, INNER PRODUCT, LINEAR ALGEBRA, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* DY double precision vector with N elements */
/* INCY storage spacing between elements of DY */
/* --Output-- */
/* DDOT double precision dot product (zero if N .LE. 0) */
/* Returns the dot product of double precision DX and DY. */
/* DDOT = sum for I = 0 to N-1 of DX(LX+I*INCX) * DY(LY+I*INCY), */
/* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is */
/* defined in a similar way using INCY. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 920310 Corrected definition of LX in DESCRIPTION. (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DDOT */
/* ***FIRST EXECUTABLE STATEMENT DDOT */
/* Parameter adjustments */
//--dy;
//--dx;
/* Function Body */
ret_val = 0.;
if (*PARAM_n <= 0) {
return ret_val;
}
if (incx == incy) {
if ((i__1 = incx - 1) < 0) {
goto L5;
} else if (i__1 == 0) {
goto L20;
} else {
goto L60;
}
}
/* Code for unequal or nonpositive increments. */
L5:
ix = 1;
iy = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
if (incy < 0) {
iy = (-(*PARAM_n) + 1) * incy + 1;
}
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
ret_val += dx[ix -1] * dy[iy -1];
ix += incx;
iy += incy;
/* L10: */
}
return ret_val;
/* Code for both increments equal to 1. */
/* Clean-up loop so remaining vector length is a multiple of 5. */
L20:
m = *PARAM_n % 5;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i__ = 1; i__ <= i__1; ++i__) {
ret_val += dx[i__ -1] * dy[i__ -1];
/* L30: */
}
if (*PARAM_n < 5) {
return ret_val;
}
L40:
mp1 = m + 1;
i__1 = *PARAM_n;
for (i__ = mp1; i__ <= i__1; i__ += 5)
{
ret_val = ret_val + dx[i__ -1] * dy[i__ -1] + dx[i__ + 1 -1] * dy[i__ + 1 -1] +
dx[i__ + 2 -1] * dy[i__ + 2 -1] + dx[i__ + 3 -1] * dy[i__ + 3 -1] + dx[i__ + 4 -1] * dy[i__ + 4 -1];
/* L50: */
}
return ret_val;
/* Code for equal, positive, non-unit increments. */
L60:
ns = *PARAM_n * incx;
i__1 = ns;
i__2 = incx;
for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2)
{
ret_val += dx[i__ -1] * dy[i__ -1];
/* L70: */
}
return ret_val;
} /* ddot_ */
__device__ double d_sign(double *a, double *b)
{
double x = 0.;
x = (*a >= 0 ? *a : - *a);
return( *b >= 0 ? x : -x);
}
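/* (Added comment) d_sign reproduces the Fortran intrinsic SIGN(A,B): it
   returns the magnitude of A carrying the sign of B, so with a = 1. the
   result is +1. or -1. according to the sign of b. */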
__host__ __device__ void cuLsodaCommonBlockInit(struct cuLsodaCommonBlock *common, unsigned int threads)
{
// for (unsigned int i=0; i<THREADS*BLOCKS; i++) {
for (unsigned int i=0; i<threads; i++) {
// for (unsigned int i=0; i<blockDim.x*gridDim.x; i++) {
/* common[i] block initialization */
for (int bugger = 0; bugger < 13; bugger++)
{
common[i].CM_el[bugger] = 0.;
}
for (int bugger = 0; bugger < 156; bugger++)
{
common[i].CM_elco[bugger] = 0.;
}
for (int bugger = 0; bugger < 36; bugger++)
{
common[i].CM_tesco[bugger] = 0.;
}
for (int bugger = 0; bugger < 218; bugger++)
{
common[i].CM_rls[bugger] = 0.;
}
for (int bugger = 0; bugger < 12; bugger++)
{
common[i].CM_cm1[bugger] = 0.;
}
for (int bugger = 0; bugger < 5; bugger++)
{
common[i].CM_cm2[bugger] = 0.;
}
for (int bugger = 0; bugger < 22; bugger++)
{
common[i].CM_rlsa[bugger] = 0.;
}
for (int bugger = 0; bugger < 37; bugger++)
{
common[i].CM_ils[bugger] = 0;
}
for (int bugger = 0; bugger < 9; bugger++)
{
common[i].CM_ilsa[bugger] = 0;
}
double smThing[12] = { .5,.575,.55,.45,.35,.25,.2,.15,.1,.075,.05,.025 };
for(int bob = 0; bob <12; bob ++)
{
common[i].CM_sm1[bob] = smThing[bob];
}
// initialize doubles in the common[i] block to zero
common[i].CM_conit = 0.;
common[i].CM_crate = 0.;
common[i].CM_ccmax = 0.;
common[i].CM_el0 = 0.;
common[i].CM_h__ = 0.;
common[i].CM_hmin = 0.;
common[i].CM_hmxi = 0.;
common[i].CM_hu = 0.;
common[i].CM_rc = 0.;
common[i].CM_tn = 0.;
common[i].CM_uround = 0.;
common[i].CM_pdest = 0.;
common[i].CM_pdlast = 0.;
common[i].CM_ratio = 0.;
common[i].CM_hold = 0.;
common[i].CM_rmax = 0.;
common[i].CM_tsw = 0.;
common[i].CM_pdnorm = 0.;
// initialize ints in common[i] block to zero
common[i].CM_init = 0;
common[i].CM_mxstep = 0;
common[i].CM_mxhnil = 0;
common[i].CM_nhnil = 0;
common[i].CM_nslast = 0;
common[i].CM_nyh = 0;
common[i].CM_icf = 0;
common[i].CM_ierpj = 0;
common[i].CM_iersl = 0;
common[i].CM_jcur = 0;
common[i].CM_jstart = 0;
common[i].CM_kflag = 0;
common[i].CM_l = 0;
common[i].CM_lyh = 0;
common[i].CM_lewt = 0;
common[i].CM_lacor = 0;
common[i].CM_lsavf = 0;
common[i].CM_lwm = 0;
common[i].CM_liwm = 0;
common[i].CM_meth = 0;
common[i].CM_miter = 0;
common[i].CM_maxord = 0;
common[i].CM_maxcor = 0;
common[i].CM_msbp = 0;
common[i].CM_mxncf = 0;
common[i].CM_n = 0;
common[i].CM_nq = 0;
common[i].CM_nst = 0;
common[i].CM_nfe = 0;
common[i].CM_nje = 0;
common[i].CM_nqu = 0;
common[i].CM_ialth = 0;
common[i].CM_ipup = 0;
common[i].CM_lmax = 0;
common[i].CM_nqnyh = 0;
common[i].CM_nslp = 0;
common[i].CM_insufr = 0;
common[i].CM_insufi = 0;
common[i].CM_ixpr = 0;
common[i].CM_jtyp = 0;
common[i].CM_mused = 0;
common[i].CM_mxordn = 0;
common[i].CM_mxords = 0;
common[i].CM_icount = 0;
common[i].CM_irflag = 0;
/* End Common Block initialization */
}
}
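/* Host-side usage sketch (an assumption; variable names are illustrative):
   one common block is kept per simulated thread, initialized on the host and
   then copied to the device before launching the solver kernel.
     unsigned int threads = blocks * tpb;
     struct cuLsodaCommonBlock *h_common = (struct cuLsodaCommonBlock*)
         malloc(sizeof(struct cuLsodaCommonBlock) * threads);
     cuLsodaCommonBlockInit(h_common, threads);
     struct cuLsodaCommonBlock *d_common;
     hipMalloc((void**)&d_common, sizeof(struct cuLsodaCommonBlock) * threads);
     hipMemcpy(d_common, h_common, sizeof(struct cuLsodaCommonBlock) * threads,
               hipMemcpyHostToDevice);
*/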
#endif
#endif
/*
This is the entry point of cuLsoda.
All of the LSODA state variables have been changed into multidimensional (per-thread) arrays, in order to exploit the SIMD architecture.
*/
//template <bool ACTIVATE_SHARED_MEMORY>
__global__ void cuLsoda(myFex fex, int *neq, double *y, double *t, double *tout, int *itol, double *rtol,
double *atol, int *itask, int *istate, int *iopt, double *rwork, int *lrw, int *iwork, int *liw, myJex jac, int *jt, struct cuLsodaCommonBlock *common, int* debug, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, double* costanti, conc_t* device_X, unsigned int campione, unsigned int* s2s,
param_t* myjac, unsigned int* myjacoffset, bool ACTIVATE_SHARED_MEMORY, bool ACTIVATE_CONSTANT_MEMORY)
{
// unsigned int tid = threadIdx.x;
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
// EXPERIMENTAL
if (tid>=DEV_ACTUAL_THREADS) {
return;
}
/* We move time and state out of global memory by putting them into the faster shared memory. */
//if (ACTIVATE_SHARED_MEMORY) {
// #ifdef USE_SHARED_MEMORY
//double* sh_y = (double*)shared;
//double* sh_t = (double*)&sh_y[DEV_CONST_SPECIES*blockDim.x];
// #endif
//}
double* sh_y = (double*)shared;
double* sh_t = (double*)&sh_y[DEV_CONST_SPECIES*blockDim.x];
/* Load shared memory */
if (ACTIVATE_SHARED_MEMORY) {
// #ifdef USE_SHARED_MEMORY
for (unsigned int i=0; i<DEV_CONST_SPECIES; i++) {
sh_y[threadIdx.x*DEV_CONST_SPECIES+i] = y[tid*DEV_CONST_SPECIES+i]; // watch out! different offsets!
}
sh_t[threadIdx.x] = t[tid];
}
//#endif
if (ACTIVATE_SHARED_MEMORY) {
if (ACTIVATE_CONSTANT_MEMORY) {
dlsoda_(fex, neq+tid, sh_y+threadIdx.x*DEV_CONST_SPECIES, sh_t+threadIdx.x, tout+tid, itol+tid, rtol+tid, atol+tid*DEV_CONST_SPECIES, itask+tid, istate+tid,
iopt+tid, rwork+(*(lrw+tid))*tid, lrw+tid, iwork+(*(liw+tid))*tid, liw+tid, jac, jt+tid, common+tid, debug, comp_ode, ODE_new, offsetODE, costanti+tid*DEV_CONST_REACTIONS, JAC_new, myjacoffset);
} else {
dlsoda_(fex, neq+tid, sh_y+threadIdx.x*DEV_CONST_SPECIES, sh_t+threadIdx.x, tout+tid, itol+tid, rtol+tid, atol+tid*DEV_CONST_SPECIES, itask+tid, istate+tid,
iopt+tid, rwork+(*(lrw+tid))*tid, lrw+tid, iwork+(*(liw+tid))*tid, liw+tid, jac, jt+tid, common+tid, debug, comp_ode, flattenODE, offsetODE, costanti+tid*DEV_CONST_REACTIONS, myjac, myjacoffset);
}
} else {
dlsoda_(fex, neq+tid, y+tid*DEV_CONST_SPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid*DEV_CONST_SPECIES, itask+tid, istate+tid,
iopt+tid, rwork+(*(lrw+tid))*tid, lrw+tid, iwork+(*(liw+tid))*tid, liw+tid, jac, jt+tid, common+tid, debug, comp_ode, flattenODE, offsetODE, costanti+tid*DEV_CONST_REACTIONS, myjac, myjacoffset);
}
/* Unload shared memory */
if (ACTIVATE_SHARED_MEMORY) {
//#ifdef USE_SHARED_MEMORY
for (unsigned int i=0; i<DEV_CONST_SPECIES; i++) {
y[tid*DEV_CONST_SPECIES+i] = sh_y[threadIdx.x*DEV_CONST_SPECIES+i]; // watch out! different offsets!
}
t[tid]=sh_t[threadIdx.x];
// #endif
}
// store samples
unsigned int larg = blockDim.x * gridDim.x;
// #define ACCESS_SAMPLE larg*DEV_CONST_SAMPLESLUN*campione + larg*s + tid
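// Layout note (added documentation): the ACCESS_SAMPLE macro, presumably defined in a
// shared header and shown commented out above, indexes device_X with the thread id as the
// fastest-varying dimension, then species, then sample. Consecutive threads therefore write
// consecutive addresses in the loop below, which keeps the stores coalesced.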
for (unsigned int s=0; s<DEV_CONST_SAMPLESLUN; s++) {
if (ACTIVATE_SHARED_MEMORY) {
//#ifdef USE_SHARED_MEMORY
device_X[ ACCESS_SAMPLE ] = sh_y[threadIdx.x*DEV_CONST_SPECIES+s2s[s]];
} else {
// #else
device_X[ ACCESS_SAMPLE ] = y[tid*DEV_CONST_SPECIES+s2s[s]];
//#endif
}
}
// return;
}
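/* Launch sketch (an assumption, not taken from the original sources; host
   variable names are illustrative): when ACTIVATE_SHARED_MEMORY is true the
   kernel expects dynamic shared memory sized for the per-block copies of y
   (DEV_CONST_SPECIES doubles per thread) plus t (one double per thread):
     size_t shm = sizeof(double) * (species + 1) * threads_per_block;
     hipLaunchKernelGGL(cuLsoda, dim3(blocks), dim3(threads_per_block), shm, 0,
                        fex, d_neq, d_y, d_t, ...remaining kernel arguments...);
   With shared memory disabled, shm may be 0 and y/t are accessed directly in
   global memory. */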
bool CheckArguments(unsigned int argc, char** argv) {
if (argc<6) {
printf("ERROR: please specify:\n - path to BioSimWare project\n - number of CUDA blocks\n - output folder\n - output prefix (excluding folder)\n - GPU number\n - Fitness enabled (1) or disabled (0) \n - Memory configuration: (0) only global memory, (1) just shared memory, (2) both shared and constant memory\n");
return false;
}
unsigned int GPU = atoi(argv[5]);
// printf(" * Launching cupSODA batch of simulations on GPU%d\n", GPU);
hipSetDevice(GPU);
CudaCheckError();
return true;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////// FITNESS stuff //////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* This fitness function is specific to the PRR model. */
__global__ void calculateFitnessPRR( double* samples, double* target, double* fitness, char* swarm ) {
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int larg = blockDim.x * gridDim.x;
unsigned int experiment = swarm[tid];
double conc_u1 = 0;
double conc_u2 = 0;
double conc_u3 = 0;
double conc_all = 0;
unsigned int num_specie_u1 = 4;
unsigned int num_specie_u2 = 3;
unsigned int num_specie_u3 = 2;
unsigned int num_swarms = 3;
unsigned int totale_specie = 3;
fitness[tid] = 0;
for (unsigned int campione=0; campione<DEV_CONST_TIMESLUN; campione++) {
unsigned int offset=campione*(num_swarms*totale_specie) + experiment*totale_specie;
// if (threadIdx.x==0) printf("%f\n", target[offset]);
if (target[offset]==-1) continue;
for (unsigned int s=0; s<num_specie_u1; s++) {
// conc_u1 += samples[ tid*DEV_CONST_SPECIES+s ];
conc_u1 += samples[ ACCESS_SAMPLE ];
}
for (unsigned int s=num_specie_u1; s<num_specie_u1+num_specie_u2; s++) {
// conc_u2 += samples[ tid*DEV_CONST_SPECIES+s ];
conc_u2 += samples[ ACCESS_SAMPLE ];
}
for (unsigned int s=num_specie_u1+num_specie_u2; s<num_specie_u1+num_specie_u2+num_specie_u3; s++) {
// conc_u3 += samples[ tid*DEV_CONST_SPECIES+s ];
conc_u3 += samples[ ACCESS_SAMPLE ];
}
conc_all = (conc_u1+conc_u2+conc_u3);
double ratio_u1 = 0;
double ratio_u2 = 0;
double ratio_u3 = 0;
if(conc_all!=0) {
ratio_u1 = (conc_u1 / conc_all)*100;
ratio_u2 = (conc_u2 / conc_all)*100;
ratio_u3 = (conc_u3 / conc_all)*100;
}
/*if (tid==0){
printf("%d\n", campione);
printf("%f\t", target[offset]);
printf("%f\t", target[offset+1]);
printf("%f\t", target[offset+2]);
printf("\n");
}*/
fitness[tid] +=
abs(target[offset+0]-ratio_u1) +
abs(target[offset+1]-ratio_u2) +
abs(target[offset+2]-ratio_u3);
}
};
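/* The two fetch helpers below read from flattened arrays; layout notes derived
   from their index arithmetic (added as documentation):
   - fetch_target addresses target[sample][experiment][repetition][species],
     species fastest: index = sample*(SAMPLESLUN*EXPERIMENTS*REPETITIONS)
     + experiment*(SAMPLESLUN*REPETITIONS) + repetition*SAMPLESLUN + species.
   - fetch_simulation addresses simulation[sample][species][thread], thread id
     fastest, matching the ACCESS_SAMPLE layout used when storing samples. */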
__device__ inline double fetch_target(double* source, unsigned int species, unsigned int experiment, unsigned int repetition, unsigned int sample) {
return source[
sample*(DEV_CONST_SAMPLESLUN*DEV_CONST_EXPERIMENTS*DEV_CONST_REPETITIONS) +
experiment*(DEV_CONST_SAMPLESLUN*DEV_CONST_REPETITIONS) +
repetition*(DEV_CONST_SAMPLESLUN) + species ];
}
__device__ inline double fetch_simulation(double* source, unsigned int species, unsigned int sample) {
unsigned int gid = threadIdx.x + blockDim.x*blockIdx.x;
return source[ blockDim.x*gridDim.x*DEV_CONST_SAMPLESLUN*sample + blockDim.x*gridDim.x*species+ gid ];
}
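/* calculateFitness below accumulates, over all samples and sampled species, the
   relative absolute error |simulation - target| / target between each thread's
   trajectory and the target time series of its swarm D (read from swarm[tid]). */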
__global__ void calculateFitness( double* simulation, double* target, double* output_fitness, char* swarm ) {
unsigned int larg = blockDim.x * gridDim.x;
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int SWARMS = swarm[ larg-1 ] + 1;
unsigned int D = swarm[tid];
// const unsigned int WATCHED = 32;
double subfitness =0;
for (unsigned int campione=1; campione<DEV_CONST_SAMPLES; campione++) {
for (unsigned int s=0; s<DEV_CONST_SAMPLESLUN; s++) {
unsigned int sid = SWARMS*DEV_CONST_SAMPLESLUN*campione + DEV_CONST_SAMPLESLUN*D + s;
subfitness += abs( simulation[ACCESS_SAMPLE] - target[sid] ) /target[sid];
//if (tid==WATCHED && s==0) {
// if (tid==0) printf("%d, %d, %d, %f\t%f\n", campione, sid, s, simulation[ACCESS_SAMPLE], target[sid]);
//}
}
}
output_fitness[tid] = subfitness;
}
/*
__global__ void calculateFitness_old( double* samples, double* target, double* fitness, char* swarm ) {
const unsigned int WATCHED = 70;
unsigned int larg = blockDim.x * gridDim.x;
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int D = DEV_CONST_EXPERIMENTS;		// doses
unsigned int E = DEV_CONST_REPETITIONS;		// repetitions at each dose
unsigned int SAMPLESPECIES  = DEV_CONST_SAMPLESLUN;	// we only want the sampled species
unsigned int experiment = blockIdx.x; // "dose"
double subfitness =0;
// for each sample...
for (unsigned int campione=1; campione<DEV_CONST_SAMPLES; campione++) {
// ...for each (sampled) chemical species...
for (unsigned int s=0; s<DEV_CONST_SAMPLESPECIES; s++) {
// ACCESS_SAMPLE larg*DEV_CONST_SAMPLESLUN*campione + larg*s + tid
double sample = samples[ ACCESS_SAMPLE ];
if (tid==WATCHED && s==0) {
printf("%d, %d, %e\t", campione, s, sample);
}
// calculate the distance from target time series
for (unsigned int repetition=0; repetition<E; repetition++) {
double tgt = fetch_target(target, s, experiment, repetition, campione-1);
if (tid==WATCHED && s==0) {
printf("%e\n", tgt);
}
double dist = (double)abs(tgt-sample);
if (tgt>0) dist /= tgt;
subfitness += dist;
}
}
}
// divide by the number of species and by the number of samples
fitness[tid] = (1.0/DEV_CONST_TIMESLUN)*(1.0/DEV_CONST_SAMPLESLUN)*subfitness;
if (tid==WATCHED)
printf("FITNESS: %f\n", fitness[tid]);
// fitness[tid] = tid;
};
*/ | facb79341a97cfa64b1c0674030c6e78639ecd9d.cu | /*
cupSODA.cu: a port of LSODA to CUDA.
See file COPYING for copyright and licensing information.
*/
#include "constants.h"
#include "cupSODA.h"
#include <vector>
#include "input_reader.h"
#include "stoc2det.h"
#include <cstdio>
// extern char* device_compressed_odes;
// void save_constants(unsigned int c) { cudaMemcpyToSymbol(NUM_ODES, &c, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); }
void LoadSystem( st2det* system ) {
printf(" * Loading flatten ODEs and Jacobian on GPU\n");
// cudaMalloc((void**)&device_compressed_odes,sizeof(char) * size);
}
void SetODEarray(st2det* system ) {
#ifdef USE_CONSTANT_MEMORY
cudaMemcpyToSymbol( ODE_new, system->ODE, sizeof(param_t)*system->ODE_lun );
CudaCheckError() ;
cudaMemcpyToSymbol( JAC_new, system->JAC, sizeof(param_t)*system->JAC_lun );
CudaCheckError() ;
#endif
}
void SetConstants( unsigned int species, unsigned int reactions, unsigned int ode_lun, unsigned int jac_lun, unsigned int cs_lun, unsigned int time_in, unsigned int reps, unsigned int experiments, unsigned int threads, bool dump = false) {
cudaMemcpyToSymbol( DEV_CONST_SPECIES, &species, sizeof(species));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_REACTIONS, &reactions, sizeof(reactions));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_ODELUN, &ode_lun, sizeof(ode_lun));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_JACLUN, &jac_lun, sizeof(jac_lun));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_SAMPLESLUN,&cs_lun, sizeof(cs_lun));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_TIMESLUN, &time_in, sizeof(time_in));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_SAMPLESPECIES, &cs_lun, sizeof(cs_lun));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_SAMPLES, &time_in, sizeof(time_in));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_REPETITIONS , &reps, sizeof(reps));
CudaCheckError();
cudaMemcpyToSymbol( DEV_CONST_EXPERIMENTS , &experiments, sizeof(experiments));
CudaCheckError();
cudaMemcpyToSymbol( DEV_ACTUAL_THREADS, &threads, sizeof(threads));
CudaCheckError();
unsigned int s_back = 0;
unsigned int r_back = 0;
unsigned int ol_back = 0;
unsigned int jl_back = 0;
unsigned int cs_back = 0;
unsigned int ti_back = 0;
unsigned int ss_back = 0;
cudaMemcpyFromSymbol( &s_back, DEV_CONST_SPECIES, sizeof(s_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: species was set to %d\n", s_back);
cudaMemcpyFromSymbol( &r_back, DEV_CONST_REACTIONS, sizeof(r_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: reactions was set to %d\n", r_back);
cudaMemcpyFromSymbol( &ol_back, DEV_CONST_ODELUN, sizeof(ol_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: ode_lung was set to %d\n", ol_back);
cudaMemcpyFromSymbol( &jl_back, DEV_CONST_JACLUN, sizeof(jl_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: jac_lung was set to %d\n", jl_back);
cudaMemcpyFromSymbol( &cs_back, DEV_CONST_SAMPLESLUN, sizeof(cs_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: samples to be sampled was set to %d\n", cs_back);
cudaMemcpyFromSymbol( &ti_back, DEV_CONST_TIMESLUN, sizeof(ti_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: ti_lun was set to %d\n", ti_back);
cudaMemcpyFromSymbol( &ss_back, DEV_CONST_SAMPLESPECIES, sizeof(ss_back) );
CudaCheckError();
if (dump)
printf( " * Verified on GPU: number of sampled species was set to %d\n", ss_back);
}
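/* Usage sketch (an assumption; the host variable names are illustrative):
     SetConstants(species, reactions,
                  ode_lun, jac_lun,
                  sampled_species, time_points,
                  repetitions, experiments,
                  blocks * tpb,     // actual number of simulated threads
                  true);            // dump the values read back from the GPU
   This must run before the first cuLsoda launch, since the kernel reads the
   DEV_CONST_* and DEV_ACTUAL_THREADS symbols that are set here. */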
/*
void load_compressed_odes( InputReader* ir ) {
unsigned int size = ir->GetCompODESize();
// Step 1: allocate memory
char* host_compressed_odes = (char*) malloc ( sizeof(char) * size);
cudaMalloc((void**)&device_compressed_odes,sizeof(char) * size);
unsigned int pos = 0;
// Step 2: fill array
printf(" * Loading compressed ODE matrix\n");
// for each row...
for (unsigned int r=0; r<ir->comp_ODE.size(); r++) {
for (unsigned int c=0; c<ir->comp_ODE[r]->size(); c++) {
printf("%d ", ir->comp_ODE[r]->at(c));
host_compressed_odes[pos++] = ir->comp_ODE[r]->at(c);
}
printf("\n");
}
// Step 3: copy on GPU
cudaMemcpy(device_compressed_odes,host_compressed_odes,sizeof(char) * size,cudaMemcpyHostToDevice);
// Step 4: save array size
cudaMemcpyToSymbol(COMP_ODE_SIZE, &size, sizeof(unsigned int), 0, cudaMemcpyHostToDevice);
// Step 5: clean up stuff
free( host_compressed_odes );
}
*/
#ifndef use_export
#ifndef CULSODA_CU
#define CULSODA_CU
#include <math.h>
#include <stdio.h>
// Begin Define block for common variables
#define conit common->CM_conit
#define crate common->CM_crate
#define ccmax common->CM_ccmax
#define el0 common->CM_el0
#define h__ common->CM_h__
#define hmin common->CM_hmin
#define hmxi common->CM_hmxi
#define hu common->CM_hu
#define rc common->CM_rc
#define tn common->CM_tn
#define uround common->CM_uround
#define pdest common->CM_pdest
#define pdlast common->CM_pdlast
#define ratio common->CM_ratio
#define hold common->CM_hold
#define rmax common->CM_rmax
#define el common->CM_el
#define elco common->CM_elco
#define tesco common->CM_tesco
#define rls common->CM_rls
#define tsw common->CM_tsw
#define pdnorm common->CM_pdnorm
#define cm1 common->CM_cm1
#define cm2 common->CM_cm2
#define rlsa common->CM_rlsa
#define sm1 common->CM_sm1
#define init common->CM_init
#define mxstep common->CM_mxstep
#define mxhnil common->CM_mxhnil
#define nhnil common->CM_nhnil
#define nslast common->CM_nslast
#define nyh common->CM_nyh
#define icf common->CM_icf
#define ierpj common->CM_ierpj
#define iersl common->CM_iersl
#define jcur common->CM_jcur
#define jstart common->CM_jstart
#define kflag common->CM_kflag
#define l common->CM_l
#define lyh common->CM_lyh
#define lewt common->CM_lewt
#define lacor common->CM_lacor
#define lsavf common->CM_lsavf
#define lwm common->CM_lwm
#define liwm common->CM_liwm
#define meth common->CM_meth
#define miter common->CM_miter
#define maxord common->CM_maxord
#define maxcor common->CM_maxcor
#define msbp common->CM_msbp
#define mxncf common->CM_mxncf
#define n common->CM_n
#define nq common->CM_nq
#define nst common->CM_nst
#define nfe common->CM_nfe
#define nje common->CM_nje
#define nqu common->CM_nqu
#define ialth common->CM_ialth
#define ipup common->CM_ipup
#define lmax common->CM_lmax
#define nqnyh common->CM_nqnyh
#define nslp common->CM_nslp
#define ils common->CM_ils
#define insufr common->CM_insufr
#define insufi common->CM_insufi
#define ixpr common->CM_ixpr
#define jtyp common->CM_jtyp
#define mused common->CM_mused
#define mxordn common->CM_mxordn
#define mxords common->CM_mxords
#define icount common->CM_icount
#define irflag common->CM_irflag
#define ilsa common->CM_ilsa
// End of Definitions
#ifdef use_export
export
#endif
__device__ int dlsoda_(myFex f, int *neq, double *y, double *t, double *tout, int *itol, double *rtol, double *atol, int *itask, int *istate, int *iopt, double *rwork, int *lrw, int *iwork, int *liw, myJex jac, int *jt, struct cuLsodaCommonBlock *common, int* debug, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, double* costanti, param_t* myjac, unsigned int* myjacoffset)
{
/* Initialized data */
//struct cuLsodaCommonBlock commonB;
//struct cuLsodaCommonBlock *common;
//common = &commonB;
int indice = threadIdx.x + blockIdx.x*blockDim.x;
int mord[2] = { 12,5 };
int mxstp0 = 500;
int mxhnl0 = 10;
/* System generated locals */
int i__1 = 0;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__;
double h0 = 0.;
int i1 = 0;
int i2 = 0;
double w0 = 0.;
int ml = 0;
double rh = 0.;
int mu = 0;
double tp = 0.;
int lf0 = 0;
double big = 0.;
int kgo = 0;
double ayi = 0.;
double hmx = 0.;
double tol = 0.;
double sum = 0.;
int len1 = 0;
int len2 = 0;
double hmax = 0.;
int ihit = 0;
double ewti = 0.;
double size = 0.;
int len1c = 0;
int len1n = 0;
int len1s = 0;
int iflag;
double atoli = 0.;
int leniw = 0;
int lenwm = 0;
int imxer = 0;
double tcrit = 0.;
int lenrw = 0;
double tdist = 0.;
double rtoli = 0.;
double tolsf = 0.;
double tnext = 0.;
int leniwc = 0;
int lenrwc = 0;
/* ----------------------------------------------------------------------- */
/* This is the 12 November 2003 version of */
/* DLSODA: Livermore Solver for Ordinary Differential Equations, with */
/* Automatic method switching for stiff and nonstiff problems. */
/* This version is in double precision. */
/* DLSODA solves the initial value problem for stiff or nonstiff */
/* systems of first order ODEs, */
/* dy/dt = f(t,y) , or, in component form, */
/* dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(NEQ)) (i = 1,...,NEQ). */
/* This is a variant version of the DLSODE package. */
/* It switches automatically between stiff and nonstiff methods. */
/* This means that the user does not have to determine whether the */
/* problem is stiff or not, and the solver will automatically choose the */
/* appropriate method. It always starts with the nonstiff method. */
/* Authors: Alan C. Hindmarsh */
/* Center for Applied Scientific Computing, L-561 */
/* Lawrence Livermore National Laboratory */
/* Livermore, CA 94551 */
/* and */
/* Linda R. Petzold */
/* Univ. of California at Santa Barbara */
/* Dept. of Computer Science */
/* Santa Barbara, CA 93106 */
/* References: */
/* 1. Alan C. Hindmarsh, ODEPACK, A Systematized Collection of ODE */
/* Solvers, in Scientific Computing, R. S. Stepleman et al. (Eds.), */
/* North-Holland, Amsterdam, 1983, pp. 55-64. */
/* 2. Linda R. Petzold, Automatic Selection of Methods for Solving */
/* Stiff and Nonstiff Systems of Ordinary Differential Equations, */
/* Siam J. Sci. Stat. Comput. 4 (1983), pp. 136-148. */
/* ----------------------------------------------------------------------- */
/* Summary of Usage. */
/* Communication between the user and the DLSODA package, for normal */
/* situations, is summarized here. This summary describes only a subset */
/* of the full set of options available. See the full description for */
/* details, including alternative treatment of the Jacobian matrix, */
/* optional inputs and outputs, nonstandard options, and */
/* instructions for special situations. See also the example */
/* problem (with program and output) following this summary. */
/* A. First provide a subroutine of the form: */
/* SUBROUTINE F (NEQ, T, Y, YDOT) */
/* DOUBLE PRECISION T, Y(*), YDOT(*) */
/* which supplies the vector function f by loading YDOT(i) with f(i). */
/* B. Write a main program which calls Subroutine DLSODA once for */
/* each point at which answers are desired. This should also provide */
/* for possible use of logical unit 6 for output of error messages */
/* by DLSODA. On the first call to DLSODA, supply arguments as follows: */
/* F = name of subroutine for right-hand side vector f. */
/* This name must be declared External in calling program. */
/* NEQ = number of first order ODEs. */
/* Y = array of initial values, of length NEQ. */
/* T = the initial value of the independent variable. */
/* TOUT = first point where output is desired (.ne. T). */
/* ITOL = 1 or 2 according as ATOL (below) is a scalar or array. */
/* RTOL = relative tolerance parameter (scalar). */
/* ATOL = absolute tolerance parameter (scalar or array). */
/* the estimated local error in y(i) will be controlled so as */
/* to be less than */
/* EWT(i) = RTOL*ABS(Y(i)) + ATOL if ITOL = 1, or */
/* EWT(i) = RTOL*ABS(Y(i)) + ATOL(i) if ITOL = 2. */
/* Thus the local error test passes if, in each component, */
/* either the absolute error is less than ATOL (or ATOL(i)), */
/* or the relative error is less than RTOL. */
/* Use RTOL = 0.0 for pure absolute error control, and */
/* use ATOL = 0.0 (or ATOL(i) = 0.0) for pure relative error */
/* control. Caution: actual (global) errors may exceed these */
/* local tolerances, so choose them conservatively. */
/* ITASK = 1 for normal computation of output values of y at t = TOUT. */
/* ISTATE = int flag (input and output). Set ISTATE = 1. */
/* IOPT = 0 to indicate no optional inputs used. */
/* RWORK = real work array of length at least: */
/* 22 + NEQ * MAX(16, NEQ + 9). */
/* See also Paragraph E below. */
/* LRW = declared length of RWORK (in user's dimension). */
/* IWORK = int work array of length at least 20 + NEQ. */
/* LIW = declared length of IWORK (in user's dimension). */
/* JAC = name of subroutine for Jacobian matrix. */
/* Use a dummy name. See also Paragraph E below. */
/* JT = Jacobian type indicator. Set JT = 2. */
/* See also Paragraph E below. */
/* Note that the main program must declare arrays Y, RWORK, IWORK, */
/* and possibly ATOL. */
/* C. The output from the first call (or any call) is: */
/* Y = array of computed values of y(t) vector. */
/* T = corresponding value of independent variable (normally TOUT). */
/* ISTATE = 2 if DLSODA was successful, negative otherwise. */
/* -1 means excess work done on this call (perhaps wrong JT). */
/* -2 means excess accuracy requested (tolerances too small). */
/* -3 means illegal input detected (see printed message). */
/* -4 means repeated error test failures (check all inputs). */
/* -5 means repeated convergence failures (perhaps bad Jacobian */
/* supplied or wrong choice of JT or tolerances). */
/* -6 means error weight became zero during problem. (Solution */
/* component i vanished, and ATOL or ATOL(i) = 0.) */
/* -7 means work space insufficient to finish (see messages). */
/* D. To continue the integration after a successful return, simply */
/* reset TOUT and call DLSODA again. No other parameters need be reset. */
/* E. Note: If and when DLSODA regards the problem as stiff, and */
/* switches methods accordingly, it must make use of the NEQ by NEQ */
/* Jacobian matrix, J = df/dy. For the sake of simplicity, the */
/* inputs to DLSODA recommended in Paragraph B above cause DLSODA to */
/* treat J as a full matrix, and to approximate it internally by */
/* difference quotients. Alternatively, J can be treated as a band */
/* matrix (with great potential reduction in the size of the RWORK */
/* array). Also, in either the full or banded case, the user can supply */
/* J in closed form, with a routine whose name is passed as the JAC */
/* argument. These alternatives are described in the paragraphs on */
/* RWORK, JAC, and JT in the full description of the call sequence below. */
/* ----------------------------------------------------------------------- */
/* Example Problem. */
/* The following is a simple example problem, with the coding */
/* needed for its solution by DLSODA. The problem is from chemical */
/* kinetics, and consists of the following three rate equations: */
/* dy1/dt = -.04*y1 + 1.e4*y2*y3 */
/* dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*y2**2 */
/* dy3/dt = 3.e7*y2**2 */
/* on the interval from t = 0.0 to t = 4.e10, with initial conditions */
/* y1 = 1.0, y2 = y3 = 0. The problem is stiff. */
/* The following coding solves this problem with DLSODA, */
/* printing results at t = .4, 4., ..., 4.e10. It uses */
/* ITOL = 2 and ATOL much smaller for y2 than y1 or y3 because */
/* y2 has much smaller values. */
/* At the end of the run, statistical quantities of interest are */
/* printed (see optional outputs in the full description below). */
/* EXTERNAL FEX */
/* DOUBLE PRECISION ATOL, RTOL, RWORK, T, TOUT, Y */
/* DIMENSION Y(3), ATOL(3), RWORK(70), IWORK(23) */
/* NEQ = 3 */
/* Y(1) = 1. */
/* Y(2) = 0. */
/* Y(3) = 0. */
/* T = 0. */
/* TOUT = .4 */
/* ITOL = 2 */
/* RTOL = 1.D-4 */
/* ATOL(1) = 1.D-6 */
/* ATOL(2) = 1.D-10 */
/* ATOL(3) = 1.D-6 */
/* ITASK = 1 */
/* ISTATE = 1 */
/* IOPT = 0 */
/* LRW = 70 */
/* LIW = 23 */
/* JT = 2 */
/* DO 40 IOUT = 1,12 */
/* CALL DLSODA(FEX,NEQ,Y,T,TOUT,ITOL,RTOL,ATOL,ITASK,ISTATE, */
/* 1 IOPT,RWORK,LRW,IWORK,LIW,JDUM,JT) */
/* WRITE(6,20)T,Y(1),Y(2),Y(3) */
/* 20 FORMAT(' At t =',D12.4,' Y =',3D14.6) */
/* IF (ISTATE .LT. 0) GO TO 80 */
/* 40 TOUT = TOUT*10. */
/* WRITE(6,60)IWORK(11),IWORK(12),IWORK(13),IWORK(19),RWORK(15) */
/* 60 FORMAT(/' No. steps =',I4,' No. f-s =',I4,' No. J-s =',I4/ */
/* 1 ' Method last used =',I2,' Last switch was at t =',D12.4) */
/* STOP */
/* 80 WRITE(6,90)ISTATE */
/* 90 FORMAT(///' Error halt.. ISTATE =',I3) */
/* STOP */
/* END */
/* SUBROUTINE FEX (NEQ, T, Y, YDOT) */
/* DOUBLE PRECISION T, Y, YDOT */
/* DIMENSION Y(3), YDOT(3) */
/* YDOT(1) = -.04*Y(1) + 1.D4*Y(2)*Y(3) */
/* YDOT(3) = 3.D7*Y(2)*Y(2) */
/* YDOT(2) = -YDOT(1) - YDOT(3) */
/* RETURN */
/* END */
/* The output of this program (on a CDC-7600 in single precision) */
/* is as follows: */
/* At t = 4.0000e-01 y = 9.851712e-01 3.386380e-05 1.479493e-02 */
/* At t = 4.0000e+00 Y = 9.055333e-01 2.240655e-05 9.444430e-02 */
/* At t = 4.0000e+01 Y = 7.158403e-01 9.186334e-06 2.841505e-01 */
/* At t = 4.0000e+02 Y = 4.505250e-01 3.222964e-06 5.494717e-01 */
/* At t = 4.0000e+03 Y = 1.831975e-01 8.941774e-07 8.168016e-01 */
/* At t = 4.0000e+04 Y = 3.898730e-02 1.621940e-07 9.610125e-01 */
/* At t = 4.0000e+05 Y = 4.936363e-03 1.984221e-08 9.950636e-01 */
/* At t = 4.0000e+06 Y = 5.161831e-04 2.065786e-09 9.994838e-01 */
/* At t = 4.0000e+07 Y = 5.179817e-05 2.072032e-10 9.999482e-01 */
/* At t = 4.0000e+08 Y = 5.283401e-06 2.113371e-11 9.999947e-01 */
/* At t = 4.0000e+09 Y = 4.659031e-07 1.863613e-12 9.999995e-01 */
/* At t = 4.0000e+10 Y = 1.404280e-08 5.617126e-14 1.000000e+00 */
/* No. steps = 361 No. f-s = 693 No. J-s = 64 */
/* Method last used = 2 Last switch was at t = 6.0092e-03 */
/* ----------------------------------------------------------------------- */
/* Full description of user interface to DLSODA. */
/* The user interface to DLSODA consists of the following parts. */
/* 1. The call sequence to Subroutine DLSODA, which is a driver */
/* routine for the solver. This includes descriptions of both */
/* the call sequence arguments and of user-supplied routines. */
/* following these descriptions is a description of */
/* optional inputs available through the call sequence, and then */
/* a description of optional outputs (in the work arrays). */
/* 2. Descriptions of other routines in the DLSODA package that may be */
/* (optionally) called by the user. These provide the ability to */
/* alter error message handling, save and restore the internal */
/* Common, and obtain specified derivatives of the solution y(t). */
/* 3. Descriptions of Common blocks to be declared in overlay */
/* or similar environments, or to be saved when doing an interrupt */
/* of the problem and continued solution later. */
/* 4. Description of a subroutine in the DLSODA package, */
/* which the user may replace with his/her own version, if desired. */
/* this relates to the measurement of errors. */
/* ----------------------------------------------------------------------- */
/* Part 1. Call Sequence. */
/* The call sequence parameters used for input only are */
/* F, NEQ, TOUT, ITOL, RTOL, ATOL, ITASK, IOPT, LRW, LIW, JAC, JT, */
/* and those used for both input and output are */
/* Y, T, ISTATE. */
/* The work arrays RWORK and IWORK are also used for conditional and */
/* optional inputs and optional outputs. (The term output here refers */
/* to the return from Subroutine DLSODA to the user's calling program.) */
/* The legality of input parameters will be thoroughly checked on the */
/* initial call for the problem, but not checked thereafter unless a */
/* change in input parameters is flagged by ISTATE = 3 on input. */
/* The descriptions of the call arguments are as follows. */
/* F = the name of the user-supplied subroutine defining the */
/* ODE system. The system must be put in the first-order */
/* form dy/dt = f(t,y), where f is a vector-valued function */
/* of the scalar t and the vector y. Subroutine F is to */
/* compute the function f. It is to have the form */
/* SUBROUTINE F (NEQ, T, Y, YDOT) */
/* DOUBLE PRECISION T, Y(*), YDOT(*) */
/* where NEQ, T, and Y are input, and the array YDOT = f(t,y) */
/* is output. Y and YDOT are arrays of length NEQ. */
/* Subroutine F should not alter Y(1),...,Y(NEQ). */
/* F must be declared External in the calling program. */
/* Subroutine F may access user-defined quantities in */
/* NEQ(2),... and/or in Y(NEQ(1)+1),... if NEQ is an array */
/* (dimensioned in F) and/or Y has length exceeding NEQ(1). */
/* See the descriptions of NEQ and Y below. */
/* If quantities computed in the F routine are needed */
/* externally to DLSODA, an extra call to F should be made */
/* for this purpose, for consistent and accurate results. */
/* If only the derivative dy/dt is needed, use DINTDY instead. */
/* NEQ = the size of the ODE system (number of first order */
/* ordinary differential equations). Used only for input. */
/* NEQ may be decreased, but not increased, during the problem. */
/* If NEQ is decreased (with ISTATE = 3 on input), the */
/* remaining components of Y should be left undisturbed, if */
/* these are to be accessed in F and/or JAC. */
/* Normally, NEQ is a scalar, and it is generally referred to */
/* as a scalar in this user interface description. However, */
/* NEQ may be an array, with NEQ(1) set to the system size. */
/* (The DLSODA package accesses only NEQ(1).) In either case, */
/* this parameter is passed as the NEQ argument in all calls */
/* to F and JAC. Hence, if it is an array, locations */
/* NEQ(2),... may be used to store other int data and pass */
/* it to F and/or JAC. Subroutines F and/or JAC must include */
/* NEQ in a Dimension statement in that case. */
/* Y = a real array for the vector of dependent variables, of */
/* length NEQ or more. Used for both input and output on the */
/* first call (ISTATE = 1), and only for output on other calls. */
/* On the first call, Y must contain the vector of initial */
/* values. On output, Y contains the computed solution vector, */
/* evaluated at T. If desired, the Y array may be used */
/* for other purposes between calls to the solver. */
/* This array is passed as the Y argument in all calls to */
/* F and JAC. Hence its length may exceed NEQ, and locations */
/* Y(NEQ+1),... may be used to store other real data and */
/* pass it to F and/or JAC. (The DLSODA package accesses only */
/* Y(1),...,Y(NEQ).) */
/* T = the independent variable. On input, T is used only on the */
/* first call, as the initial point of the integration. */
/* on output, after each call, T is the value at which a */
/* computed solution Y is evaluated (usually the same as TOUT). */
/* on an error return, T is the farthest point reached. */
/* TOUT = the next value of t at which a computed solution is desired. */
/* Used only for input. */
/* When starting the problem (ISTATE = 1), TOUT may be equal */
/* to T for one call, then should .ne. T for the next call. */
/* For the initial t, an input value of TOUT .ne. T is used */
/* in order to determine the direction of the integration */
/* (i.e. the algebraic sign of the step sizes) and the rough */
/* scale of the problem. Integration in either direction */
/* (forward or backward in t) is permitted. */
/* If ITASK = 2 or 5 (one-step modes), TOUT is ignored after */
/* the first call (i.e. the first call with TOUT .ne. T). */
/* Otherwise, TOUT is required on every call. */
/* If ITASK = 1, 3, or 4, the values of TOUT need not be */
/* monotone, but a value of TOUT which backs up is limited */
/* to the current internal T interval, whose endpoints are */
/* TCUR - HU and TCUR (see optional outputs, below, for */
/* TCUR and HU). */
/* ITOL = an indicator for the type of error control. See */
/* description below under ATOL. Used only for input. */
/* RTOL = a relative error tolerance parameter, either a scalar or */
/* an array of length NEQ. See description below under ATOL. */
/* Input only. */
/* ATOL = an absolute error tolerance parameter, either a scalar or */
/* an array of length NEQ. Input only. */
/* The input parameters ITOL, RTOL, and ATOL determine */
/* the error control performed by the solver. The solver will */
/* control the vector E = (E(i)) of estimated local errors */
/* in y, according to an inequality of the form */
/* max-norm of ( E(i)/EWT(i) ) .le. 1, */
/* where EWT = (EWT(i)) is a vector of positive error weights. */
/* The values of RTOL and ATOL should all be non-negative. */
/* The following table gives the types (scalar/array) of */
/* RTOL and ATOL, and the corresponding form of EWT(i). */
/* ITOL RTOL ATOL EWT(i) */
/* 1 scalar scalar RTOL*ABS(Y(i)) + ATOL */
/* 2 scalar array RTOL*ABS(Y(i)) + ATOL(i) */
/* 3 array scalar RTOL(i)*ABS(Y(i)) + ATOL */
/* 4 array array RTOL(i)*ABS(Y(i)) + ATOL(i) */
/* When either of these parameters is a scalar, it need not */
/* be dimensioned in the user's calling program. */
/* If none of the above choices (with ITOL, RTOL, and ATOL */
/* fixed throughout the problem) is suitable, more general */
/* error controls can be obtained by substituting a */
/* user-supplied routine for the setting of EWT. */
/* See Part 4 below. */
/* If global errors are to be estimated by making a repeated */
/* run on the same problem with smaller tolerances, then all */
/* components of RTOL and ATOL (i.e. of EWT) should be scaled */
/* down uniformly. */
/* ITASK = an index specifying the task to be performed. */
/* Input only. ITASK has the following values and meanings. */
/* 1 means normal computation of output values of y(t) at */
/* t = TOUT (by overshooting and interpolating). */
/* 2 means take one step only and return. */
/* 3 means stop at the first internal mesh point at or */
/* beyond t = TOUT and return. */
/* 4 means normal computation of output values of y(t) at */
/* t = TOUT but without overshooting t = TCRIT. */
/* TCRIT must be input as RWORK(1). TCRIT may be equal to */
/* or beyond TOUT, but not behind it in the direction of */
/* integration. This option is useful if the problem */
/* has a singularity at or beyond t = TCRIT. */
/* 5 means take one step, without passing TCRIT, and return. */
/* TCRIT must be input as RWORK(1). */
/* Note: If ITASK = 4 or 5 and the solver reaches TCRIT */
/* (within roundoff), it will return T = TCRIT (exactly) to */
/* indicate this (unless ITASK = 4 and TOUT comes before TCRIT, */
/* in which case answers at t = TOUT are returned first). */
/* ISTATE = an index used for input and output to specify the */
/* the state of the calculation. */
/* On input, the values of ISTATE are as follows. */
/* 1 means this is the first call for the problem */
/* (initializations will be done). See note below. */
/* 2 means this is not the first call, and the calculation */
/* is to continue normally, with no change in any input */
/* parameters except possibly TOUT and ITASK. */
/* (If ITOL, RTOL, and/or ATOL are changed between calls */
/* with ISTATE = 2, the new values will be used but not */
/* tested for legality.) */
/* 3 means this is not the first call, and the */
/* calculation is to continue normally, but with */
/* a change in input parameters other than */
/* TOUT and ITASK. Changes are allowed in */
/* NEQ, ITOL, RTOL, ATOL, IOPT, LRW, LIW, JT, ML, MU, */
/* and any optional inputs except H0, MXORDN, and MXORDS. */
/* (See IWORK description for ML and MU.) */
/* Note: A preliminary call with TOUT = T is not counted */
/* as a first call here, as no initialization or checking of */
/* input is done. (Such a call is sometimes useful for the */
/* purpose of outputting the initial conditions.) */
/* Thus the first call for which TOUT .ne. T requires */
/* ISTATE = 1 on input. */
/* On output, ISTATE has the following values and meanings. */
/* 1 means nothing was done; TOUT = T and ISTATE = 1 on input. */
/* 2 means the integration was performed successfully. */
/* -1 means an excessive amount of work (more than MXSTEP */
/* steps) was done on this call, before completing the */
/* requested task, but the integration was otherwise */
/* successful as far as T. (MXSTEP is an optional input */
/* and is normally 500.) To continue, the user may */
/* simply reset ISTATE to a value .gt. 1 and call again */
/* (the excess work step counter will be reset to 0). */
/* In addition, the user may increase MXSTEP to avoid */
/* this error return (see below on optional inputs). */
/* -2 means too much accuracy was requested for the precision */
/* of the machine being used. This was detected before */
/* completing the requested task, but the integration */
/* was successful as far as T. To continue, the tolerance */
/* parameters must be reset, and ISTATE must be set */
/* to 3. The optional output TOLSF may be used for this */
/* purpose. (Note: If this condition is detected before */
/* taking any steps, then an illegal input return */
/* (ISTATE = -3) occurs instead.) */
/* -3 means illegal input was detected, before taking any */
/* integration steps. See written message for details. */
/* Note: If the solver detects an infinite loop of calls */
/* to the solver with illegal input, it will cause */
/* the run to stop. */
/* -4 means there were repeated error test failures on */
/* one attempted step, before completing the requested */
/* task, but the integration was successful as far as T. */
/* The problem may have a singularity, or the input */
/* may be inappropriate. */
/* -5 means there were repeated convergence test failures on */
/* one attempted step, before completing the requested */
/* task, but the integration was successful as far as T. */
/* This may be caused by an inaccurate Jacobian matrix, */
/* if one is being used. */
/* -6 means EWT(i) became zero for some i during the */
/* integration. Pure relative error control (ATOL(i)=0.0) */
/* was requested on a variable which has now vanished. */
/* The integration was successful as far as T. */
/* -7 means the length of RWORK and/or IWORK was too small to */
/* proceed, but the integration was successful as far as T. */
/* This happens when DLSODA chooses to switch methods */
/* but LRW and/or LIW is too small for the new method. */
/* Note: Since the normal output value of ISTATE is 2, */
/* it does not need to be reset for normal continuation. */
/* Also, since a negative input value of ISTATE will be */
/* regarded as illegal, a negative output value requires the */
/* user to change it, and possibly other inputs, before */
/* calling the solver again. */
/* IOPT = an int flag to specify whether or not any optional */
/* inputs are being used on this call. Input only. */
/* The optional inputs are listed separately below. */
/* IOPT = 0 means no optional inputs are being used. */
/* default values will be used in all cases. */
/* IOPT = 1 means one or more optional inputs are being used. */
/* RWORK = a real array (double precision) for work space, and (in the */
/* first 20 words) for conditional and optional inputs and */
/* optional outputs. */
/* As DLSODA switches automatically between stiff and nonstiff */
/* methods, the required length of RWORK can change during the */
/* problem. Thus the RWORK array passed to DLSODA can either */
/* have a static (fixed) length large enough for both methods, */
/* or have a dynamic (changing) length altered by the calling */
/* program in response to output from DLSODA. */
/* --- Fixed Length Case --- */
/* If the RWORK length is to be fixed, it should be at least */
/* MAX (LRN, LRS), */
/* where LRN and LRS are the RWORK lengths required when the */
/* current method is nonstiff or stiff, respectively. */
/* The separate RWORK length requirements LRN and LRS are */
/* as follows: */
/* IF NEQ is constant and the maximum method orders have */
/* their default values, then */
/* LRN = 20 + 16*NEQ, */
/* LRS = 22 + 9*NEQ + NEQ**2 if JT = 1 or 2, */
/* LRS = 22 + 10*NEQ + (2*ML+MU)*NEQ if JT = 4 or 5. */
/* Under any other conditions, LRN and LRS are given by: */
/* LRN = 20 + NYH*(MXORDN+1) + 3*NEQ, */
/* LRS = 20 + NYH*(MXORDS+1) + 3*NEQ + LMAT, */
/* where */
/* NYH = the initial value of NEQ, */
/* MXORDN = 12, unless a smaller value is given as an */
/* optional input, */
/* MXORDS = 5, unless a smaller value is given as an */
/* optional input, */
/* LMAT = length of matrix work space: */
/* LMAT = NEQ**2 + 2 if JT = 1 or 2, */
/* LMAT = (2*ML + MU + 1)*NEQ + 2 if JT = 4 or 5. */
/* --- Dynamic Length Case --- */
/* If the length of RWORK is to be dynamic, then it should */
/* be at least LRN or LRS, as defined above, depending on the */
/* current method. Initially, it must be at least LRN (since */
/* DLSODA starts with the nonstiff method). On any return */
/* from DLSODA, the optional output MCUR indicates the current */
/* method. If MCUR differs from the value it had on the */
/* previous return, or if there has only been one call to */
/* DLSODA and MCUR is now 2, then DLSODA has switched */
/* methods during the last call, and the length of RWORK */
/* should be reset (to LRN if MCUR = 1, or to LRS if */
/* MCUR = 2). (An increase in the RWORK length is required */
/* if DLSODA returned ISTATE = -7, but not otherwise.) */
/* After resetting the length, call DLSODA with ISTATE = 3 */
/* to signal that change. */
/* LRW = the length of the array RWORK, as declared by the user. */
/* (This will be checked by the solver.) */
/* IWORK = an int array for work space. */
/* As DLSODA switches automatically between stiff and nonstiff */
/* methods, the required length of IWORK can change during */
/* problem, between */
/* LIS = 20 + NEQ and LIN = 20, */
/* respectively. Thus the IWORK array passed to DLSODA can */
/* either have a fixed length of at least 20 + NEQ, or have a */
/* dynamic length of at least LIN or LIS, depending on the */
/* current method. The comments on dynamic length under */
/* RWORK above apply here. Initially, this length need */
/* only be at least LIN = 20. */
/* The first few words of IWORK are used for conditional and */
/* optional inputs and optional outputs. */
/* The following 2 words in IWORK are conditional inputs: */
/* IWORK(1) = ML these are the lower and upper */
/* IWORK(2) = MU half-bandwidths, respectively, of the */
/* banded Jacobian, excluding the main diagonal. */
/* The band is defined by the matrix locations */
/* (i,j) with i-ML .le. j .le. i+MU. ML and MU */
/* must satisfy 0 .le. ML,MU .le. NEQ-1. */
/* These are required if JT is 4 or 5, and */
/* ignored otherwise. ML and MU may in fact be */
/* the band parameters for a matrix to which */
/* df/dy is only approximately equal. */
/* LIW = the length of the array IWORK, as declared by the user. */
/* (This will be checked by the solver.) */
/* Note: The base addresses of the work arrays must not be */
/* altered between calls to DLSODA for the same problem. */
/* The contents of the work arrays must not be altered */
/* between calls, except possibly for the conditional and */
/* optional inputs, and except for the last 3*NEQ words of RWORK. */
/* The latter space is used for internal scratch space, and so is */
/* available for use by the user outside DLSODA between calls, if */
/* desired (but not for use by F or JAC). */
/* JAC = the name of the user-supplied routine to compute the */
/* Jacobian matrix, df/dy, if JT = 1 or 4. The JAC routine */
/* is optional, but if the problem is expected to be stiff much */
/* of the time, you are encouraged to supply JAC, for the sake */
/* of efficiency. (Alternatively, set JT = 2 or 5 to have */
/* DLSODA compute df/dy internally by difference quotients.) */
/* If and when DLSODA uses df/dy, it treats this NEQ by NEQ */
/* matrix either as full (JT = 1 or 2), or as banded (JT = */
/* 4 or 5) with half-bandwidths ML and MU (discussed under */
/* IWORK above). In either case, if JT = 1 or 4, the JAC */
/* routine must compute df/dy as a function of the scalar t */
/* and the vector y. It is to have the form */
/* SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD) */
/* DOUBLE PRECISION T, Y(*), PD(NROWPD,*) */
/* where NEQ, T, Y, ML, MU, and NROWPD are input and the array */
/* PD is to be loaded with partial derivatives (elements of */
/* the Jacobian matrix) on output. PD must be given a first */
/* dimension of NROWPD. T and Y have the same meaning as in */
/* Subroutine F. */
/* In the full matrix case (JT = 1), ML and MU are */
/* ignored, and the Jacobian is to be loaded into PD in */
/* columnwise manner, with df(i)/dy(j) loaded into PD(i,j). */
/* In the band matrix case (JT = 4), the elements */
/* within the band are to be loaded into PD in columnwise */
/* manner, with diagonal lines of df/dy loaded into the rows */
/* of PD. Thus df(i)/dy(j) is to be loaded into PD(i-j+MU+1,j). */
/* ML and MU are the half-bandwidth parameters (see IWORK). */
/* The locations in PD in the two triangular areas which */
/* correspond to nonexistent matrix elements can be ignored */
/* or loaded arbitrarily, as they are overwritten by DLSODA. */
/* JAC need not provide df/dy exactly. A crude */
/* approximation (possibly with a smaller bandwidth) will do. */
/* In either case, PD is preset to zero by the solver, */
/* so that only the nonzero elements need be loaded by JAC. */
/* Each call to JAC is preceded by a call to F with the same */
/* arguments NEQ, T, and Y. Thus to gain some efficiency, */
/* intermediate quantities shared by both calculations may be */
/* saved in a user Common block by F and not recomputed by JAC, */
/* if desired. Also, JAC may alter the Y array, if desired. */
/* JAC must be declared External in the calling program. */
/* Subroutine JAC may access user-defined quantities in */
/* NEQ(2),... and/or in Y(NEQ(1)+1),... if NEQ is an array */
/* (dimensioned in JAC) and/or Y has length exceeding NEQ(1). */
/* See the descriptions of NEQ and Y above. */
/* JT = Jacobian type indicator. Used only for input. */
/* JT specifies how the Jacobian matrix df/dy will be */
/* treated, if and when DLSODA requires this matrix. */
/* JT has the following values and meanings: */
/* 1 means a user-supplied full (NEQ by NEQ) Jacobian. */
/* 2 means an internally generated (difference quotient) full */
/* Jacobian (using NEQ extra calls to F per df/dy value). */
/* 4 means a user-supplied banded Jacobian. */
/* 5 means an internally generated banded Jacobian (using */
/* ML+MU+1 extra calls to F per df/dy evaluation). */
/* If JT = 1 or 4, the user must supply a Subroutine JAC */
/* (the name is arbitrary) as described above under JAC. */
/* If JT = 2 or 5, a dummy argument can be used. */
/* ----------------------------------------------------------------------- */
/* Optional Inputs. */
/* The following is a list of the optional inputs provided for in the */
/* call sequence. (See also Part 2.) For each such input variable, */
/* this table lists its name as used in this documentation, its */
/* location in the call sequence, its meaning, and the default value. */
/* The use of any of these inputs requires IOPT = 1, and in that */
/* case all of these inputs are examined. A value of zero for any */
/* of these optional inputs will cause the default value to be used. */
/* Thus to use a subset of the optional inputs, simply preload */
/* locations 5 to 10 in RWORK and IWORK to 0.0 and 0 respectively, and */
/* then set those of interest to nonzero values. */
/* Name Location Meaning and Default Value */
/* H0 RWORK(5) the step size to be attempted on the first step. */
/* The default value is determined by the solver. */
/* HMAX RWORK(6) the maximum absolute step size allowed. */
/* The default value is infinite. */
/* HMIN RWORK(7) the minimum absolute step size allowed. */
/* The default value is 0. (This lower bound is not */
/* enforced on the final step before reaching TCRIT */
/* when ITASK = 4 or 5.) */
/* IXPR IWORK(5) flag to generate extra printing at method switches. */
/* IXPR = 0 means no extra printing (the default). */
/* IXPR = 1 means print data on each switch. */
/* T, H, and NST will be printed on the same logical */
/* unit as used for error messages. */
/* MXSTEP IWORK(6) maximum number of (internally defined) steps */
/* allowed during one call to the solver. */
/* The default value is 500. */
/* MXHNIL IWORK(7) maximum number of messages printed (per problem) */
/* warning that T + H = T on a step (H = step size). */
/* This must be positive to result in a non-default */
/* value. The default value is 10. */
/* MXORDN IWORK(8) the maximum order to be allowed for the nonstiff */
/* (Adams) method. the default value is 12. */
/* if MXORDN exceeds the default value, it will */
/* be reduced to the default value. */
/* MXORDN is held constant during the problem. */
/* MXORDS IWORK(9) the maximum order to be allowed for the stiff */
/* (BDF) method. The default value is 5. */
/* If MXORDS exceeds the default value, it will */
/* be reduced to the default value. */
/* MXORDS is held constant during the problem. */
/* ----------------------------------------------------------------------- */
/* Optional Outputs. */
/* As optional additional output from DLSODA, the variables listed */
/* below are quantities related to the performance of DLSODA */
/* which are available to the user. These are communicated by way of */
/* the work arrays, but also have internal mnemonic names as shown. */
/* except where stated otherwise, all of these outputs are defined */
/* on any successful return from DLSODA, and on any return with */
/* ISTATE = -1, -2, -4, -5, or -6. On an illegal input return */
/* (ISTATE = -3), they will be unchanged from their existing values */
/* (if any), except possibly for TOLSF, LENRW, and LENIW. */
/* On any error return, outputs relevant to the error will be defined, */
/* as noted below. */
/* Name Location Meaning */
/* HU RWORK(11) the step size in t last used (successfully). */
/* HCUR RWORK(12) the step size to be attempted on the next step. */
/* TCUR RWORK(13) the current value of the independent variable */
/* which the solver has actually reached, i.e. the */
/* current internal mesh point in t. On output, TCUR */
/* will always be at least as far as the argument */
/* T, but may be farther (if interpolation was done). */
/* TOLSF RWORK(14) a tolerance scale factor, greater than 1.0, */
/* computed when a request for too much accuracy was */
/* detected (ISTATE = -3 if detected at the start of */
/* the problem, ISTATE = -2 otherwise). If ITOL is */
/* left unaltered but RTOL and ATOL are uniformly */
/* scaled up by a factor of TOLSF for the next call, */
/* then the solver is deemed likely to succeed. */
/* (The user may also ignore TOLSF and alter the */
/* tolerance parameters in any other way appropriate.) */
/* TSW RWORK(15) the value of t at the time of the last method */
/* switch, if any. */
/* NST IWORK(11) the number of steps taken for the problem so far. */
/* NFE IWORK(12) the number of f evaluations for the problem so far. */
/* NJE IWORK(13) the number of Jacobian evaluations (and of matrix */
/* LU decompositions) for the problem so far. */
/* NQU IWORK(14) the method order last used (successfully). */
/* NQCUR IWORK(15) the order to be attempted on the next step. */
/* IMXER IWORK(16) the index of the component of largest magnitude in */
/* the weighted local error vector ( E(i)/EWT(i) ), */
/* on an error return with ISTATE = -4 or -5. */
/* LENRW IWORK(17) the length of RWORK actually required, assuming */
/* that the length of RWORK is to be fixed for the */
/* rest of the problem, and that switching may occur. */
/* This is defined on normal returns and on an illegal */
/* input return for insufficient storage. */
/* LENIW IWORK(18) the length of IWORK actually required, assuming */
/* that the length of IWORK is to be fixed for the */
/* rest of the problem, and that switching may occur. */
/* This is defined on normal returns and on an illegal */
/* input return for insufficient storage. */
/* MUSED IWORK(19) the method indicator for the last successful step: */
/* 1 means Adams (nonstiff), 2 means BDF (stiff). */
/* MCUR IWORK(20) the current method indicator: */
/* 1 means Adams (nonstiff), 2 means BDF (stiff). */
/* This is the method to be attempted */
/* on the next step. Thus it differs from MUSED */
/* only if a method switch has just been made. */
/* The following two arrays are segments of the RWORK array which */
/* may also be of interest to the user as optional outputs. */
/* For each array, the table below gives its internal name, */
/* its base address in RWORK, and its description. */
/* Name Base Address Description */
/* YH 21 the Nordsieck history array, of size NYH by */
/* (NQCUR + 1), where NYH is the initial value */
/* of NEQ. For j = 0,1,...,NQCUR, column j+1 */
/* of YH contains HCUR**j/factorial(j) times */
/* the j-th derivative of the interpolating */
/* polynomial currently representing the solution, */
/* evaluated at T = TCUR. */
/* ACOR LACOR array of size NEQ used for the accumulated */
/* (from Common corrections on each step, scaled on output */
/* as noted) to represent the estimated local error in y */
/* on the last step. This is the vector E in */
/* the description of the error control. It is */
/* defined only on a successful return from */
/* DLSODA. The base address LACOR is obtained by */
/* including in the user's program the */
/* following 2 lines: */
/* COMMON /DLS001/ RLS(218), ILS(37) */
/* LACOR = ILS(22) */
/* ----------------------------------------------------------------------- */
/* Part 2. Other Routines Callable. */
/* The following are optional calls which the user may make to */
/* gain additional capabilities in conjunction with DLSODA. */
/* (The routines XSETUN and XSETF are designed to conform to the */
/* SLATEC error handling package.) */
/* Form of Call Function */
/* CALL XSETUN(LUN) set the logical unit number, LUN, for */
/* output of messages from DLSODA, if */
/* the default is not desired. */
/* The default value of LUN is 6. */
/* CALL XSETF(MFLAG) set a flag to control the printing of */
/* messages by DLSODA. */
/* MFLAG = 0 means do not print. (Danger: */
/* This risks losing valuable information.) */
/* MFLAG = 1 means print (the default). */
/* Either of the above calls may be made at */
/* any time and will take effect immediately. */
/* CALL DSRCMA(RSAV,ISAV,JOB) saves and restores the contents of */
/* the internal Common blocks used by */
/* DLSODA (see Part 3 below). */
/* RSAV must be a real array of length 240 */
/* or more, and ISAV must be an int */
/* array of length 46 or more. */
/* JOB=1 means save Common into RSAV/ISAV. */
/* JOB=2 means restore Common from RSAV/ISAV. */
/* DSRCMA is useful if one is */
/* interrupting a run and restarting */
/* later, or alternating between two or */
/* more problems solved with DLSODA. */
/* CALL DINTDY(,,,,,) provide derivatives of y, of various */
/* (see below) orders, at a specified point t, if */
/* desired. It may be called only after */
/* a successful return from DLSODA. */
/* The detailed instructions for using DINTDY are as follows. */
/* The form of the call is: */
/* CALL DINTDY (T, K, RWORK(21), NYH, DKY, IFLAG) */
/* The input parameters are: */
/* T = value of independent variable where answers are desired */
/* (normally the same as the T last returned by DLSODA). */
/* For valid results, T must lie between TCUR - HU and TCUR. */
/* (See optional outputs for TCUR and HU.) */
/* K = int order of the derivative desired. K must satisfy */
/* 0 .le. K .le. NQCUR, where NQCUR is the current order */
/* (see optional outputs). The capability corresponding */
/* to K = 0, i.e. computing y(T), is already provided */
/* by DLSODA directly. Since NQCUR .ge. 1, the first */
/* derivative dy/dt is always available with DINTDY. */
/* RWORK(21) = the base address of the history array YH. */
/* NYH = column length of YH, equal to the initial value of NEQ. */
/* The output parameters are: */
/* DKY = a real array of length NEQ containing the computed value */
/* of the K-th derivative of y(t). */
/* IFLAG = int flag, returned as 0 if K and T were legal, */
/* -1 if K was illegal, and -2 if T was illegal. */
/* On an error return, a message is also written. */
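/* Note added in this C/CUDA translation (illustrative sketch, not part of the */
/* original prologue): the call described above corresponds to the dintdy_ */
/* device routine defined later in this file, which takes K by value and the */
/* YH base address from the 0-based rwork array. A hypothetical caller-side */
/* sketch, where dky is a caller-supplied buffer of length NEQ and t_query */
/* lies in [TCUR - HU, TCUR]: */
/* int iflag; */
/* dintdy_(&t_query, 1, &rwork[21 - 1], &nyh, dky, &iflag, common); */
/* if (iflag != 0) { handle illegal K (-1) or illegal T (-2) as described above } */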
/* ----------------------------------------------------------------------- */
/* Part 3. Common Blocks. */
/* If DLSODA is to be used in an overlay situation, the user */
/* must declare, in the primary overlay, the variables in: */
/* (1) the call sequence to DLSODA, and */
/* (2) the two internal Common blocks */
/* /DLS001/ of length 255 (218 double precision words */
/* followed by 37 int words), */
/* /DLSA01/ of length 31 (22 double precision words */
/* followed by 9 int words). */
/* If DLSODA is used on a system in which the contents of internal */
/* Common blocks are not preserved between calls, the user should */
/* declare the above Common blocks in the calling program to insure */
/* that their contents are preserved. */
/* If the solution of a given problem by DLSODA is to be interrupted */
/* and then later continued, such as when restarting an interrupted run */
/* or alternating between two or more problems, the user should save, */
/* following the return from the last DLSODA call prior to the */
/* interruption, the contents of the call sequence variables and the */
/* internal Common blocks, and later restore these values before the */
/* next DLSODA call for that problem. To save and restore the Common */
/* blocks, use Subroutine DSRCMA (see Part 2 above). */
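/* Note added in this C/CUDA translation (illustrative sketch, assuming the */
/* translated Common-block state lives entirely in the cuLsodaCommonBlock */
/* structure passed to every routine in this file): saving and restoring the */
/* Common blocks then reduces to copying that structure, e.g. */
/* struct cuLsodaCommonBlock saved = *common; (analogous to DSRCMA with JOB=1) */
/* ... interrupt the run or solve another problem ... */
/* *common = saved; (analogous to DSRCMA with JOB=2) */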
/* ----------------------------------------------------------------------- */
/* Part 4. Optionally Replaceable Solver Routines. */
/* Below is a description of a routine in the DLSODA package which */
/* relates to the measurement of errors, and can be */
/* replaced by a user-supplied version, if desired. However, since such */
/* a replacement may have a major impact on performance, it should be */
/* done only when absolutely necessary, and only with great caution. */
/* (Note: The means by which the package version of a routine is */
/* superseded by the user's version may be system-dependent.) */
/* (a) DEWSET. */
/* The following subroutine is called just before each internal */
/* integration step, and sets the array of error weights, EWT, as */
/* described under ITOL/RTOL/ATOL above: */
/* Subroutine DEWSET (NEQ, ITOL, RTOL, ATOL, YCUR, EWT) */
/* where NEQ, ITOL, RTOL, and ATOL are as in the DLSODA call sequence, */
/* YCUR contains the current dependent variable vector, and */
/* EWT is the array of weights set by DEWSET. */
/* If the user supplies this subroutine, it must return in EWT(i) */
/* (i = 1,...,NEQ) a positive quantity suitable for comparing errors */
/* in y(i) to. The EWT array returned by DEWSET is passed to the */
/* DMNORM routine, and also used by DLSODA in the computation */
/* of the optional output IMXER, and the increments for difference */
/* quotient Jacobians. */
/* In the user-supplied version of DEWSET, it may be desirable to use */
/* the current values of derivatives of y. Derivatives up to order NQ */
/* are available from the history array YH, described above under */
/* optional outputs. In DEWSET, YH is identical to the YCUR array, */
/* extended to NQ + 1 columns with a column length of NYH and scale */
/* factors of H**j/factorial(j). On the first call for the problem, */
/* given by NST = 0, NQ is 1 and H is temporarily set to 1.0. */
/* NYH is the initial value of NEQ. The quantities NQ, H, and NST */
/* can be obtained by including in DEWSET the statements: */
/* DOUBLE PRECISION RLS */
/* COMMON /DLS001/ RLS(218),ILS(37) */
/* NQ = ILS(33) */
/* NST = ILS(34) */
/* H = RLS(212) */
/* Thus, for example, the current value of dy/dt can be obtained as */
/* YCUR(NYH+i)/H (i=1,...,NEQ) (and the division by H is */
/* unnecessary when NST = 0). */
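/* Note added in this C/CUDA translation (illustrative sketch, not part of the */
/* original prologue): dewset_ is invoked in this file as */
/* dewset_(&n, itol, rtol, atol, &rwork[lyh - 1], &rwork[lewt - 1], common); */
/* so a user-supplied replacement would keep that signature. A minimal sketch */
/* of the default ITOL = 1 weighting, EWT(i) = RTOL*ABS(YCUR(i)) + ATOL, using */
/* the hypothetical name my_dewset and omitting error checking: */
/* __device__ int my_dewset(int *n, int *itol, double *rtol, double *atol, */
/* double *ycur, double *ewt, struct cuLsodaCommonBlock *common) */
/* { */
/* for (int i = 0; i < *n; ++i) */
/* ewt[i] = rtol[0] * fabs(ycur[i]) + atol[0]; */
/* return 0; */
/* } */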
/* ----------------------------------------------------------------------- */
/* ***REVISION HISTORY (YYYYMMDD) */
/* 19811102 DATE WRITTEN */
/* 19820126 Fixed bug in tests of work space lengths; */
/* minor corrections in main prologue and comments. */
/* 19870330 Major update: corrected comments throughout; */
/* removed TRET from Common; rewrote EWSET with 4 loops; */
/* fixed t test in INTDY; added Cray directives in STODA; */
/* in STODA, fixed DELP init. and logic around dprja_ call; */
/* combined routines to save/restore Common; */
/* passed LEVEL = 0 in error message calls (except run abort). */
/* 19970225 Fixed lines setting JSTART = -2 in Subroutine LSODA. */
/* 20010425 Major update: convert source lines to upper case; */
/* added *DECK lines; changed from 1 to * in dummy dimensions; */
/* changed names R1MACH/D1MACH to RUMACH/DUMACH; */
/* renamed routines for uniqueness across single/double prec.; */
/* converted intrinsic names to generic form; */
/* removed ILLIN and NTREP (data loaded) from Common; */
/* removed all 'own' variables from Common; */
/* changed error messages to quoted strings; */
/* replaced XERRWV/XERRWD with 1993 revised version; */
/* converted prologues, comments, error messages to mixed case; */
/* numerous corrections to prologues and internal comments. */
/* 20010507 Converted single precision source to double precision. */
/* 20010613 Revised excess accuracy test (to match rest of ODEPACK). */
/* 20010808 Fixed bug in DPRJA (matrix in DBNORM call). */
/* 20020502 Corrected declarations in descriptions of user routines. */
/* 20031105 Restored 'own' variables to Common blocks, to enable */
/* interrupt/restart feature. */
/* 20031112 Added SAVE statements for data-loaded constants. */
/* ----------------------------------------------------------------------- */
/* Other routines in the DLSODA package. */
/* In addition to Subroutine DLSODA, the DLSODA package includes the */
/* following subroutines and function routines: */
/* DINTDY computes an interpolated value of the y vector at t = TOUT. */
/* DSTODA is the core integrator, which does one step of the */
/* integration and the associated error control. */
/* DCFODE sets all method coefficients and test constants. */
/* DPRJA computes and preprocesses the Jacobian matrix J = df/dy */
/* and the Newton iteration matrix P = I - h*l0*J. */
/* DSOLSY manages solution of linear system in chord iteration. */
/* DEWSET sets the error weight vector EWT before each step. */
/* DMNORM computes the weighted max-norm of a vector. */
/* DFNORM computes the norm of a full matrix consistent with the */
/* weighted max-norm on vectors. */
/* DBNORM computes the norm of a band matrix consistent with the */
/* weighted max-norm on vectors. */
/* DSRCMA is a user-callable routine to save and restore */
/* the contents of the internal Common blocks. */
/* DGEFA and DGESL are routines from LINPACK for solving full */
/* systems of linear algebraic equations. */
/* DGBFA and DGBSL are routines from LINPACK for solving banded */
/* linear systems. */
/* DUMACH computes the unit roundoff in a machine-independent manner. */
/* XERRWD, XSETUN, XSETF, IXSAV, and IUMACH handle the printing of all */
/* error messages and warnings. XERRWD is machine-dependent. */
/* Note: DMNORM, DFNORM, DBNORM, DUMACH, IXSAV, and IUMACH are */
/* function routines. All the others are subroutines. */
/* ----------------------------------------------------------------------- */
/* ----------------------------------------------------------------------- */
/* The following two internal Common blocks contain */
/* (a) variables which are local to any subroutine but whose values must */
/* be preserved between calls to the routine ("own" variables), and */
/* (b) variables which are communicated between subroutines. */
/* The block DLS001 is declared in subroutines DLSODA, DINTDY, DSTODA, */
/* DPRJA, and DSOLSY. */
/* The block DLSA01 is declared in subroutines DLSODA, DSTODA, and DPRJA. */
/* Groups of variables are replaced by dummy arrays in the Common */
/* declarations in routines where those variables are not used. */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--neq;
//--y;
//--rtol;
//--atol;
//--rwork;
// --iwork;
/* Function Body */
/* ----------------------------------------------------------------------- */
/* Block A. */
/* This code block is executed on every call. */
/* It tests ISTATE and ITASK for legality and branches appropriately. */
/* If ISTATE .gt. 1 but the flag INIT shows that initialization has */
/* not yet been done, an error return occurs. */
/* If ISTATE = 1 and TOUT = T, return immediately. */
/* ----------------------------------------------------------------------- */
if (*istate < 1 || *istate > 3) {
goto L601;
}
if (*itask < 1 || *itask > 5) {
goto L602;
}
if (*istate == 1) {
goto L10;
}
if (init == 0) {
goto L603;
}
if (*istate == 2) {
goto L200;
}
goto L20;
L10:
init = 0;
if (*tout == *t) {
return 0;
}
/* ----------------------------------------------------------------------- */
/* Block B. */
/* The next code block is executed for the initial call (ISTATE = 1), */
/* or for a continuation call with parameter changes (ISTATE = 3). */
/* It contains checking of all inputs and various initializations. */
/* First check legality of the non-optional inputs NEQ, ITOL, IOPT, */
/* JT, ML, and MU. */
/* ----------------------------------------------------------------------- */
L20:
if (neq[0] <= 0) { //fixed
goto L604;
}
if (*istate == 1) {
goto L25;
}
if (neq[0] > n) { //fixed
goto L605;
}
L25:
n = neq[0]; //fixed
if (*itol < 1 || *itol > 4) {
goto L606;
}
if (*iopt < 0 || *iopt > 1) {
goto L607;
}
if (*jt == 3 || *jt < 1 || *jt > 5) {
goto L608;
}
jtyp = *jt;
if (*jt <= 2) {
goto L30;
}
ml = iwork[0];
mu = iwork[1];
if (ml < 0 || ml >= n) {
goto L609;
}
if (mu < 0 || mu >= n) {
goto L610;
}
L30:
/* Next process and check the optional inputs. -------------------------- */
if (*iopt == 1) {
goto L40;
}
ixpr = 0;
mxstep = mxstp0;
mxhnil = mxhnl0;
hmxi = 0.;
hmin = 0.;
if (*istate != 1) {
goto L60;
}
h0 = 0.;
mxordn = mord[0];
mxords = mord[1];
goto L60;
L40:
ixpr = iwork[4];
if (ixpr < 0 || ixpr > 1) {
goto L611;
}
mxstep = iwork[5];
if (mxstep < 0) {
goto L612;
}
if (mxstep == 0) {
mxstep = mxstp0;
}
mxhnil = iwork[6];
if (mxhnil < 0) {
goto L613;
}
if (mxhnil == 0) {
mxhnil = mxhnl0;
}
if (*istate != 1) {
goto L50;
}
h0 = rwork[4];
mxordn = iwork[7];
if (mxordn < 0) {
goto L628;
}
if (mxordn == 0) {
mxordn = 100;
}
mxordn = min(mxordn,mord[0]);
mxords = iwork[8];
if (mxords < 0) {
goto L629;
}
if (mxords == 0) {
mxords = 100;
}
mxords = min(mxords,mord[1]);
if ((*tout - *t) * h0 < 0.) {
goto L614;
}
L50:
hmax = rwork[5];
if (hmax < 0.) {
goto L615;
}
hmxi = 0.;
if (hmax > 0.) {
hmxi = 1. / hmax;
}
hmin = rwork[6];
if (hmin < 0.) {
goto L616;
}
/* ----------------------------------------------------------------------- */
/* Set work array pointers and check lengths LRW and LIW. */
/* If ISTATE = 1, METH is initialized to 1 here to facilitate the */
/* checking of work space lengths. */
/* Pointers to segments of RWORK and IWORK are named by prefixing L to */
/* the name of the segment. E.g., the segment YH starts at RWORK(LYH). */
/* Segments of RWORK (in order) are denoted YH, WM, EWT, SAVF, ACOR. */
/* If the lengths provided are insufficient for the current method, */
/* an error return occurs. This is treated as illegal input on the */
/* first call, but as a problem interruption with ISTATE = -7 on a */
/* continuation call. If the lengths are sufficient for the current */
/* method but not for both methods, a warning message is sent. */
/* ----------------------------------------------------------------------- */
L60:
if (*istate == 1) {
meth = 1;
}
if (*istate == 1) {
nyh = n;
}
lyh = 21;
len1n = (mxordn + 1) * nyh + 20;
len1s = (mxords + 1) * nyh + 20;
lwm = len1s + 1;
if (*jt <= 2) {
lenwm = n * n + 2;
}
if (*jt >= 4) {
lenwm = ((ml << 1) + mu + 1) * n + 2;
}
len1s += lenwm;
len1c = len1n;
if (meth == 2) {
len1c = len1s;
}
len1 = max(len1n,len1s);
len2 = n * 3;
lenrw = len1 + len2;
lenrwc = len1c + len2;
iwork[16] = lenrw;
liwm = 1;
leniw = n + 20;
leniwc = 20;
if (meth == 2) {
leniwc = leniw;
}
iwork[17] = leniw;
if (*istate == 1 && *lrw < lenrwc) {
goto L617;
}
if (*istate == 1 && *liw < leniwc) {
goto L618;
}
if (*istate == 3 && *lrw < lenrwc) {
goto L550;
}
if (*istate == 3 && *liw < leniwc) {
goto L555;
}
lewt = len1 + 1;
insufr = 0;
if (*lrw >= lenrw) {
goto L65;
}
insufr = 2;
lewt = len1c + 1;
L65:
lsavf = lewt + n;
lacor = lsavf + n;
insufi = 0;
if (*liw >= leniw) {
goto L70;
}
insufi = 2;
L70:
/* Check RTOL and ATOL for legality. ------------------------------------ */
rtoli = rtol[0];
atoli = atol[0];
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
// for (i__ = 0; i__ < i__1; ++i__) {
if (*itol >= 3) {
rtoli = rtol[i__ -1];
}
if (*itol == 2 || *itol == 4) {
atoli = atol[i__ -1];
}
if (rtoli < 0.) {
goto L619;
}
if (atoli < 0.) {
goto L620;
}
/* L75: */
}
if (*istate == 1) {
goto L100;
}
/* If ISTATE = 3, set flag to signal parameter changes to DSTODA. ------- */
jstart = -1;
if (n == nyh) {
goto L200;
}
/* NEQ was reduced. Zero part of YH to avoid undefined references. ----- */
i1 = lyh + l * nyh;
i2 = lyh + (maxord + 1) * nyh - 1;
if (i1 > i2) {
goto L200;
}
i__1 = i2;
for (i__ = i1; i__ <= i__1; ++i__) {
/* L95: */
rwork[i__ -1] = 0.;
}
goto L200;
/* ----------------------------------------------------------------------- */
/* Block C. */
/* The next block is for the initial call only (ISTATE = 1). */
/* It contains all remaining initializations, the initial call to F, */
/* and the calculation of the initial step size. */
/* The error weights in EWT are inverted after being loaded. */
/* ----------------------------------------------------------------------- */
L100:
uround = dumach_(common);
tn = *t;
tsw = *t;
maxord = mxordn;
if (*itask != 4 && *itask != 5) {
goto L110;
}
tcrit = rwork[0];
if ((tcrit - *tout) * (*tout - *t) < 0.) {
goto L625;
}
if (h0 != 0. && (*t + h0 - tcrit) * h0 > 0.) {
h0 = tcrit - *t;
}
L110:
jstart = 0;
nhnil = 0;
nst = 0;
nje = 0;
nslast = 0;
hu = 0.;
nqu = 0;
mused = 0;
miter = 0;
ccmax = .3;
maxcor = 3;
msbp = 20;
mxncf = 10;
/* Initial call to F. (LF0 points to YH(*,2).) ------------------------- */
lf0 = lyh + nyh;
f(neq, t, y, &rwork[lf0 -1], comp_ode, flattenODE, offsetODE, costanti); //fixed neq y
nfe = 1;
/* Load the initial value vector in YH. --------------------------------- */
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L115: */
rwork[i__ + lyh - 1 -1] = y[i__ - 1];
}
/* Load and invert the EWT array. (H is temporarily set to 1.0.) ------- */
nq = 1;
h__ = 1.;
dewset_(&n, itol, rtol, atol, &rwork[lyh -1], &rwork[lewt -1], common);
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
if (rwork[i__ + lewt - 1 -1] <= 0.) {
goto L621;
}
/* L120: */
rwork[i__ + lewt - 1 -1] = 1. / rwork[i__ + lewt - 1 -1];
}
/* ----------------------------------------------------------------------- */
/* The coding below computes the step size, H0, to be attempted on the */
/* first step, unless the user has supplied a value for this. */
/* First check that TOUT - T differs significantly from zero. */
/* A scalar tolerance quantity TOL is computed, as MAX(RTOL(i)) */
/* if this is positive, or MAX(ATOL(i)/ABS(Y(i))) otherwise, adjusted */
/* so as to be between 100*UROUND and 1.0E-3. */
/* Then the computed value H0 is given by: */
/* H0**(-2) = 1./(TOL * w0**2) + TOL * (norm(F))**2 */
/* where w0 = MAX ( ABS(T), ABS(TOUT) ), */
/* F = the initial value of the vector f(t,y), and */
/* norm() = the weighted vector norm used throughout, given by */
/* the DMNORM function routine, and weighted by the */
/* tolerances initially loaded into the EWT array. */
/* The sign of H0 is inferred from the initial values of TOUT and T. */
/* ABS(H0) is made .le. ABS(TOUT-T) in any case. */
/* ----------------------------------------------------------------------- */
if (h0 != 0.) {
goto L180;
}
tdist = (d__1 = *tout - *t, fabs(d__1));
/* Computing MAX */
d__1 = fabs(*t), d__2 = fabs(*tout);
w0 = max(d__1,d__2);
if (tdist < uround * 2. * w0) {
goto L622;
}
tol = rtol[0];
if (*itol <= 2) {
goto L140;
}
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L130: */
/* Computing MAX */
d__1 = tol, d__2 = rtol[i__ -1];
tol = max(d__1,d__2);
}
L140:
if (tol > 0.) {
goto L160;
}
atoli = atol[0];
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
if (*itol == 2 || *itol == 4) {
atoli = atol[i__ -1];
}
ayi = (d__1 = y[i__ - 1], fabs(d__1));
if (ayi != 0.) {
/* Computing MAX */
d__1 = tol, d__2 = atoli / ayi;
tol = max(d__1,d__2);
}
/* L150: */
}
L160:
/* Computing MAX */
d__1 = tol, d__2 = uround * 100.;
tol = max(d__1,d__2);
tol = min(tol,.001);
sum = dmnorm_(&n, &rwork[lf0 -1], &rwork[lewt -1], common);
/* Computing 2nd power */
d__1 = sum;
sum = 1. / (tol * w0 * w0) + tol * (d__1 * d__1);
h0 = 1. / sqrt(sum);
h0 = min(h0,tdist);
d__1 = *tout - *t;
h0 = d_sign(&h0, &d__1);
/* Adjust H0 if necessary to meet HMAX bound. --------------------------- */
L180:
rh = fabs(h0) * hmxi;
if (rh > 1.) {
h0 /= rh;
}
/* Load H with H0 and scale YH(*,2) by H0. ------------------------------ */
h__ = h0;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L190: */
rwork[i__ + lf0 - 1 -1] = h0 * rwork[i__ + lf0 - 1 -1];
}
goto L270;
/* ----------------------------------------------------------------------- */
/* Block D. */
/* The next code block is for continuation calls only (ISTATE = 2 or 3) */
/* and is to check stop conditions before taking a step. */
/* ----------------------------------------------------------------------- */
L200:
nslast = nst;
switch (*itask) {
case 1: goto L210;
case 2: goto L250;
case 3: goto L220;
case 4: goto L230;
case 5: goto L240;
}
L210:
if ((tn - *tout) * h__ < 0.) {
goto L250;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common); //fixed y
if (iflag != 0) {
goto L627;
}
*t = *tout;
goto L420;
L220:
tp = tn - hu * (uround * 100. + 1.);
if ((tp - *tout) * h__ > 0.) {
goto L623;
}
if ((tn - *tout) * h__ < 0.) {
goto L250;
}
*t = tn;
goto L400;
L230:
tcrit = rwork[0];
if ((tn - tcrit) * h__ > 0.) {
goto L624;
}
if ((tcrit - *tout) * h__ < 0.) {
goto L625;
}
if ((tn - *tout) * h__ < 0.) {
goto L245;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common); //fixed y
if (iflag != 0) {
goto L627;
}
*t = *tout;
goto L420;
L240:
tcrit = rwork[0];
if ((tn - tcrit) * h__ > 0.) {
goto L624;
}
L245:
hmx = fabs(tn) + fabs(h__);
ihit = (d__1 = tn - tcrit, fabs(d__1)) <= uround * 100. * hmx;
if (ihit) {
*t = tcrit;
}
if (ihit) {
goto L400;
}
tnext = tn + h__ * (uround * 4. + 1.);
if ((tnext - tcrit) * h__ <= 0.) {
goto L250;
}
h__ = (tcrit - tn) * (1. - uround * 4.);
if (*istate == 2 && jstart >= 0) {
jstart = -2;
}
/* ----------------------------------------------------------------------- */
/* Block E. */
/* The next block is normally executed for all calls and contains */
/* the call to the one-step core integrator DSTODA. */
/* This is a looping point for the integration steps. */
/* First check for too many steps being taken, update EWT (if not at */
/* start of problem), check for too much accuracy being requested, and */
/* check for H below the roundoff level in T. */
/* ----------------------------------------------------------------------- */
L250:
if (meth == mused) {
goto L255;
}
if (insufr == 1) {
goto L550;
}
if (insufi == 1) {
goto L555;
}
L255:
if (nst - nslast >= mxstep) {
goto L500;
}
dewset_(&n, itol, rtol, atol, &rwork[lyh -1], &rwork[lewt -1], common);
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
if (rwork[i__ + lewt - 1 -1] <= 0.) {
goto L510;
}
/* L260: */
rwork[i__ + lewt - 1 -1] = 1. / rwork[i__ + lewt - 1 -1];
}
L270:
tolsf = uround * dmnorm_(&n, &rwork[lyh -1], &rwork[lewt -1], common);
if (tolsf <= 1.) {
goto L280;
}
tolsf *= 2.;
if (nst == 0) {
goto L626;
}
goto L520;
L280:
if (tn + h__ != tn) {
goto L290;
}
++nhnil;
if (nhnil > mxhnil) {
goto L290;
}
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Warning..Internal T (=R1) and H (=R2) are\n");
#endif
//xerrwd_(msg, &c__50, 101, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " such that in the machine, T + H = T on the next step \n");
#endif
//xerrwd_(msg, &c__60, 101, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " (H = step size). Solver will continue anyway.\n");
#endif
//xerrwd_(msg, &c__50, 101, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &h__, (ftnlen)60);
if (nhnil < mxhnil) {
goto L290;
}
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Above warning has been issued I1 times. \n");
#endif
//xerrwd_(msg, &c__50, 102, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " It will not be issued again for this problem.\n");
#endif
//xerrwd_(msg, &c__50, 102, &c__0, 1, &mxhnil, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
L290:
/* ----------------------------------------------------------------------- */
/* CALL DSTODA(NEQ,Y,YH,NYH,YH,EWT,SAVF,ACOR,WM,IWM,F,JAC,DPRJA,DSOLSY) */
/* ----------------------------------------------------------------------- */
dstoda_(neq, y, &rwork[lyh -1], &nyh, &rwork[lyh -1], &rwork[lewt -1], &rwork[lsavf -1], &rwork[lacor -1], &rwork[lwm -1], &iwork[liwm -1], f, jac, common, comp_ode, flattenODE, offsetODE, costanti, myjac, myjacoffset); //fixed neq y
kgo = 1 - kflag;
switch (kgo) {
case 1: goto L300;
case 2: goto L530;
case 3: goto L540;
}
/* ----------------------------------------------------------------------- */
/* Block F. */
/* The following block handles the case of a successful return from the */
/* core integrator (KFLAG = 0). */
/* If a method switch was just made, record TSW, reset MAXORD, */
/* set JSTART to -1 to signal DSTODA to complete the switch, */
/* and do extra printing of data if IXPR = 1. */
/* Then, in any case, check for stop conditions. */
/* ----------------------------------------------------------------------- */
L300:
init = 1;
if (meth == mused) {
goto L310;
}
tsw = tn;
maxord = mxordn;
if (meth == 2) {
maxord = mxords;
}
if (meth == 2) {
rwork[lwm -1] = sqrt(uround);
}
insufr = min(insufr,1);
insufi = min(insufi,1);
jstart = -1;
if (ixpr == 0) {
goto L310;
}
if (meth == 2) {
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- A switch to the BDF (stiff) method has occurred\n");
#endif
//xerrwd_(msg, &c__60, 105, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
}
if (meth == 1) {
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- A switch to the Adams (nonstiff) method has occ\n");
#endif
//xerrwd_(msg, &c__60, 106, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
}
#ifdef EMULATION_MODE
fprintf(stderr, " at T = R1, tentative step size H = R2, step NST = I1 \n");
#endif
//xerrwd_(msg, &c__60, 107, &c__0, 1, &nst, &c__0, &c__2, &tn, &h__, (ftnlen)60);
L310:
switch (*itask) {
case 1: goto L320;
case 2: goto L400;
case 3: goto L330;
case 4: goto L340;
case 5: goto L350;
}
/* ITASK = 1. If TOUT has been reached, interpolate. ------------------- */
L320:
if ((tn - *tout) * h__ < 0.) {
goto L250;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common);
*t = *tout;
goto L420;
/* ITASK = 3. Jump to exit if TOUT was reached. ------------------------ */
L330:
if ((tn - *tout) * h__ >= 0.) {
goto L400;
}
goto L250;
/* ITASK = 4. See if TOUT or TCRIT was reached. Adjust H if necessary. */
L340:
if ((tn - *tout) * h__ < 0.) {
goto L345;
}
dintdy_(tout, 0, &rwork[lyh -1], &nyh, y, &iflag, common);
*t = *tout;
goto L420;
L345:
hmx = fabs(tn) + fabs(h__);
ihit = (d__1 = tn - tcrit, fabs(d__1)) <= uround * 100. * hmx;
if (ihit) {
goto L400;
}
tnext = tn + h__ * (uround * 4. + 1.);
if ((tnext - tcrit) * h__ <= 0.) {
goto L250;
}
h__ = (tcrit - tn) * (1. - uround * 4.);
if (jstart >= 0) {
jstart = -2;
}
goto L250;
/* ITASK = 5. See if TCRIT was reached and jump to exit. --------------- */
L350:
hmx = fabs(tn) + fabs(h__);
ihit = (d__1 = tn - tcrit, fabs(d__1)) <= uround * 100. * hmx;
/* ----------------------------------------------------------------------- */
/* Block G. */
/* The following block handles all successful returns from DLSODA. */
/* If ITASK .ne. 1, Y is loaded from YH and T is set accordingly. */
/* ISTATE is set to 2, and the optional outputs are loaded into the */
/* work arrays before returning. */
/* ----------------------------------------------------------------------- */
L400:
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L410: */
y[i__ - 1] = rwork[i__ + lyh - 1 -1]; //fixed y
}
*t = tn;
if (*itask != 4 && *itask != 5) {
goto L420;
}
if (ihit) {
*t = tcrit;
}
L420:
*istate = 2;
rwork[10] = hu;
rwork[11] = h__;
rwork[12] = tn;
rwork[14] = tsw;
iwork[10] = nst;
iwork[11] = nfe;
iwork[12] = nje;
iwork[13] = nqu;
iwork[14] = nq;
iwork[18] = mused;
iwork[19] = meth;
return 0;
/* ----------------------------------------------------------------------- */
/* Block H. */
/* The following block handles all unsuccessful returns other than */
/* those for illegal input. First the error message routine is called. */
/* If there was an error test or convergence test failure, IMXER is set. */
/* Then Y is loaded from YH and T is set to TN. */
/* The optional outputs are loaded into the work arrays before returning. */
/* ----------------------------------------------------------------------- */
/* The maximum number of steps was taken before reaching TOUT. ---------- */
L500:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At current T (=R1), MXSTEP (=I1) steps \n");
#endif
//xerrwd_(msg, &c__50, &c__201, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " taken on this call before reaching TOUT \n");
#endif
//xerrwd_(msg, &c__50, &c__201, &c__0, 1, &mxstep, &c__0, 1, &tn, &c_b62, (ftnlen)60);
*istate = -1;
goto L580;
/* EWT(i) .le. 0.0 for some i (not at start of problem). ---------------- */
L510:
ewti = rwork[lewt + i__ - 1 -1];
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T (=R1), EWT(%d) has become R2 .le. 0.\n",ewti);
#endif
//xerrwd_(msg, &c__50, &c__202, &c__0, 1, &i__, &c__0, &c__2, &tn, &ewti, (ftnlen)60);
*istate = -6;
goto L580;
/* Too much accuracy requested for machine precision. ------------------- */
L520:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T (=R1), too much accuracy requested \n");
#endif
//xerrwd_(msg, &c__50, &c__203, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " for precision of machine.. See TOLSF (=R2) \n");
#endif
//xerrwd_(msg, &c__50, &c__203, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &tolsf, (ftnlen)60);
rwork[13] = tolsf;
*istate = -2;
goto L580;
/* KFLAG = -1. Error test failed repeatedly or with ABS(H) = HMIN. ----- */
L530:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T(=R1) and step size H(=R2), the error\n");
#endif
//xerrwd_(msg, &c__50, &c__204, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " test failed repeatedly or with ABS(H) = HMIN\n");
#endif
//xerrwd_(msg, &c__50, &c__204, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &h__, (ftnlen)60);
*istate = -4;
goto L560;
/* KFLAG = -2. Convergence failed repeatedly or with ABS(H) = HMIN. ---- */
L540:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At T (=R1) and step size H (=R2), the \n");
#endif
//xerrwd_(msg, &c__50, &c__205, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " corrector convergence failed repeatedly \n");
#endif
//xerrwd_(msg, &c__50, &c__205, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " or with ABS(H) = HMIN \n");
#endif
//xerrwd_(msg, &c__30, &c__205, &c__0, &c__0, &c__0, &c__0, &c__2, &tn, &h__, (ftnlen)60);
*istate = -5;
goto L560;
/* RWORK length too small to proceed. ----------------------------------- */
L550:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At current T(=R1), RWORK length too small\n");
#endif
//xerrwd_(msg, &c__50, &c__206, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " to proceed. The integration was otherwise successful.\n");
#endif
//xerrwd_(msg, &c__60, &c__206, &c__0, &c__0, &c__0, &c__0, 1, &tn, &c_b62, (ftnlen)60);
*istate = -7;
goto L580;
/* IWORK length too small to proceed. ----------------------------------- */
L555:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At current T(=R1), IWORK length too small\n");
#endif
//xerrwd_(msg, &c__50, &c__207, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " to proceed. The integration was otherwise successful.\n");
#endif
//xerrwd_(msg, &c__60, &c__207, &c__0, &c__0, &c__0, &c__0, 1, &tn, &c_b62, (ftnlen)60);
*istate = -7;
goto L580;
/* Compute IMXER if relevant. ------------------------------------------- */
L560:
big = 0.;
imxer = 1;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
size = (d__1 = rwork[i__ + lacor - 1 -1] * rwork[i__ + lewt - 1 -1], fabs(d__1));
if (big >= size) {
goto L570;
}
big = size;
imxer = i__;
L570:
;
}
iwork[15] = imxer;
/* Set Y vector, T, and optional outputs. ------------------------------- */
L580:
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L590: */
y[i__ - 1] = rwork[i__ + lyh - 1 -1]; //fixed y
}
*t = tn;
rwork[10] = hu;
rwork[11] = h__;
rwork[12] = tn;
rwork[14] = tsw;
iwork[10] = nst;
iwork[11] = nfe;
iwork[12] = nje;
iwork[13] = nqu;
iwork[14] = nq;
iwork[18] = mused;
iwork[19] = meth;
return 0;
/* ----------------------------------------------------------------------- */
/* Block I. */
/* The following block handles all error returns due to illegal input */
/* (ISTATE = -3), as detected before calling the core integrator. */
/* First the error message routine is called. If the illegal input */
/* is a negative ISTATE, the run is aborted (apparent infinite loop). */
/* ----------------------------------------------------------------------- */
L601:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ISTATE (=I1) illegal.\n");
#endif
//xerrwd_(msg, &c__30, 1, &c__0, 1, istate, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
if (*istate < 0) {
goto L800;
}
debug[indice] = 601;
goto L700;
L602:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__2, &c__0, 1, itask, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 602;
goto L700;
L603:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ISTATE .gt. 1 but DLSODA not initialized.\n");
#endif
//xerrwd_(msg, &c__50, &c__3, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 603;
goto L700;
L604:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- NEQ (=I1) .lt. 1 \n");
#endif
//xerrwd_(msg, &c__30, &c__4, &c__0, 1, &neq[1], &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 604;
goto L700;
L605:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ISTATE = 3 and NEQ increased (I1 to I2). \n");
#endif
//xerrwd_(msg, &c__50, &c__5, &c__0, &c__2, &n, &neq[1], &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 605;
goto L700;
L606:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITOL (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__6, &c__0, 1, itol, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 606;
goto L700;
L607:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- IOPT (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__7, &c__0, 1, iopt, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 607;
goto L700;
L608:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- JT (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, &c__8, &c__0, 1, jt, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 608;
goto L700;
L609:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ML (=I1) illegal: .lt.0 or .ge.NEQ (=I2) \n");
#endif
//xerrwd_(msg, &c__50, &c__9, &c__0, &c__2, &ml, &neq[1], &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 609;
goto L700;
L610:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MU (=I1) illegal: .lt.0 or .ge.NEQ (=I2) \n");
#endif
//xerrwd_(msg, &c__50, 10, &c__0, &c__2, &mu, &neq[1], &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 610;
goto L700;
L611:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- IXPR (=I1) illegal. \n");
#endif
//xerrwd_(msg, &c__30, 11, &c__0, 1, &ixpr, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 611;
goto L700;
L612:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXSTEP (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, 12, &c__0, 1, &mxstep, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 612;
goto L700;
L613:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXHNIL (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, 13, &c__0, 1, &mxhnil, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 613;
goto L700;
L614:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- TOUT (=R1) behind T (=R2) \n");
#endif
//xerrwd_(msg, &c__40, 14, &c__0, &c__0, &c__0, &c__0, &c__2, tout, t, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " Integration direction is given by H0 (=R1) \n");
#endif
//xerrwd_(msg, &c__50, 14, &c__0, &c__0, &c__0, &c__0, 1, &h0, &c_b62, (ftnlen)60);
debug[indice] = 614;
goto L700;
L615:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- HMAX (=R1) .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__30, 15, &c__0, &c__0, &c__0, &c__0, 1, &hmax, &c_b62, (ftnlen)60);
debug[indice] = 615;
goto L700;
L616:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- HMIN (=R1) .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__30, 16, &c__0, &c__0, &c__0, &c__0, 1, &hmin, &c_b62, (ftnlen)60);
debug[indice] = 616;
goto L700;
L617:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- RWORK length needed, LENRW (=I1), exceeds LRW (=I2)\n");
#endif
//xerrwd_(msg, &c__60, 17, &c__0, &c__2, &lenrw, lrw, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 617;
goto L700;
L618:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- IWORK length needed, LENIW (=I1), exceeds LIW (=I2)\n");
#endif
//xerrwd_(msg, &c__60, 18, &c__0, &c__2, &leniw, liw, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice] = 618;
goto L700;
L619:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- RTOL(I1) is R1 .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__40, 19, &c__0, 1, &i__, &c__0, 1, &rtoli, &c_b62, (ftnlen)60);
debug[indice] = 619;
goto L700;
L620:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ATOL(I1) is R1 .lt. 0.0 \n");
#endif
//xerrwd_(msg, &c__40, &c__20, &c__0, 1, &i__, &c__0, 1, &atoli, &c_b62, (ftnlen)60);
debug[indice] = 620;
goto L700;
L621:
ewti = rwork[lewt + i__ - 1 -1];
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- EWT(I1) is R1 .le. 0.0 \n");
#endif
//xerrwd_(msg, &c__40, &c__21, &c__0, 1, &i__, &c__0, 1, &ewti, &c_b62, (ftnlen)60);
debug[indice] = 621;
goto L700;
L622:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- TOUT(=R1) too close to T(=R2) to start integration.\n");
#endif
//xerrwd_(msg, &c__60, &c__22, &c__0, &c__0, &c__0, &c__0, &c__2, tout, t, (ftnlen)60);
debug[indice] = 622;
goto L700;
L623:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK = I1 and TOUT (=R1) behind TCUR - HU (= R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__23, &c__0, 1, itask, &c__0, &c__2, tout, &tp,(ftnlen)60);
debug[indice] = 623;
goto L700;
L624:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK = 4 or 5 and TCRIT (=R1) behind TCUR (=R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__24, &c__0, &c__0, &c__0, &c__0, &c__2, &tcrit, &tn, (ftnlen)60);
debug[indice] = 624;
goto L700;
L625:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- ITASK = 4 or 5 and TCRIT (=R1) behind TOUT (=R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__25, &c__0, &c__0, &c__0, &c__0, &c__2, &tcrit, tout, (ftnlen)60);
debug[indice] = 625;
goto L700;
L626:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- At start of problem, too much accuracy \n");
#endif
//xerrwd_(msg, &c__50, &c__26, &c__0, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
#ifdef EMULATION_MODE
fprintf(stderr, " requested for precision of machine.. See TOLSF (=R1) \n");
#endif
//xerrwd_(msg, &c__60, &c__26, &c__0, &c__0, &c__0, &c__0, 1, &tolsf, &c_b62, (ftnlen)60);
rwork[13] = tolsf;
debug[indice] = 626;
goto L700;
L627:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Trouble in DINTDY. ITASK = I1, TOUT = R1\n");
#endif
//xerrwd_(msg, &c__50, &c__27, &c__0, 1, itask, &c__0, 1, tout, &c_b62, (ftnlen)60);
debug[indice] = 627;
goto L700;
L628:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXORDN (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, &c__28, &c__0, 1, &mxordn, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 628;
goto L700;
L629:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- MXORDS (=I1) .lt. 0 \n");
#endif
//xerrwd_(msg, &c__30, &c__29, &c__0, 1, &mxords, &c__0, &c__0,&c_b62, &c_b62, (ftnlen)60);
debug[indice] = 629;
L700:
// debug[indice] = 123;
*istate = -3;
return 0;
L800:
#ifdef EMULATION_MODE
fprintf(stderr, "DLSODA- Run aborted.. apparent infinite loop. \n");
#endif
//xerrwd_(msg, &c__50, &c__303, &c__2, &c__0, &c__0, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)60);
debug[indice]-=1;
return 0;
/* ----------------------- End of Subroutine DLSODA ---------------------- */
} /* dlsoda_ */
/* DECK DINTDY */
/* Subroutine */
__device__ int dintdy_(double *t, int k, double *yh, int *NOT_nyh, double *dky, int *iflag, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int yh_dim1 = 0;
int yh_offset = 0;
int i__1 = 0;
int i__2 = 0;
double d__1 = 0.;
/* Builtin functions */
/* Local variables */
double c__ = 0.;
int i__ = 0;
int j = 0;
double r__ = 0;
double s = 0;
int ic = 0;
int jb = 0;
int jj = 0;
double tp = 0;
int jb2 = 0;
int jj1 = 0;
int jp1 = 0;
/* ***BEGIN PROLOGUE DINTDY */
/* ***SUBSIDIARY */
/* ***PURPOSE Interpolate solution derivatives. */
/* ***TYPE DOUBLE PRECISION (SINTDY-S, DINTDY-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* DINTDY computes interpolated values of the K-th derivative of the */
/* dependent variable vector y, and stores it in DKY. This routine */
/* is called within the package with K = 0 and T = TOUT, but may */
/* also be called by the user for any K up to the current order. */
/* (See detailed instructions in the usage documentation.) */
/* The computed values in DKY are gotten by interpolation using the */
/* Nordsieck history array YH. This array corresponds uniquely to a */
/* vector-valued polynomial of degree NQCUR or less, and DKY is set */
/* to the K-th derivative of this polynomial at T. */
/* The formula for DKY is: */
/* q */
/* DKY(i) = sum c(j,K) * (T - tn)**(j-K) * h**(-j) * YH(i,j+1) */
/* j=K */
/* where c(j,K) = j*(j-1)*...*(j-K+1), q = NQCUR, tn = TCUR, h = HCUR. */
/* The quantities nq = NQCUR, l = nq+1, N = NEQ, tn, and h are */
/* communicated by COMMON. The above sum is done in reverse order. */
/* IFLAG is returned negative if either K or T is out of bounds. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED XERRWD */
/* ***COMMON BLOCKS DLS001 */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* 010418 Reduced size of Common block /DLS001/. (ACH) */
/* 031105 Restored 'own' variables to Common block /DLS001/, to */
/* enable interrupt/restart feature. (ACH) */
/* 050427 Corrected roundoff decrement in TP. (ACH) */
/* ***END PROLOGUE DINTDY */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DINTDY */
/* Parameter adjustments */
yh_dim1 = *NOT_nyh;
yh_offset = 1 + yh_dim1;
//yh -= yh_offset;
//--dky;
/* Function Body */
*iflag = 0;
if (k < 0 || k > nq) {
goto L80;
}
d__1 = fabs(tn) + fabs(hu);
tp = tn - hu - uround * 100. * d_sign(&d__1, &hu);
if ((*t - tp) * (*t - tn) > 0.) {
goto L90;
}
s = (*t - tn) / h__;
ic = 1;
if (k == 0) {
goto L15;
}
jj1 = l - k;
i__1 = nq;
for (jj = jj1; jj <= i__1; ++jj) {
/* L10: */
ic *= jj;
}
L15:
c__ = (double) ic;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L20: */
dky[i__ -1] = c__ * yh[i__ + l * yh_dim1 -yh_offset];
}
if (k == nq) {
goto L55;
}
jb2 = nq - k;
i__1 = jb2;
for (jb = 1; jb <= i__1; ++jb) {
j = nq - jb;
jp1 = j + 1;
ic = 1;
if (k == 0) {
goto L35;
}
jj1 = jp1 - k;
i__2 = j;
for (jj = jj1; jj <= i__2; ++jj) {
/* L30: */
ic *= jj;
}
L35:
c__ = (double) ic;
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L40: */
dky[i__ -1] = c__ * yh[i__ + jp1 * yh_dim1 -yh_offset] + s * dky[i__ -1];
}
/* L50: */
}
if (k == 0) {
return 0;
}
L55:
i__1 = -(k);
r__ = pow(h__, (double)i__1);
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L60: */
dky[i__ -1] = r__ * dky[i__ -1];
}
return 0;
L80:
#ifdef EMULATION_MODE
fprintf(stderr, "DINTDY- K (=I1) illegal \n");
#endif
//xerrwd_(msg, &c__30, &c__51, &c__0, 1, k, &c__0, &c__0, &c_b62, &c_b62, (ftnlen)80);
*iflag = -1;
return 0;
L90:
#ifdef EMULATION_MODE
fprintf(stderr, "DINTDY- T (=R1) illegal \n");
#endif
//xerrwd_(msg, &c__30, &c__52, &c__0, &c__0, &c__0, &c__0, 1, t, &c_b62,(ftnlen)80);
#ifdef EMULATION_MODE
fprintf(stderr, " T not in interval TCUR - HU (= R1) to TCUR (=R2) \n");
#endif
//xerrwd_(msg, &c__60, &c__52, &c__0, &c__0, &c__0, &c__0, &c__2, &tp, &tn, (ftnlen)80);
*iflag = -2;
return 0;
/* ----------------------- END OF SUBROUTINE DINTDY ---------------------- */
} /* dintdy_ */
/* DECK DSTODA */
/* Subroutine */
#ifdef use_export
export
#endif
__device__ int dstoda_(int *neq, double *y, double *yh, int *NOT_nyh, double *yh1, double *ewt, double *savf, double *acor, double *wm, int *iwm, myFex f, myJex jac, struct cuLsodaCommonBlock *common, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, double* costanti, param_t* myjac, unsigned int* myjacoffset)
{
/* Initialized data */
/* System generated locals */
int yh_dim1 = 0;
int yh_offset = 0;
int i__1 = 0;
int i__2 = 0;
double d__1 = 0.;
double d__2 = 0.;
double d__3 = 0.;
/* Builtin functions */
/* Local variables */
int i__ = 0;
int j = 0;
int m = 0;
double r__ = 0.;
int i1 = 0;
int jb = 0;
double rh = 0.;
double rm = 0.;
double dm1 = 0.;
double dm2 = 0.;
int lm1 = 0;
int lm2 = 0;
double rh1 = 0.;
double rh2 = 0.;
double del = 0.;
double ddn = 0.;
int ncf = 0;
double pdh = 0.;
double dsm = 0.;
double dup = 0.;
double exm1 = 0.;
double exm2 = 0.;
int nqm1 = 0;
int nqm2 = 0;
double dcon = 0.;
double delp = 0.;
int lm1p1 = 0;
int lm2p1 = 0;
double exdn = 0.;
double rhdn = 0.;
int iret = 0;
double told = 0.;
double rhsm = 0.;
int newq = 0;
double exsm = 0.;
double rhup = 0.;
double rate = 0.;
double exup = 0.;
double rh1it = 0.;
double alpha = 0.;
int iredo = 0;
double pnorm = 0.;
/* Parameter adjustments */
//--neq; //fixed
// --y;
yh_dim1 = *NOT_nyh;
yh_offset = 1 + yh_dim1;
//yh -= yh_offset;
//--yh1;
//--ewt;
//--savf;
//--acor;
//--wm;
//--iwm;
/* Function Body */
/* ----------------------------------------------------------------------- */
/* DSTODA performs one step of the integration of an initial value */
/* problem for a system of ordinary differential equations. */
/* Note: DSTODA is independent of the value of the iteration method */
/* indicator MITER, when this is .ne. 0, and hence is independent */
/* of the type of chord method used, or the Jacobian structure. */
/* Communication with DSTODA is done with the following variables: */
/* Y = an array of length .ge. N used as the Y argument in */
/* all calls to F and JAC. */
/* NEQ = int array containing problem size in NEQ(1), and */
/* passed as the NEQ argument in all calls to F and JAC. */
/* YH = an NYH by LMAX array containing the dependent variables */
/* and their approximate scaled derivatives, where */
/* LMAX = MAXORD + 1. YH(i,j+1) contains the approximate */
/* j-th derivative of y(i), scaled by H**j/factorial(j) */
/* (j = 0,1,...,NQ). On entry for the first step, the first */
/* two columns of YH must be set from the initial values. */
/* NYH = a constant int .ge. N, the first dimension of YH. */
/* YH1 = a one-dimensional array occupying the same space as YH. */
/* EWT = an array of length N containing multiplicative weights */
/* for local error measurements. Local errors in y(i) are */
/* compared to 1.0/EWT(i) in various error tests. */
/* SAVF = an array of working storage, of length N. */
/* ACOR = a work array of length N, used for the accumulated */
/* corrections. On a successful return, ACOR(i) contains */
/* the estimated one-step local error in y(i). */
/* WM,IWM = real and int work arrays associated with matrix */
/* operations in chord iteration (MITER .ne. 0). */
/* dprja_ = name of routine to evaluate and preprocess Jacobian matrix */
/* and P = I - H*EL0*Jac, if a chord method is being used. */
/* It also returns an estimate of norm(Jac) in PDNORM. */
/* dsolsy_ = name of routine to solve linear system in chord iteration. */
/* CCMAX = maximum relative change in H*EL0 before dprja_ is called. */
/* H = the step size to be attempted on the next step. */
/* H is altered by the error control algorithm during the */
/* problem. H can be either positive or negative, but its */
/* sign must remain constant throughout the problem. */
/* HMIN = the minimum absolute value of the step size H to be used. */
/* HMXI = inverse of the maximum absolute value of H to be used. */
/* HMXI = 0.0 is allowed and corresponds to an infinite HMAX. */
/* HMIN and HMXI may be changed at any time, but will not */
/* take effect until the next change of H is considered. */
/* TN = the independent variable. TN is updated on each step taken. */
/* JSTART = an int used for input only, with the following */
/* values and meanings: */
/* 0 perform the first step. */
/* .gt.0 take a new step continuing from the last. */
/* -1 take the next step with a new value of H, */
/* N, METH, MITER, and/or matrix parameters. */
/* -2 take the next step with a new value of H, */
/* but with other inputs unchanged. */
/* On return, JSTART is set to 1 to facilitate continuation. */
/* KFLAG = a completion code with the following meanings: */
/* 0 the step was successful. */
/* -1 the requested error could not be achieved. */
/* -2 corrector convergence could not be achieved. */
/* -3 fatal error in dprja_ or dsolsy_. */
/* A return with KFLAG = -1 or -2 means either */
/* ABS(H) = HMIN or 10 consecutive failures occurred. */
/* On a return with KFLAG negative, the values of TN and */
/* the YH array are as of the beginning of the last */
/* step, and H is the last step size attempted. */
/* MAXORD = the maximum order of integration method to be allowed. */
/* MAXCOR = the maximum number of corrector iterations allowed. */
/* MSBP = maximum number of steps between dprja_ calls (MITER .gt. 0). */
/* MXNCF = maximum number of convergence failures allowed. */
/* METH = current method. */
/* METH = 1 means Adams method (nonstiff) */
/* METH = 2 means BDF method (stiff) */
/* METH may be reset by DSTODA. */
/* MITER = corrector iteration method. */
/* MITER = 0 means functional iteration. */
/* MITER = JT .gt. 0 means a chord iteration corresponding */
/* to Jacobian type JT. (The DLSODA/DLSODAR argument JT is */
/* communicated here as JTYP, but is not used in DSTODA */
/* except to load MITER following a method switch.) */
/* MITER may be reset by DSTODA. */
/* N = the number of first-order differential equations. */
/* ----------------------------------------------------------------------- */
kflag = 0;
told = tn;
ncf = 0;
ierpj = 0;
iersl = 0;
jcur = 0;
icf = 0;
delp = 0.;
if (jstart > 0) {
goto L200;
}
if (jstart == -1) {
goto L100;
}
if (jstart == -2) {
goto L160;
}
/* ----------------------------------------------------------------------- */
/* On the first call, the order is set to 1, and other variables are */
/* initialized. RMAX is the maximum ratio by which H can be increased */
/* in a single step. It is initially 1.E4 to compensate for the small */
/* initial H, but then is normally equal to 10. If a failure */
/* occurs (in corrector convergence or error test), RMAX is set at 2 */
/* for the next increase. */
/* DCFODE is called to get the needed coefficients for both methods. */
/* ----------------------------------------------------------------------- */
lmax = maxord + 1;
nq = 1;
l = 2;
ialth = 2;
rmax = 1e4;
rc = 0.;
el0 = 1.;
crate = .7;
hold = h__;
nslp = 0;
ipup = miter;
iret = 3;
/* Initialize switching parameters. METH = 1 is assumed initially. ----- */
icount = 20;
irflag = 0;
pdest = 0.;
pdlast = 0.;
ratio = 5.;
dcfode_(2, elco, tesco, common);
for (i__ = 1; i__ <= 5; ++i__) {
/* L10: */
cm2[i__ - 1] = tesco[i__ * 3 - 2] * elco[i__ + 1 + i__ * 13 - 14];
}
dcfode_(1, elco, tesco, common);
for (i__ = 1; i__ <= 12; ++i__) {
/* L20: */
cm1[i__ - 1] = tesco[i__ * 3 - 2] * elco[i__ + 1 + i__ * 13 - 14];
}
goto L150;
/* ----------------------------------------------------------------------- */
/* The following block handles preliminaries needed when JSTART = -1. */
/* IPUP is set to MITER to force a matrix update. */
/* If an order increase is about to be considered (IALTH = 1), */
/* IALTH is reset to 2 to postpone consideration one more step. */
/* If the caller has changed METH, DCFODE is called to reset */
/* the coefficients of the method. */
/* If H is to be changed, YH must be rescaled. */
/* If H or METH is being changed, IALTH is reset to L = NQ + 1 */
/* to prevent further changes in H for that many steps. */
/* ----------------------------------------------------------------------- */
L100:
ipup = miter;
lmax = maxord + 1;
if (ialth == 1) {
ialth = 2;
}
if (meth == mused) {
goto L160;
}
dcfode_(meth, elco, tesco, common);
ialth = l;
iret = 1;
/* ----------------------------------------------------------------------- */
/* The el vector and related constants are reset */
/* whenever the order NQ is changed, or at the start of the problem. */
/* ----------------------------------------------------------------------- */
L150:
i__1 = l;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L155: */
el[i__ - 1] = elco[i__ + nq * 13 - 14];
}
nqnyh = nq * *NOT_nyh;
rc = rc * el[0] / el0;
el0 = el[0];
conit = .5 / (double)(nq + 2);
switch (iret) {
case 1: goto L160;
case 2: goto L170;
case 3: goto L200;
}
/* ----------------------------------------------------------------------- */
/* If H is being changed, the H ratio RH is checked against */
/* RMAX, HMIN, and HMXI, and the YH array rescaled. IALTH is set to */
/* L = NQ + 1 to prevent a change of H for that many steps, unless */
/* forced by a convergence or error test failure. */
/* ----------------------------------------------------------------------- */
L160:
if (h__ == hold) {
goto L200;
}
rh = h__ / hold;
h__ = hold;
iredo = 3;
goto L175;
L170:
/* Computing MAX */
d__1 = rh, d__2 = hmin / fabs(h__);
rh = max(d__1,d__2);
L175:
rh = min(rh,rmax);
/* Computing MAX */
d__1 = 1., d__2 = fabs(h__) * hmxi * rh;
rh /= max(d__1,d__2);
/* ----------------------------------------------------------------------- */
/* If METH = 1, also restrict the new step size by the stability region. */
/* If this reduces H, set IRFLAG to 1 so that if there are roundoff */
/* problems later, we can assume that is the cause of the trouble. */
/* ----------------------------------------------------------------------- */
if (meth == 2) {
goto L178;
}
irflag = 0;
/* Computing MAX */
d__1 = fabs(h__) * pdlast;
pdh = max(d__1,1e-6);
if (rh * pdh * 1.00001 < sm1[nq - 1]) {
goto L178;
}
rh = sm1[nq - 1] / pdh;
irflag = 1;
L178:
r__ = 1.;
i__1 = l;
for (j = 2; j <= i__1; ++j) {
r__ *= rh;
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L180: */
yh[i__ + j * yh_dim1 -yh_offset] *= r__;
}
}
h__ *= rh;
rc *= rh;
ialth = l;
if (iredo == 0) {
goto L690;
}
/* ----------------------------------------------------------------------- */
/* This section computes the predicted values by effectively */
/* multiplying the YH array by the Pascal triangle matrix. */
/* RC is the ratio of new to old values of the coefficient H*EL(1). */
/* When RC differs from 1 by more than CCMAX, IPUP is set to MITER */
/* to force dprja_ to be called, if a Jacobian is involved. */
/* In any case, dprja_ is called at least every MSBP steps. */
/* ----------------------------------------------------------------------- */
L200:
if ((d__1 = rc - 1., fabs(d__1)) > ccmax) {
ipup = miter;
}
if (nst >= nslp + msbp) {
ipup = miter;
}
tn += h__;
i1 = nqnyh + 1;
i__2 = nq;
for (jb = 1; jb <= i__2; ++jb) {
i1 -= *NOT_nyh;
/* DIR$ IVDEP */
i__1 = nqnyh;
for (i__ = i1; i__ <= i__1; ++i__) {
/* L210: */
yh1[i__ -1] += yh1[i__ + *NOT_nyh -1];
}
/* L215: */
}
pnorm = dmnorm_(&n, yh1, ewt, common);
/* ----------------------------------------------------------------------- */
/* Up to MAXCOR corrector iterations are taken. A convergence test is */
/* made on the RMS-norm of each correction, weighted by the error */
/* weight vector EWT. The sum of the corrections is accumulated in the */
/* vector ACOR(i). The YH array is not altered in the corrector loop. */
/* ----------------------------------------------------------------------- */
L220:
m = 0;
rate = 0.;
del = 0.;
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L230: */
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset]; //fixed y
}
f(neq, &tn, y, savf, comp_ode, flattenODE, offsetODE,costanti); //fixed neq y
++nfe;
if (ipup <= 0) {
goto L250;
}
/* ----------------------------------------------------------------------- */
/* If indicated, the matrix P = I - H*EL(1)*J is reevaluated and */
/* preprocessed before starting the corrector iteration. IPUP is set */
/* to 0 as an indicator that this has been done. */
/* ----------------------------------------------------------------------- */
dprja_(neq, y, &yh[yh_offset -yh_offset], NOT_nyh, ewt, acor, savf, wm, iwm, f, jac, common, comp_ode, flattenODE, offsetODE, myjac, myjacoffset, costanti ); //fixed neq y
ipup = 0;
rc = 1.;
nslp = nst;
crate = .7;
if (ierpj != 0) {
goto L430;
}
L250:
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L260: */
acor[i__ -1] = 0.;
}
L270:
if (miter != 0) {
goto L350;
}
/* ----------------------------------------------------------------------- */
/* In the case of functional iteration, update Y directly from */
/* the result of the last function evaluation. */
/* ----------------------------------------------------------------------- */
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
savf[i__ -1] = h__ * savf[i__ -1] - yh[i__ + (yh_dim1 << 1) -yh_offset];
/* L290: */
y[i__ - 1] = savf[i__ -1] - acor[i__ -1]; //fixed y
}
del = dmnorm_(&n, y, ewt, common);
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset] + el[0] * savf[i__ -1];
/* L300: */
acor[i__ -1] = savf[i__ -1];
}
goto L400;
/* ----------------------------------------------------------------------- */
/* In the case of the chord method, compute the corrector error, */
/* and solve the linear system with that as right-hand side and */
/* P as coefficient matrix. */
/* ----------------------------------------------------------------------- */
L350:
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L360: */
y[i__ - 1] = h__ * savf[i__ -1] - (yh[i__ + (yh_dim1 << 1) -yh_offset] + acor[i__ -1]);
}
dsolsy_(wm, iwm, y, savf, common);
if (iersl < 0) {
goto L430;
}
if (iersl > 0) {
goto L410;
}
del = dmnorm_(&n, y, ewt, common);
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
acor[i__ -1] += y[i__ - 1];
/* L380: */
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset] + el[0] * acor[i__ -1];
}
/* ----------------------------------------------------------------------- */
/* Test for convergence. If M .gt. 0, an estimate of the convergence */
/* rate constant is stored in CRATE, and this is used in the test. */
/* We first check for a change of iterates that is the size of */
/* roundoff error. If this occurs, the iteration has converged, and a */
/* new rate estimate is not formed. */
/* In all other cases, force at least two iterations to form a */
/* local Lipschitz constant estimate for Adams methods. */
/* On convergence, form PDEST = local maximum Lipschitz constant */
/* estimate. PDLAST is the most recent nonzero estimate. */
/* ----------------------------------------------------------------------- */
L400:
if (del <= pnorm * 100. * uround) {
goto L450;
}
if (m == 0 && meth == 1) {
goto L405;
}
if (m == 0) {
goto L402;
}
rm = 1024.;
if (del <= delp * 1024.) {
rm = del / delp;
}
rate = max(rate,rm);
/* Computing MAX */
d__1 = crate * .2;
crate = max(d__1,rm);
L402:
/* Computing MIN */
d__1 = 1., d__2 = crate * 1.5;
dcon = del * min(d__1,d__2) / (tesco[nq * 3 - 2] * conit);
if (dcon > 1.) {
goto L405;
}
/* Computing MAX */
d__2 = pdest, d__3 = rate / (d__1 = h__ * el[0], fabs(d__1));
pdest = max(d__2,d__3);
if (pdest != 0.) {
pdlast = pdest;
}
goto L450;
L405:
++m;
if (m == maxcor) {
goto L410;
}
if (m >= 2 && del > delp * 2.) {
goto L410;
}
delp = del;
f(neq, &tn, y, savf, comp_ode, flattenODE, offsetODE, costanti); //fixed neq y
++nfe;
goto L270;
/* ----------------------------------------------------------------------- */
/* The corrector iteration failed to converge. */
/* If MITER .ne. 0 and the Jacobian is out of date, dprja_ is called for */
/* the next try. Otherwise the YH array is retracted to its values */
/* before prediction, and H is reduced, if possible. If H cannot be */
/* reduced or MXNCF failures have occurred, exit with KFLAG = -2. */
/* ----------------------------------------------------------------------- */
L410:
if (miter == 0 || jcur == 1) {
goto L430;
}
icf = 1;
ipup = miter;
goto L220;
L430:
icf = 2;
++ncf;
rmax = 2.;
tn = told;
i1 = nqnyh + 1;
i__2 = nq;
for (jb = 1; jb <= i__2; ++jb) {
i1 -= *NOT_nyh;
/* DIR$ IVDEP */
i__1 = nqnyh;
for (i__ = i1; i__ <= i__1; ++i__) {
/* L440: */
yh1[i__ -1] -= yh1[i__ + *NOT_nyh -1];
}
/* L445: */
}
if (ierpj < 0 || iersl < 0) {
goto L680;
}
if (fabs(h__) <= hmin * 1.00001) {
goto L670;
}
if (ncf == mxncf) {
goto L670;
}
rh = .25;
ipup = miter;
iredo = 1;
goto L170;
/* ----------------------------------------------------------------------- */
/* The corrector has converged. JCUR is set to 0 */
/* to signal that the Jacobian involved may need updating later. */
/* The local error test is made and control passes to statement 500 */
/* if it fails. */
/* ----------------------------------------------------------------------- */
L450:
jcur = 0;
if (m == 0) {
dsm = del / tesco[nq * 3 - 2];
}
if (m > 0) {
dsm = dmnorm_(&n, acor, ewt, common) / tesco[nq * 3 - 2];
}
if (dsm > 1.) {
goto L500;
}
/* ----------------------------------------------------------------------- */
/* After a successful step, update the YH array. */
/* Decrease ICOUNT by 1, and if it is -1, consider switching methods. */
/* If a method switch is made, reset various parameters, */
/* rescale the YH array, and exit. If there is no switch, */
/* consider changing H if IALTH = 1. Otherwise decrease IALTH by 1. */
/* If IALTH is then 1 and NQ .lt. MAXORD, then ACOR is saved for */
/* use in a possible order increase on the next step. */
/* If a change in H is considered, an increase or decrease in order */
/* by one is considered also. A change in H is made only if it is by a */
/* factor of at least 1.1. If not, IALTH is set to 3 to prevent */
/* testing for that many steps. */
/* ----------------------------------------------------------------------- */
kflag = 0;
iredo = 0;
++nst;
hu = h__;
nqu = nq;
mused = meth;
i__2 = l;
for (j = 1; j <= i__2; ++j) {
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L460: */
yh[i__ + j * yh_dim1 -yh_offset] += el[j - 1] * acor[i__ -1];
}
}
--icount;
if (icount >= 0) {
goto L488;
}
if (meth == 2) {
goto L480;
}
/* ----------------------------------------------------------------------- */
/* We are currently using an Adams method. Consider switching to BDF. */
/* If the current order is greater than 5, assume the problem is */
/* not stiff, and skip this section. */
/* If the Lipschitz constant and error estimate are not polluted */
/* by roundoff, go to 470 and perform the usual test. */
/* Otherwise, switch to the BDF methods if the last step was */
/* restricted to ensure stability (irflag = 1), and stay with Adams */
/* method if not. When switching to BDF with polluted error estimates, */
/* in the absence of other information, double the step size. */
/* When the estimates are OK, we make the usual test by computing */
/* the step size we could have (ideally) used on this step, */
/* with the current (Adams) method, and also that for the BDF. */
/* If NQ .gt. MXORDS, we consider changing to order MXORDS on switching. */
/* Compare the two step sizes to decide whether to switch. */
/* The step size advantage must be at least RATIO = 5 to switch. */
/* ----------------------------------------------------------------------- */
if (nq > 5) {
goto L488;
}
if (dsm > pnorm * 100. * uround && pdest != 0.) {
goto L470;
}
if (irflag == 0) {
goto L488;
}
rh2 = 2.;
nqm2 = min(nq,mxords);
goto L478;
L470:
exsm = 1. / (double)l;
rh1 = 1. / (pow(dsm, exsm) * 1.2 + 1.2e-6);
rh1it = rh1 * 2.;
pdh = pdlast * fabs(h__);
if (pdh * rh1 > 1e-5) {
rh1it = sm1[nq - 1] / pdh;
}
rh1 = min(rh1,rh1it);
if (nq <= mxords) {
goto L474;
}
nqm2 = mxords;
lm2 = mxords + 1;
exm2 = 1. / (double)lm2;
lm2p1 = lm2 + 1;
dm2 = dmnorm_(&n, &yh[lm2p1 * yh_dim1 + 1 -yh_offset], ewt, common) / cm2[mxords - 1];
rh2 = 1. / (pow(dm2, exm2) * 1.2 + 1.2e-6);
goto L476;
L474:
dm2 = dsm * (cm1[nq - 1] / cm2[nq - 1]);
rh2 = 1. / (pow(dm2, exsm) * 1.2 + 1.2e-6);
nqm2 = nq;
L476:
if (rh2 < ratio * rh1) {
goto L488;
}
/* THE SWITCH TEST PASSED. RESET RELEVANT QUANTITIES FOR BDF. ---------- */
L478:
rh = rh2;
icount = 20;
meth = 2;
miter = jtyp;
pdlast = 0.;
nq = nqm2;
l = nq + 1;
goto L170;
/* ----------------------------------------------------------------------- */
/* We are currently using a BDF method. Consider switching to Adams. */
/* Compute the step size we could have (ideally) used on this step, */
/* with the current (BDF) method, and also that for the Adams. */
/* If NQ .gt. MXORDN, we consider changing to order MXORDN on switching. */
/* Compare the two step sizes to decide whether to switch. */
/* The step size advantage must be at least 5/RATIO = 1 to switch. */
/* If the step size for Adams would be so small as to cause */
/* roundoff pollution, we stay with BDF. */
/* ----------------------------------------------------------------------- */
L480:
exsm = 1. / (double)l;
if (mxordn >= nq) {
goto L484;
}
nqm1 = mxordn;
lm1 = mxordn + 1;
exm1 = 1. / (double)lm1;
lm1p1 = lm1 + 1;
dm1 = dmnorm_(&n, &yh[lm1p1 * yh_dim1 + 1 -yh_offset], ewt, common) / cm1[mxordn - 1];
rh1 = 1. / (pow(dm1, exm1) * 1.2 + 1.2e-6);
goto L486;
L484:
dm1 = dsm * (cm2[nq - 1] / cm1[nq - 1]);
rh1 = 1. / (pow(dm1, exsm) * 1.2 + 1.2e-6);
nqm1 = nq;
exm1 = exsm;
L486:
rh1it = rh1 * 2.;
pdh = pdnorm * fabs(h__);
if (pdh * rh1 > 1e-5) {
rh1it = sm1[nqm1 - 1] / pdh;
}
rh1 = min(rh1,rh1it);
rh2 = 1. / (pow(dsm, exsm) * 1.2 + 1.2e-6);
if (rh1 * ratio < rh2 * 5.) {
goto L488;
}
alpha = max(.001,rh1);
dm1 = pow(alpha, exm1) * dm1;
if (dm1 <= uround * 1e3 * pnorm) {
goto L488;
}
/* The switch test passed. Reset relevant quantities for Adams. -------- */
rh = rh1;
icount = 20;
meth = 1;
miter = 0;
pdlast = 0.;
nq = nqm1;
l = nq + 1;
goto L170;
/* No method switch is being made. Do the usual step/order selection. -- */
L488:
--ialth;
if (ialth == 0) {
goto L520;
}
if (ialth > 1) {
goto L700;
}
if (l == lmax) {
goto L700;
}
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L490: */
yh[i__ + lmax * yh_dim1 -yh_offset] = acor[i__ -1];
}
goto L700;
/* ----------------------------------------------------------------------- */
/* The error test failed. KFLAG keeps track of multiple failures. */
/* Restore TN and the YH array to their previous values, and prepare */
/* to try the step again. Compute the optimum step size for this or */
/* one lower order. After 2 or more failures, H is forced to decrease */
/* by a factor of 0.2 or less. */
/* ----------------------------------------------------------------------- */
L500:
--kflag;
tn = told;
i1 = nqnyh + 1;
i__1 = nq;
for (jb = 1; jb <= i__1; ++jb) {
i1 -= *NOT_nyh;
/* DIR$ IVDEP */
i__2 = nqnyh;
for (i__ = i1; i__ <= i__2; ++i__) {
/* L510: */
yh1[i__ -1] -= yh1[i__ + *NOT_nyh -1];
}
/* L515: */
}
rmax = 2.;
if (fabs(h__) <= hmin * 1.00001) {
goto L660;
}
if (kflag <= -3) {
goto L640;
}
iredo = 2;
rhup = 0.;
goto L540;
/* ----------------------------------------------------------------------- */
/* Regardless of the success or failure of the step, factors */
/* RHDN, RHSM, and RHUP are computed, by which H could be multiplied */
/* at order NQ - 1, order NQ, or order NQ + 1, respectively. */
/* In the case of failure, RHUP = 0.0 to avoid an order increase. */
/* The largest of these is determined and the new order chosen */
/* accordingly. If the order is to be increased, we compute one */
/* additional scaled derivative. */
/* ----------------------------------------------------------------------- */
L520:
rhup = 0.;
if (l == lmax) {
goto L540;
}
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L530: */
savf[i__ -1] = acor[i__ -1] - yh[i__ + lmax * yh_dim1 -yh_offset];
}
dup = dmnorm_(&n, savf, ewt, common) / tesco[nq * 3 - 1];
exup = (double)1. / (double)(l + 1);
rhup = (double)1. / (pow(dup, exup) * (double)1.4 + (double)1.4e-6);
L540:
exsm = (double)1. / l;
rhsm = (double)1. / (pow(dsm, exsm) * (double)1.2 + (double)1.2e-6);
rhdn = 0.;
if (nq == 1) {
goto L550;
}
ddn = dmnorm_(&n, &yh[l * yh_dim1 + 1 -yh_offset], ewt, common) /
tesco[nq * 3 - 3];
exdn = (double)1. / (double)nq;
rhdn = (double)1. / (pow(ddn, exdn) * (double)1.3 + (double)1.3e-6);
/* If METH = 1, limit RH according to the stability region also. -------- */
L550:
if (meth == 2) {
goto L560;
}
/* Computing MAX */
d__1 = fabs(h__) * pdlast;
pdh = max(d__1,1e-6);
if (l < lmax) {
/* Computing MIN */
d__1 = rhup, d__2 = sm1[l - 1] / pdh;
rhup = min(d__1,d__2);
}
/* Computing MIN */
d__1 = rhsm, d__2 = sm1[nq - 1] / pdh;
rhsm = min(d__1,d__2);
if (nq > 1) {
/* Computing MIN */
d__1 = rhdn, d__2 = sm1[nq - 2] / pdh;
rhdn = min(d__1,d__2);
}
pdest = 0.;
L560:
if (rhsm >= rhup) {
goto L570;
}
if (rhup > rhdn) {
goto L590;
}
goto L580;
L570:
if (rhsm < rhdn) {
goto L580;
}
newq = nq;
rh = rhsm;
goto L620;
L580:
newq = nq - 1;
rh = rhdn;
if (kflag < 0 && rh > 1.) {
rh = 1.;
}
goto L620;
L590:
newq = l;
rh = rhup;
if (rh < 1.1) {
goto L610;
}
r__ = el[l - 1] / (double)l;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L600: */
yh[i__ + (newq + 1) * yh_dim1 -yh_offset] = acor[i__ -1] * r__;
}
goto L630;
L610:
ialth = 3;
goto L700;
/* If METH = 1 and H is restricted by stability, bypass 10 percent test. */
L620:
if (meth == 2) {
goto L622;
}
if (rh * pdh * 1.00001 >= sm1[newq - 1]) {
goto L625;
}
L622:
if (kflag == 0 && rh < 1.1) {
goto L610;
}
L625:
if (kflag <= -2) {
rh = min(rh,.2);
}
/* ----------------------------------------------------------------------- */
/* If there is a change of order, reset NQ, L, and the coefficients. */
/* In any case H is reset according to RH and the YH array is rescaled. */
/* Then exit from 690 if the step was OK, or redo the step otherwise. */
/* ----------------------------------------------------------------------- */
if (newq == nq) {
goto L170;
}
L630:
nq = newq;
l = nq + 1;
iret = 2;
goto L150;
/* ----------------------------------------------------------------------- */
/* Control reaches this section if 3 or more failures have occurred. */
/* If 10 failures have occurred, exit with KFLAG = -1. */
/* It is assumed that the derivatives that have accumulated in the */
/* YH array have errors of the wrong order. Hence the first */
/* derivative is recomputed, and the order is set to 1. Then */
/* H is reduced by a factor of 10, and the step is retried, */
/* until it succeeds or H reaches HMIN. */
/* ----------------------------------------------------------------------- */
L640:
if (kflag == -10) {
goto L660;
}
rh = .1;
/* Computing MAX */
d__1 = hmin / fabs(h__);
rh = max(d__1,rh);
h__ *= rh;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L645: */
y[i__ - 1] = yh[i__ + yh_dim1 -yh_offset];
}
f(neq, &tn, y, savf, comp_ode, flattenODE, offsetODE, costanti); //fixed neq
++nfe;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L650: */
yh[i__ + (yh_dim1 << 1) -yh_offset] = h__ * savf[i__ -1];
}
ipup = miter;
ialth = 5;
if (nq == 1) {
goto L200;
}
nq = 1;
l = 2;
iret = 3;
goto L150;
/* ----------------------------------------------------------------------- */
/* All returns are made through this section. H is saved in HOLD */
/* to allow the caller to change H on the next step. */
/* ----------------------------------------------------------------------- */
L660:
kflag = -1;
goto L720;
L670:
kflag = -2;
goto L720;
L680:
kflag = -3;
goto L720;
L690:
rmax = 10.;
L700:
r__ = 1. / tesco[nqu * 3 - 2];
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L710: */
acor[i__ -1] *= r__;
}
L720:
hold = h__;
jstart = 1;
return 0;
/* ----------------------- End of Subroutine DSTODA ---------------------- */
} /* dstoda_ */
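/* ----------------------------------------------------------------------- */
/* Editorial sketch (not part of the original ODEPACK translation): the */
/* tentative step-size factor used repeatedly in DSTODA above. At order q, */
/* an error estimate D is turned into */
/* RH = 1 / (bias * D**(1/(q+1)) + bias*1.0e-6), */
/* where bias is 1.2 when keeping the order (RHSM), 1.3 when decreasing it */
/* (RHDN), and 1.4 when increasing it (RHUP). Illustrative only; never */
/* called by the solver. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ double stepFactorExample(double d, int q, double bias)
{
double ex = 1. / (double)(q + 1); /* exponent 1/(q+1) */
return 1. / (pow(d, ex) * bias + bias * 1e-6); /* small term guards D = 0 */
}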
/* DECK DCFODE */
/* Subroutine */
__device__ int dcfode_(int PARAM_meth, double *DCFODE_elco, double *DCFODE_tesco, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
/* Local variables */
int i__ = 0;
int ib = 0;
double pc[12];
for (int bubb = 0; bubb < 12; bubb ++)
{
pc[bubb] = 0.;
}
int DCFODE_nq = 0;
double fnq = 0.;
int nqm1 = 0;
int nqp1 = 0;
double ragq = 0.;
double pint = 0.;
double xpin = 0.;
double fnqm1 = 0.;
double agamq = 0.;
double rqfac = 0.;
double tsign = 0.;
double rq1fac = 0.;
/* ***BEGIN PROLOGUE DCFODE */
/* ***SUBSIDIARY */
/* ***PURPOSE Set ODE integrator coefficients. */
/* ***TYPE DOUBLE PRECISION (SCFODE-S, DCFODE-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* DCFODE is called by the integrator routine to set coefficients */
/* needed there. The coefficients for the current method, as */
/* given by the value of METH, are set for all orders and saved. */
/* The maximum order assumed here is 12 if METH = 1 and 5 if METH = 2. */
/* (A smaller value of the maximum order is also allowed.) */
/* DCFODE is called once at the beginning of the problem, */
/* and is not called again unless and until METH is changed. */
/* The ELCO array contains the basic method coefficients. */
/* The coefficients el(i), 1 .le. i .le. nq+1, for the method of */
/* order nq are stored in ELCO(i,nq). They are given by a generating */
/* polynomial, i.e., */
/* l(x) = el(1) + el(2)*x + ... + el(nq+1)*x**nq. */
/* For the implicit Adams methods, l(x) is given by */
/* dl/dx = (x+1)*(x+2)*...*(x+nq-1)/factorial(nq-1), l(-1) = 0. */
/* For the BDF methods, l(x) is given by */
/* l(x) = (x+1)*(x+2)* ... *(x+nq)/K, */
/* where K = factorial(nq)*(1 + 1/2 + ... + 1/nq). */
/* The TESCO array contains test constants used for the */
/* local error test and the selection of step size and/or order. */
/* At order nq, TESCO(k,nq) is used for the selection of step */
/* size at order nq - 1 if k = 1, at order nq if k = 2, and at order */
/* nq + 1 if k = 3. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* ***END PROLOGUE DCFODE */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DCFODE */
/* Parameter adjustments */
//DCFODE_tesco -= 4;
//DCFODE_elco -= 14;
/* Function Body */
switch (PARAM_meth) {
case 1: goto L100;
case 2: goto L200;
}
L100:
DCFODE_elco[14 -14] = 1.;
DCFODE_elco[15 -14] = 1.;
DCFODE_tesco[4 -4] = 0.;
DCFODE_tesco[5 -4] = 2.;
DCFODE_tesco[7 -4] = 1.;
DCFODE_tesco[39 -4] = 0.;
pc[0] = (double)1.;
rqfac =(double) 1.;
for (DCFODE_nq = 2; DCFODE_nq <= 12; ++DCFODE_nq) {
/* ----------------------------------------------------------------------- */
/* The PC array will contain the coefficients of the polynomial */
/* p(x) = (x+1)*(x+2)*...*(x+nq-1). */
/* Initially, p(x) = 1. */
/* ----------------------------------------------------------------------- */
rq1fac = rqfac;
rqfac /= (double)DCFODE_nq;
nqm1 = DCFODE_nq - 1;
fnqm1 = (double) nqm1;
nqp1 = DCFODE_nq + 1;
/* Form coefficients of p(x)*(x+nq-1). ---------------------------------- */
pc[DCFODE_nq - 1] = 0.;
i__1 = nqm1;
for (ib = 1; ib <= i__1; ++ib) {
i__ = nqp1 - ib;
/* L110: */
pc[i__ - 1] = pc[i__ - 2] + fnqm1 * pc[i__ - 1];
}
pc[0] = fnqm1 * pc[0];
/* Compute integral, -1 to 0, of p(x) and x*p(x). ----------------------- */
pint = pc[0];
xpin = pc[0] / 2.;
tsign = 1.;
i__1 = DCFODE_nq;
for (i__ = 2; i__ <= i__1; ++i__) {
tsign = -tsign;
pint += tsign * pc[i__ - 1] / (double)i__;
/* L120: */
xpin += tsign * pc[i__ - 1] / (double)(i__ + 1);
}
/* Store coefficients in ELCO and TESCO. -------------------------------- */
DCFODE_elco[DCFODE_nq * 13 + 1 -14] = pint * rq1fac;
DCFODE_elco[DCFODE_nq * 13 + 2 -14] = 1.;
i__1 = DCFODE_nq;
for (i__ = 2; i__ <= i__1; ++i__) {
/* L130: */
DCFODE_elco[i__ + 1 + DCFODE_nq * 13 -14] = rq1fac * pc[i__ - 1] / (double)i__;
}
agamq = rqfac * xpin;
ragq = 1. / agamq;
DCFODE_tesco[DCFODE_nq * 3 + 2 -4] = ragq;
if (DCFODE_nq < 12) {
DCFODE_tesco[nqp1 * 3 + 1 -4] = ragq * rqfac / (double)nqp1;
}
DCFODE_tesco[nqm1 * 3 + 3 -4] = ragq;
/* L140: */
}
return 0;
L200:
pc[0] = 1.;
rq1fac = 1.;
for (DCFODE_nq = 1; DCFODE_nq <= 5; ++DCFODE_nq) {
/* ----------------------------------------------------------------------- */
/* The PC array will contain the coefficients of the polynomial */
/* p(x) = (x+1)*(x+2)*...*(x+nq). */
/* Initially, p(x) = 1. */
/* ----------------------------------------------------------------------- */
fnq = (double) DCFODE_nq;
nqp1 = DCFODE_nq + 1;
/* Form coefficients of p(x)*(x+nq). ------------------------------------ */
pc[nqp1 - 1] = 0.;
i__1 = DCFODE_nq;
for (ib = 1; ib <= i__1; ++ib) {
i__ = DCFODE_nq + 2 - ib;
/* L210: */
pc[i__ - 1] = pc[i__ - 2] + fnq * pc[i__ - 1];
}
pc[0] = fnq * pc[0];
/* Store coefficients in ELCO and TESCO. -------------------------------- */
i__1 = nqp1;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L220: */
DCFODE_elco[i__ + DCFODE_nq * 13 -14] = pc[i__ - 1] / pc[1];
}
DCFODE_elco[DCFODE_nq * 13 + 2 -14] = 1.;
DCFODE_tesco[DCFODE_nq * 3 + 1 -4] = rq1fac;
DCFODE_tesco[DCFODE_nq * 3 + 2 -4] = ((double)nqp1) / DCFODE_elco[DCFODE_nq * 13 + 1 -14];
DCFODE_tesco[DCFODE_nq * 3 + 3 -4] = ((double)(DCFODE_nq + 2)) / DCFODE_elco[DCFODE_nq * 13 + 1 -14];
rq1fac /= fnq;
/* L230: */
}
return 0;
/* ----------------------- END OF SUBROUTINE DCFODE ---------------------- */
} /* dcfode_ */
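/* ----------------------------------------------------------------------- */
/* Editorial worked example (not from the original source) for the BDF */
/* branch above with nq = 2: */
/* p(x) = (x+1)*(x+2) = 2 + 3x + x**2, K = 2!*(1 + 1/2) = 3, */
/* so l(x) = (2 + 3x + x**2)/3 and the stored coefficients are */
/* ELCO(1,2) = 2/3, ELCO(2,2) = 1, ELCO(3,2) = 1/3. */
/* The loop normalizes by pc[1], the coefficient of x, which equals K, */
/* so the two descriptions agree. */
/* ----------------------------------------------------------------------- */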
/* DECK DPRJA */
/* Subroutine */
#ifdef use_export
export
#endif
__device__ int dprja_(int *neq, double *y, double *yh, int *NOT_nyh, double *ewt, double *ftem, double *savf, double *wm, int *iwm, myFex f, myJex jac, struct cuLsodaCommonBlock *common, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, param_t* myjac, unsigned int* myjacoffset, double* costanti)
{
/* System generated locals */
int yh_dim1, yh_offset, i__1, i__2, i__3, i__4;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__ = 0;
int j;
double r__;
int i1, i2, j1;
double r0;
int ii = 0;
int jj = 0;
int ml = 0;
int mu = 0;
double yi = 0.;
double yj = 0.;
double hl0;
int ml3 = 0;
int np1 = 0;
double fac;
int mba = 0;
int ier = 0;
double con = 0.;
double yjj;
int meb1 = 0;
int lenp = 0;
double srur;
int mband = 0;
int meband = 0;
/* ----------------------------------------------------------------------- */
/* DPRJA is called by DSTODA to compute and process the matrix */
/* P = I - H*EL(1)*J , where J is an approximation to the Jacobian. */
/* Here J is computed by the user-supplied routine JAC if */
/* MITER = 1 or 4 or by finite differencing if MITER = 2 or 5. */
/* J, scaled by -H*EL(1), is stored in WM. Then the norm of J (the */
/* matrix norm consistent with the weighted max-norm on vectors given */
/* by DMNORM) is computed, and J is overwritten by P. P is then */
/* subjected to LU decomposition in preparation for later solution */
/* of linear systems with P as coefficient matrix. This is done */
/* by DGEFA if MITER = 1 or 2, and by DGBFA if MITER = 4 or 5. */
/* In addition to variables described previously, communication */
/* with DPRJA uses the following: */
/* Y = array containing predicted values on entry. */
/* FTEM = work array of length N (ACOR in DSTODA). */
/* SAVF = array containing f evaluated at predicted y. */
/* WM = real work space for matrices. On output it contains the */
/* LU decomposition of P. */
/* Storage of matrix elements starts at WM(3). */
/* WM also contains the following matrix-related data: */
/* WM(1) = SQRT(UROUND), used in numerical Jacobian increments. */
/* IWM = int work space containing pivot information, starting at */
/* IWM(21). IWM also contains the band parameters */
/* ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. */
/* EL0 = EL(1) (input). */
/* PDNORM= norm of Jacobian matrix. (Output). */
/* IERPJ = output error flag, = 0 if no trouble, .gt. 0 if */
/* P matrix found to be singular. */
/* JCUR = output flag = 1 to indicate that the Jacobian matrix */
/* (or approximation) is now current. */
/* This routine also uses the Common variables EL0, H, TN, UROUND, */
/* MITER, N, NFE, and NJE. */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
// --neq;
//--y;
yh_dim1 = *NOT_nyh;
yh_offset = 1 + yh_dim1;
//yh -= yh_offset;
//--ewt;
//--ftem;
//--savf;
//--wm;
//--iwm;
/* Function Body */
++nje;
ierpj = 0;
jcur = 1;
hl0 = h__ * el0;
switch (miter) {
case 1: goto L100;
case 2: goto L200;
case 3: goto L300;
case 4: goto L400;
case 5: goto L500;
}
/* If MITER = 1, call JAC and multiply by scalar. ----------------------- */
L100:
lenp = n * n;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L110: */
wm[i__ + 2 -1] = 0.;
}
jac(neq, &tn, y, 0, 0, &wm[3 -1], n, myjac, myjacoffset, costanti); //fixed neq
con = -hl0;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L120: */
wm[i__ + 2 -1] *= con;
}
goto L240;
/* If MITER = 2, make N calls to F to approximate J. -------------------- */
L200:
fac = dmnorm_(&n, savf, ewt, common);
r0 = fabs(h__) * 1e3 * uround * ((double)n) * fac;
if (r0 == 0.) {
r0 = 1.;
}
srur = wm[0];
j1 = 2;
i__1 = n;
for (j = 1; j <= i__1; ++j) {
yj = y[j - 1];
/* Computing MAX */
d__1 = srur * fabs(yj), d__2 = r0 / ewt[j -1];
r__ = max(d__1,d__2);
y[j - 1] += r__;
fac = -hl0 / r__;
f(neq, &tn, y, ftem, comp_ode, flattenODE, offsetODE, costanti); //fixed neq
i__2 = n;
for (i__ = 1; i__ <= i__2; ++i__) {
/* L220: */
wm[i__ + j1 -1] = (ftem[i__ -1] - savf[i__ -1]) * fac;
}
y[j -1] = yj;
j1 += n;
/* L230: */
}
nfe += n;
L240:
/* Compute norm of Jacobian. -------------------------------------------- */
pdnorm = dfnorm_(&n, &wm[3 -1], ewt, common) / fabs(hl0);
/* Add identity matrix. ------------------------------------------------- */
j = 3;
np1 = n + 1;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
wm[j -1] += 1.;
/* L250: */
j += np1;
}
/* Do LU decomposition on P. -------------------------------------------- */
dgefa_(&wm[3 -1], &n, &n, &iwm[21 -1], &ier, common);
if (ier != 0) {
ierpj = 1;
}
return 0;
/* Dummy block only, since MITER is never 3 in this routine. ------------ */
L300:
return 0;
/* If MITER = 4, call JAC and multiply by scalar. ----------------------- */
L400:
ml = iwm[0];
mu = iwm[1];
ml3 = ml + 3;
mband = ml + mu + 1;
meband = mband + ml;
lenp = meband * n;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L410: */
wm[i__ + 2 -1] = 0.;
}
jac(neq, &tn, y, ml, mu, &wm[ml3 -1], meband, myjac, myjacoffset, costanti); //fixed neq
con = -hl0;
i__1 = lenp;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L420: */
wm[i__ + 2 -1] *= con;
}
goto L570;
/* If MITER = 5, make MBAND calls to F to approximate J. ---------------- */
L500:
ml = iwm[0];
mu = iwm[1];
mband = ml + mu + 1;
mba = min(mband,n);
meband = mband + ml;
meb1 = meband - 1;
srur = wm[0];
fac = dmnorm_(&n, savf, ewt, common);
r0 = fabs(h__) * 1e3 * uround * n * fac;
if (r0 == 0.) {
r0 = 1.;
}
i__1 = mba;
for (j = 1; j <= i__1; ++j) {
i__2 = n;
i__3 = mband;
for (i__ = j; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) {
yi = y[i__ -1];
/* Computing MAX */
d__1 = srur * fabs(yi), d__2 = r0 / ewt[i__ -1];
r__ = max(d__1,d__2);
/* L530: */
y[i__ - 1] += r__;
}
f(neq, &tn, y, ftem, comp_ode, flattenODE, offsetODE, costanti); //fixed neq
i__3 = n;
i__2 = mband;
for (jj = j; i__2 < 0 ? jj >= i__3 : jj <= i__3; jj += i__2) {
y[jj - 1] = yh[jj + yh_dim1 -yh_offset];
yjj = y[jj - 1];
/* Computing MAX */
d__1 = srur * fabs(yjj), d__2 = r0 / ewt[jj -1];
r__ = max(d__1,d__2);
fac = -hl0 / r__;
/* Computing MAX */
i__4 = jj - mu;
i1 = max(i__4,1);
/* Computing MIN */
i__4 = jj + ml;
i2 = min(i__4,n);
ii = jj * meb1 - ml + 2;
i__4 = i2;
for (i__ = i1; i__ <= i__4; ++i__) {
/* L540: */
wm[ii + i__ -1] = (ftem[i__ -1] - savf[i__ -1]) * fac;
}
/* L550: */
}
/* L560: */
}
nfe += mba;
L570:
/* Compute norm of Jacobian. -------------------------------------------- */
pdnorm = dbnorm_(&n, &wm[ml + 3 -1], &meband, &ml, &mu, ewt, common) / fabs(hl0);
/* Add identity matrix. ------------------------------------------------- */
ii = mband + 2;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
wm[ii -1] += 1.;
/* L580: */
ii += meband;
}
/* Do LU decomposition of P. -------------------------------------------- */
dgbfa_(&wm[3 -1], &meband, &n, &ml, &mu, &iwm[21 -1], &ier, common);
if (ier != 0) {
ierpj = 1;
}
return 0;
/* ----------------------- End of Subroutine DPRJA ----------------------- */
} /* dprja_ */
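/* ----------------------------------------------------------------------- */
/* Editorial sketch (not part of the original translation): the MITER = 2 */
/* branch above builds each Jacobian column by a one-sided finite */
/* difference, J(:,j) ~ (f(t, y + r*e_j) - f(t, y)) / r, and stores it */
/* already scaled by -H*EL(1). Given f0 = f(t,y) and fPerturbed = */
/* f(t, y + r*e_j), one column of WM is filled as below. Argument names */
/* are illustrative, not the solver's. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ void fdJacobianColumnExample(const double *f0,
const double *fPerturbed, double *jacCol, int nEq, double r, double hl0scale)
{
double fac = -hl0scale / r; /* 1/r difference times -H*EL(1) */
for (int k = 0; k < nEq; ++k) {
jacCol[k] = (fPerturbed[k] - f0[k]) * fac; /* column of -H*EL(1)*J */
}
}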
/* DECK DSOLSY */
/* Subroutine */
__device__ int dsolsy_(double *wm, int *iwm, double *x, double *tem, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
/* Local variables */
int i__ = 0;
double r__ = 0.;
double di = 0.;
int ml = 0;
int mu = 0;
double hl0 = 0.;
double phl0 = 0.;
int meband = 0;
/* ***BEGIN PROLOGUE DSOLSY */
/* ***SUBSIDIARY */
/* ***PURPOSE ODEPACK linear system solver. */
/* ***TYPE DOUBLE PRECISION (SSOLSY-S, DSOLSY-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* This routine manages the solution of the linear system arising from */
/* a chord iteration. It is called if MITER .ne. 0. */
/* If MITER is 1 or 2, it calls DGESL to accomplish this. */
/* If MITER = 3 it updates the coefficient h*EL0 in the diagonal */
/* matrix, and then computes the solution. */
/* If MITER is 4 or 5, it calls DGBSL. */
/* Communication with DSOLSY uses the following variables: */
/* WM = real work space containing the inverse diagonal matrix if */
/* MITER = 3 and the LU decomposition of the matrix otherwise. */
/* Storage of matrix elements starts at WM(3). */
/* WM also contains the following matrix-related data: */
/* WM(1) = SQRT(UROUND) (not used here), */
/* WM(2) = HL0, the previous value of h*EL0, used if MITER = 3. */
/* IWM = int work space containing pivot information, starting at */
/* IWM(21), if MITER is 1, 2, 4, or 5. IWM also contains band */
/* parameters ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. */
/* X = the right-hand side vector on input, and the solution vector */
/* on output, of length N. */
/* TEM = vector of work space of length N, not used in this version. */
/* IERSL = output flag (in COMMON). IERSL = 0 if no trouble occurred. */
/* IERSL = 1 if a singular matrix arose with MITER = 3. */
/* This routine also uses the COMMON variables EL0, H, MITER, and N. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED DGBSL, DGESL */
/* ***COMMON BLOCKS DLS001 */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* 010418 Reduced size of Common block /DLS001/. (ACH) */
/* 031105 Restored 'own' variables to Common block /DLS001/, to */
/* enable interrupt/restart feature. (ACH) */
/* ***END PROLOGUE DSOLSY */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DSOLSY */
/* Parameter adjustments */
//--tem;
//--x;
//--iwm;
//--wm;
/* Function Body */
iersl = 0;
switch (miter) {
case 1: goto L100;
case 2: goto L100;
case 3: goto L300;
case 4: goto L400;
case 5: goto L400;
}
L100:
dgesl_(&wm[3 -1], &n, &n, &iwm[21 -1], x, 0, common);
return 0;
L300:
phl0 = wm[1];
hl0 = h__ * el0;
wm[1] = hl0;
if (hl0 == phl0) {
goto L330;
}
r__ = hl0 / phl0;
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
di = 1. - r__ * (1. - 1. / wm[i__ + 2 -1]);
if (fabs(di) == 0.) {
goto L390;
}
/* L320: */
wm[i__ + 2 -1] = 1. / di;
}
L330:
i__1 = n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L340: */
x[i__ -1] = wm[i__ + 2 -1] * x[i__ -1];
}
return 0;
L390:
iersl = 1;
return 0;
L400:
ml = iwm[0];
mu = iwm[1];
meband = (ml << 1) + mu + 1;
dgbsl_(&wm[3 -1], &meband, &n, &ml, &mu, &iwm[21 -1], x, 0, common);
return 0;
/* ----------------------- END OF SUBROUTINE DSOLSY ---------------------- */
} /* dsolsy_ */
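/* ----------------------------------------------------------------------- */
/* Editorial sketch (not from the original source): the MITER = 3 path */
/* above stores the inverse diagonal of P = I - h*EL0*J starting at WM(3). */
/* When h*EL0 changes by the ratio r, each entry is updated as */
/* di = 1 - r*(1 - 1/WM_old), WM_new = 1/di, */
/* which is the loop ending at L320. Illustrative helper only. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ int rescaleInverseDiagonalExample(double *wmDiag, int nEq, double r)
{
for (int k = 0; k < nEq; ++k) {
double di = 1. - r * (1. - 1. / wmDiag[k]);
if (fabs(di) == 0.) {
return 1; /* singular diagonal: mirrors IERSL = 1 */
}
wmDiag[k] = 1. / di; /* store the updated inverse diagonal entry */
}
return 0;
}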
/* DECK DEWSET */
/* Subroutine */
__device__ int dewset_(int *PARAM_n, int *itol, double *rtol, double *atol, double *ycur, double *ewt, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
double d__1 = 0.;
/* Local variables */
int i__ = 0;
/* ***BEGIN PROLOGUE DEWSET */
/* ***SUBSIDIARY */
/* ***PURPOSE Set error weight vector. */
/* ***TYPE DOUBLE PRECISION (SEWSET-S, DEWSET-D) */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* This subroutine sets the error weight vector EWT according to */
/* EWT(i) = RTOL(i)*ABS(YCUR(i)) + ATOL(i), i = 1,...,N, */
/* with the subscript on RTOL and/or ATOL possibly replaced by 1 above, */
/* depending on the value of ITOL. */
/* ***SEE ALSO DLSODE */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791129 DATE WRITTEN */
/* 890501 Modified prologue to SLATEC/LDOC format. (FNF) */
/* 890503 Minor cosmetic changes. (FNF) */
/* 930809 Renamed to allow single/double precision versions. (ACH) */
/* ***END PROLOGUE DEWSET */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT DEWSET */
/* Parameter adjustments */
//--ewt;
//--ycur;
//--rtol;
//--atol;
/* Function Body */
switch (*itol) {
case 1: goto L10;
case 2: goto L20;
case 3: goto L30;
case 4: goto L40;
}
L10:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L15: */
ewt[i__ -1] = rtol[0] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[0];
}
return 0;
L20:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L25: */
ewt[i__ -1] = rtol[0] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[i__ -1];
}
return 0;
L30:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L35: */
ewt[i__ -1] = rtol[i__ - 1] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[0];
}
return 0;
L40:
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L45: */
ewt[i__ -1] = rtol[i__ - 1] * (d__1 = ycur[i__ -1], fabs(d__1)) + atol[i__ -1];
}
return 0;
/* ----------------------- END OF SUBROUTINE DEWSET ---------------------- */
} /* dewset_ */
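/* ----------------------------------------------------------------------- */
/* Editorial sketch (not from the original source): the ITOL = 4 case of */
/* the weight formula above, EWT(i) = RTOL(i)*ABS(YCUR(i)) + ATOL(i), */
/* written as a standalone helper over 0-based arrays. Names are */
/* illustrative only. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ void ewsetExample(int nEq, const double *rtolV,
const double *atolV, const double *ycurV, double *ewtV)
{
for (int k = 0; k < nEq; ++k) {
ewtV[k] = rtolV[k] * fabs(ycurV[k]) + atolV[k]; /* per-component weight */
}
}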
/* DECK DMNORM */
__device__ double dmnorm_(int *PARAM_n, double *v, double *w, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
double ret_val = 0.;
double d__1 = 0.;
double d__2 = 0.;
double d__3 = 0.;
/* Local variables */
int i__ = 0;
double vm = 0.;
/* ----------------------------------------------------------------------- */
/* This function routine computes the weighted max-norm */
/* of the vector of length N contained in the array V, with weights */
/* contained in the array w of length N: */
/* DMNORM = MAX(i=1,...,N) ABS(V(i))*W(i) */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--w;
//--v;
/* Function Body */
vm = 0.;
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
/* L10: */
/* Computing MAX */
d__2 = vm, d__3 = (d__1 = v[i__ -1], fabs(d__1)) * w[i__ -1];
vm = max(d__2,d__3);
}
ret_val = vm;
return ret_val;
/* ----------------------- End of Function DMNORM ------------------------ */
} /* dmnorm_ */
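/* ----------------------------------------------------------------------- */
/* Editorial sketch (not from the original source): the weighted max-norm */
/* computed by DMNORM above, MAX(i) ABS(V(i))*W(i), in 0-based form. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ double weightedMaxNormExample(int nEq, const double *vVec,
const double *wVec)
{
double vm = 0.;
for (int k = 0; k < nEq; ++k) {
double term = fabs(vVec[k]) * wVec[k]; /* weighted component magnitude */
if (term > vm) {
vm = term;
}
}
return vm;
}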
/* DECK DFNORM */
__device__ double dfnorm_(int *PARAM_n, double *a, double *w, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
double ret_val = 0.;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__ = 0;
int j = 0;
double an = 0.;
double sum = 0.;
/* ----------------------------------------------------------------------- */
/* This function computes the norm of a full N by N matrix, */
/* stored in the array A, that is consistent with the weighted max-norm */
/* on vectors, with weights stored in the array W: */
/* DFNORM = MAX(i=1,...,N) ( W(i) * Sum(j=1,...,N) ABS(a(i,j))/W(j) ) */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--w;
a_dim1 = *PARAM_n;
a_offset = 1 + a_dim1;
//a -= a_offset;
/* Function Body */
an = 0.;
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
sum = 0.;
i__2 = *PARAM_n;
for (j = 1; j <= i__2; ++j) {
/* L10: */
sum += (d__1 = a[i__ + j * a_dim1 -a_offset], fabs(d__1)) / w[j -1];
}
/* Computing MAX */
d__1 = an, d__2 = sum * w[i__ -1];
an = max(d__1,d__2);
/* L20: */
}
ret_val = an;
return ret_val;
/* ----------------------- End of Function DFNORM ------------------------ */
} /* dfnorm_ */
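/* ----------------------------------------------------------------------- */
/* Editorial sketch (not from the original source): the weighted matrix */
/* norm computed by DFNORM above, */
/* MAX(i) ( W(i) * Sum(j) ABS(a(i,j))/W(j) ), */
/* for a column-major N by N matrix, 0-based for readability. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ double weightedMatNormExample(int nEq, const double *aMat,
const double *wVec)
{
double an = 0.;
for (int i = 0; i < nEq; ++i) {
double sum = 0.;
for (int j = 0; j < nEq; ++j) {
sum += fabs(aMat[i + j * nEq]) / wVec[j]; /* row i, column j */
}
if (sum * wVec[i] > an) {
an = sum * wVec[i];
}
}
return an;
}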
/* DECK DBNORM */
__device__ double dbnorm_(int *PARAM_n, double *a, int *nra, int *ml, int *mu, double *w, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
double ret_val = 0.;
double d__1 = 0.;
double d__2 = 0.;
/* Local variables */
int i__ = 0;
int j = 0;
double an = 0.;
double sum = 0.;
int i1 = 0;
int jhi = 0;
int jlo = 0;
/* ----------------------------------------------------------------------- */
/* This function computes the norm of a banded N by N matrix, */
/* stored in the array A, that is consistent with the weighted max-norm */
/* on vectors, with weights stored in the array W. */
/* ML and MU are the lower and upper half-bandwidths of the matrix. */
/* NRA is the first dimension of the A array, NRA .ge. ML+MU+1. */
/* In terms of the matrix elements a(i,j), the norm is given by: */
/* DBNORM = MAX(i=1,...,N) ( W(i) * Sum(j=1,...,N) ABS(a(i,j))/W(j) ) */
/* ----------------------------------------------------------------------- */
/* Parameter adjustments */
//--w;
a_dim1 = *nra;
a_offset = 1 + a_dim1;
//a -= a_offset;
/* Function Body */
an = 0.;
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
sum = 0.;
i1 = i__ + *mu + 1;
/* Computing MAX */
i__2 = i__ - *ml;
jlo = max(i__2,1);
/* Computing MIN */
i__2 = i__ + *mu;
jhi = min(i__2,*PARAM_n);
i__2 = jhi;
for (j = jlo; j <= i__2; ++j) {
/* L10: */
sum += (d__1 = a[i1 - j + j * a_dim1 -a_offset], fabs(d__1)) / w[j -1];
}
/* Computing MAX */
d__1 = an, d__2 = sum * w[i__ -1];
an = max(d__1,d__2);
/* L20: */
}
ret_val = an;
return ret_val;
/* ----------------------- End of Function DBNORM ------------------------ */
} /* dbnorm_ */
/* DECK DSRCMA */
/* Subroutine */
//__device__ int dsrcma_(double *rsav, int *isav, int *job, struct cuLsodaCommonBlock *common)
//{
// /* Initialized data */
//
// int lenrls = 218;
// int lenils = 37;
// int lenrla = 22;
// int lenila = 9;
//
// /* System generated locals */
// int i__1 = 0;
//
// /* Local variables */
// int i__ = 0;
//
// /* ----------------------------------------------------------------------- */
// /* This routine saves or restores (depending on JOB) the contents of */
// /* the Common blocks DLS001, DLSA01, which are used */
// /* internally by one or more ODEPACK solvers. */
//
// /* RSAV = real array of length 240 or more. */
// /* ISAV = int array of length 46 or more. */
// /* JOB = flag indicating to save or restore the Common blocks: */
// /* JOB = 1 if Common is to be saved (written to RSAV/ISAV) */
// /* JOB = 2 if Common is to be restored (read from RSAV/ISAV) */
// /* A call with JOB = 2 presumes a prior call with JOB = 1. */
// /* ----------------------------------------------------------------------- */
// /* Parameter adjustments */
// //--isav;
// //--rsav;
//
// /* Function Body */
//
// if (*job == 2) {
// goto L100;
// }
// i__1 = lenrls;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L10: */
// rsav[i__ -1] = rls[i__ - 1];
// }
// i__1 = lenrla;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L15: */
// rsav[lenrls + i__ -1] = rlsa[i__ - 1];
// }
//
// i__1 = lenils;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L20: */
// isav[i__ -1] = ils[i__ - 1];
// }
// i__1 = lenila;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L25: */
// isav[lenils + i__ -1] = ilsa[i__ - 1];
// }
//
// return 0;
//
//L100:
// i__1 = lenrls;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L110: */
// rls[i__ - 1] = rsav[i__ -1];
// }
// i__1 = lenrla;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L115: */
// rlsa[i__ - 1] = rsav[lenrls + i__ -1];
// }
//
// i__1 = lenils;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L120: */
// ils[i__ - 1] = isav[i__ -1];
// }
// i__1 = lenila;
// for (i__ = 1; i__ <= i__1; ++i__) {
// /* L125: */
// ilsa[i__ - 1] = isav[lenils + i__ -1];
// }
//
// return 0;
// /* ----------------------- End of Subroutine DSRCMA ---------------------- */
//} /* dsrcma_ */
/* DECK DGEFA */
/* Subroutine */
__device__ int dgefa_(double *a, int *lda, int *PARAM_n, int *ipvt, int *info, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
int i__3 = 0;
/* Local variables */
int j = 0;
int k = 0;
int DGEFA_l = 0;
double t = 0.;
int kp1 = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGEFA */
/* ***PURPOSE Factor a matrix using Gaussian elimination. */
/* ***CATEGORY D2A1 */
/* ***TYPE DOUBLE PRECISION (SGEFA-S, DGEFA-D, CGEFA-C) */
/* ***KEYWORDS GENERAL MATRIX, LINEAR ALGEBRA, LINPACK, */
/* MATRIX FACTORIZATION */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGEFA factors a double precision matrix by Gaussian elimination. */
/* DGEFA is usually called by DGECO, but it can be called */
/* directly with a saving in time if RCOND is not needed. */
/* (Time for DGECO) = (1 + 9/N)*(Time for DGEFA) . */
/* On Entry */
/* A DOUBLE PRECISION(LDA, N) */
/* the matrix to be factored. */
/* LDA int */
/* the leading dimension of the array A . */
/* N int */
/* the order of the matrix A . */
/* On Return */
/* A an upper triangular matrix and the multipliers */
/* which were used to obtain it. */
/* The factorization can be written A = L*U where */
/* L is a product of permutation and unit lower */
/* triangular matrices and U is upper triangular. */
/* IPVT int(N) */
/* an int vector of pivot indices. */
/* INFO int */
/* = 0 normal value. */
/* = K if U(K,K) .EQ. 0.0 . This is not an error */
/* condition for this subroutine, but it does */
/* indicate that DGESL or DGEDI will divide by zero */
/* if called. Use RCOND in DGECO for a reliable */
/* indication of singularity. */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DSCAL, IDAMAX */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGEFA */
/* GAUSSIAN ELIMINATION WITH PARTIAL PIVOTING */
/* ***FIRST EXECUTABLE STATEMENT DGEFA */
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1;
//a -= a_offset;
//--ipvt;
/* Function Body */
*info = 0;
nm1 = *PARAM_n - 1;
if (nm1 < 1) {
goto L70;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
kp1 = k + 1;
/* FIND L = PIVOT INDEX */
i__2 = *PARAM_n - k + 1;
DGEFA_l = idamax_(&i__2, &a[k + k * a_dim1 -a_offset], 1, common) + k - 1;
ipvt[k -1] = DGEFA_l;
/* ZERO PIVOT IMPLIES THIS COLUMN ALREADY TRIANGULARIZED */
if (a[DGEFA_l + k * a_dim1 -a_offset] == 0.) {
goto L40;
}
/* INTERCHANGE IF NECESSARY */
if (DGEFA_l == k) {
goto L10;
}
t = a[DGEFA_l + k * a_dim1 -a_offset];
a[DGEFA_l + k * a_dim1 -a_offset] = a[k + k * a_dim1 -a_offset];
a[k + k * a_dim1 -a_offset] = t;
L10:
/* COMPUTE MULTIPLIERS */
t = -1. / a[k + k * a_dim1 -a_offset];
i__2 = *PARAM_n - k;
dscal_(&i__2, &t, &a[k + 1 + k * a_dim1 -a_offset], 1, common);
/* ROW ELIMINATION WITH COLUMN INDEXING */
i__2 = *PARAM_n;
for (j = kp1; j <= i__2; ++j) {
t = a[DGEFA_l + j * a_dim1 -a_offset];
if (DGEFA_l == k) {
goto L20;
}
a[DGEFA_l + j * a_dim1 -a_offset] = a[k + j * a_dim1 -a_offset];
a[k + j * a_dim1 -a_offset] = t;
L20:
i__3 = *PARAM_n - k;
daxpy_(&i__3, &t, &a[k + 1 + k * a_dim1 -a_offset], 1, &a[k + 1 + j * a_dim1 -a_offset], 1, common);
/* L30: */
}
goto L50;
L40:
*info = k;
L50:
/* L60: */
;
}
L70:
ipvt[*PARAM_n -1] = *PARAM_n;
if (a[*PARAM_n + *PARAM_n * a_dim1 -a_offset] == 0.) {
*info = *PARAM_n;
}
return 0;
} /* dgefa_ */
/* DECK DGESL */
/* Subroutine */
__device__ int dgesl_(double *a, int *lda, int *PARAM_n, int *ipvt, double *b, int job, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int a_dim1 = 0;
int a_offset = 0;
int i__1 = 0;
int i__2 = 0;
/* Local variables */
int k = 0;
int DGESL_l = 0;
double t = 0.;
int kb = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGESL */
/* ***PURPOSE Solve the real system A*X=B or TRANS(A)*X=B using the */
/* factors computed by DGECO or DGEFA. */
/* ***CATEGORY D2A1 */
/* ***TYPE DOUBLE PRECISION (SGESL-S, DGESL-D, CGESL-C) */
/* ***KEYWORDS LINEAR ALGEBRA, LINPACK, MATRIX, SOLVE */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGESL solves the double precision system */
/* A * X = B or TRANS(A) * X = B */
/* using the factors computed by DGECO or DGEFA. */
/* On Entry */
/* A DOUBLE PRECISION(LDA, N) */
/* the output from DGECO or DGEFA. */
/* LDA int */
/* the leading dimension of the array A . */
/* N int */
/* the order of the matrix A . */
/* IPVT int(N) */
/* the pivot vector from DGECO or DGEFA. */
/* B DOUBLE PRECISION(N) */
/* the right hand side vector. */
/* JOB int */
/* = 0 to solve A*X = B , */
/* = nonzero to solve TRANS(A)*X = B where */
/* TRANS(A) is the transpose. */
/* On Return */
/* B the solution vector X . */
/* Error Condition */
/* A division by zero will occur if the input factor contains a */
/* zero on the diagonal. Technically this indicates singularity */
/* but it is often caused by improper arguments or improper */
/* setting of LDA . It will not occur if the subroutines are */
/* called correctly and if DGECO has set RCOND .GT. 0.0 */
/* or DGEFA has set INFO .EQ. 0 . */
/* To compute INVERSE(A) * C where C is a matrix */
/* with P columns */
/* CALL DGECO(A,LDA,N,IPVT,RCOND,Z) */
/* IF (RCOND is too small) GO TO ... */
/* DO 10 J = 1, P */
/* CALL DGESL(A,LDA,N,IPVT,C(1,J),0) */
/* 10 CONTINUE */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DDOT */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGESL */
/* ***FIRST EXECUTABLE STATEMENT DGESL */
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1;
//a -= a_offset;
//--ipvt;
//--b;
/* Function Body */
nm1 = *PARAM_n - 1;
if (job != 0) {
goto L50;
}
/* JOB = 0 , SOLVE A * X = B */
/* FIRST SOLVE L*Y = B */
if (nm1 < 1) {
goto L30;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
DGESL_l = ipvt[k -1];
t = b[DGESL_l -1];
if (DGESL_l == k) {
goto L10;
}
b[DGESL_l -1] = b[k -1];
b[k -1] = t;
L10:
i__2 = *PARAM_n - k;
daxpy_(&i__2, &t, &a[k + 1 + k * a_dim1 -a_offset], 1, &b[k + 1 -1], 1, common);
/* L20: */
}
L30:
/* NOW SOLVE U*X = Y */
i__1 = *PARAM_n;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n + 1 - kb;
b[k -1] /= a[k + k * a_dim1 -a_offset];
t = -b[k -1];
i__2 = k - 1;
daxpy_(&i__2, &t, &a[k * a_dim1 + 1 -a_offset], 1, b, 1, common);
/* L40: */
}
goto L100;
L50:
/* JOB = NONZERO, SOLVE TRANS(A) * X = B */
/* FIRST SOLVE TRANS(U)*Y = B */
i__1 = *PARAM_n;
for (k = 1; k <= i__1; ++k) {
i__2 = k - 1;
t = ddot_(&i__2, &a[k * a_dim1 + 1 -a_offset], 1, b, 1, common);
b[k -1] = (b[k -1] - t) / a[k + k * a_dim1 -a_offset];
/* L60: */
}
/* NOW SOLVE TRANS(L)*X = Y */
if (nm1 < 1) {
goto L90;
}
i__1 = nm1;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n - kb;
i__2 = *PARAM_n - k;
b[k -1] += ddot_(&i__2, &a[k + 1 + k * a_dim1 -a_offset], 1, &b[k + 1 -1], 1, common);
DGESL_l = ipvt[k -1];
if (DGESL_l == k) {
goto L70;
}
t = b[DGESL_l -1];
b[DGESL_l -1] = b[k -1];
b[k -1] = t;
L70:
/* L80: */
;
}
L90:
L100:
return 0;
} /* dgesl_ */
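/* ----------------------------------------------------------------------- */
/* Editorial usage sketch (not from the original source): DGEFA followed */
/* by DGESL solves a dense system A*x = b, mirroring the LINPACK calling */
/* sequence used by DPRJA and DSOLSY above. The 3 by 3 system is */
/* illustrative only; cb stands for any valid cuLsodaCommonBlock pointer */
/* available in the caller. */
/* ----------------------------------------------------------------------- */
static __device__ void gefaGeslUsageExample(struct cuLsodaCommonBlock *cb)
{
double aMat[9] = { 4., 1., 0., /* column-major 3x3 matrix */
1., 3., 1.,
0., 1., 2. };
double rhs[3] = { 1., 2., 3. }; /* right-hand side, overwritten by x */
int ipvtEx[3];
int infoEx = 0;
int ldaEx = 3;
int nEq = 3;
dgefa_(aMat, &ldaEx, &nEq, ipvtEx, &infoEx, cb); /* LU with partial pivoting */
if (infoEx == 0) {
dgesl_(aMat, &ldaEx, &nEq, ipvtEx, rhs, 0, cb); /* JOB = 0: solve A*x = b */
}
}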
/* DECK DGBFA */
/* Subroutine */
__device__ int dgbfa_(double *abd, int *lda, int *PARAM_n, int *ml, int *mu, int *ipvt, int *info, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int abd_dim1 = 0;
int abd_offset = 0;
int i__1 = 0;
int i__2 = 0;
int i__3 = 0;
int i__4 = 0;
/* Local variables */
int i__ = 0;
int j = 0;
int k = 0;
int DGBFA_l = 0;
int m = 0;
double t = 0.;
int i0 = 0;
int j0 = 0;
int j1 = 0;
int lm = 0;
int mm = 0;
int ju = 0;
int jz = 0;
int kp1 = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGBFA */
/* ***PURPOSE Factor a band matrix using Gaussian elimination. */
/* ***CATEGORY D2A2 */
/* ***TYPE DOUBLE PRECISION (SGBFA-S, DGBFA-D, CGBFA-C) */
/* ***KEYWORDS BANDED, LINEAR ALGEBRA, LINPACK, MATRIX FACTORIZATION */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGBFA factors a double precision band matrix by elimination. */
/* DGBFA is usually called by DGBCO, but it can be called */
/* directly with a saving in time if RCOND is not needed. */
/* On Entry */
/* ABD DOUBLE PRECISION(LDA, N) */
/* contains the matrix in band storage. The columns */
/* of the matrix are stored in the columns of ABD and */
/* the diagonals of the matrix are stored in rows */
/* ML+1 through 2*ML+MU+1 of ABD . */
/* See the comments below for details. */
/* LDA int */
/* the leading dimension of the array ABD . */
/* LDA must be .GE. 2*ML + MU + 1 . */
/* N int */
/* the order of the original matrix. */
/* ML int */
/* number of diagonals below the main diagonal. */
/* 0 .LE. ML .LT. N . */
/* MU int */
/* number of diagonals above the main diagonal. */
/* 0 .LE. MU .LT. N . */
/* More efficient if ML .LE. MU . */
/* On Return */
/* ABD an upper triangular matrix in band storage and */
/* the multipliers which were used to obtain it. */
/* The factorization can be written A = L*U where */
/* L is a product of permutation and unit lower */
/* triangular matrices and U is upper triangular. */
/* IPVT int(N) */
/* an int vector of pivot indices. */
/* INFO int */
/* = 0 normal value. */
/* = K if U(K,K) .EQ. 0.0 . This is not an error */
/* condition for this subroutine, but it does */
/* indicate that DGBSL will divide by zero if */
/* called. Use RCOND in DGBCO for a reliable */
/* indication of singularity. */
/* Band Storage */
/* If A is a band matrix, the following program segment */
/* will set up the input. */
/* ML = (band width below the diagonal) */
/* MU = (band width above the diagonal) */
/* M = ML + MU + 1 */
/* DO 20 J = 1, N */
/* I1 = MAX(1, J-MU) */
/* I2 = MIN(N, J+ML) */
/* DO 10 I = I1, I2 */
/* K = I - J + M */
/* ABD(K,J) = A(I,J) */
/* 10 CONTINUE */
/* 20 CONTINUE */
/* This uses rows ML+1 through 2*ML+MU+1 of ABD . */
/* In addition, the first ML rows in ABD are used for */
/* elements generated during the triangularization. */
/* The total number of rows needed in ABD is 2*ML+MU+1 . */
/* The ML+MU by ML+MU upper left triangle and the */
/* ML by ML lower right triangle are not referenced. */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DSCAL, IDAMAX */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890531 Changed all specific intrinsics to generic. (WRB) */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGBFA */
/* ***FIRST EXECUTABLE STATEMENT DGBFA */
/* Parameter adjustments */
abd_dim1 = *lda;
abd_offset = 1 + abd_dim1;
//abd -= abd_offset;
//--ipvt;
/* Function Body */
m = *ml + *mu + 1;
*info = 0;
/* ZERO INITIAL FILL-IN COLUMNS */
j0 = *mu + 2;
j1 = min(*PARAM_n,m) - 1;
if (j1 < j0) {
goto L30;
}
i__1 = j1;
for (jz = j0; jz <= i__1; ++jz) {
i0 = m + 1 - jz;
i__2 = *ml;
for (i__ = i0; i__ <= i__2; ++i__) {
abd[i__ + jz * abd_dim1 -abd_offset] = 0.;
/* L10: */
}
/* L20: */
}
L30:
jz = j1;
ju = 0;
/* GAUSSIAN ELIMINATION WITH PARTIAL PIVOTING */
nm1 = *PARAM_n - 1;
if (nm1 < 1) {
goto L130;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
kp1 = k + 1;
/* ZERO NEXT FILL-IN COLUMN */
++jz;
if (jz > *PARAM_n) {
goto L50;
}
if (*ml < 1) {
goto L50;
}
i__2 = *ml;
for (i__ = 1; i__ <= i__2; ++i__) {
abd[i__ + jz * abd_dim1 -abd_offset] = 0.;
/* L40: */
}
L50:
/* FIND L = PIVOT INDEX */
/* Computing MIN */
i__2 = *ml, i__3 = *PARAM_n - k;
lm = min(i__2,i__3);
i__2 = lm + 1;
DGBFA_l = idamax_(&i__2, &abd[m + k * abd_dim1 -abd_offset], 1, common) + m - 1;
ipvt[k -1] = DGBFA_l + k - m;
/* ZERO PIVOT IMPLIES THIS COLUMN ALREADY TRIANGULARIZED */
if (abd[DGBFA_l + k * abd_dim1 -abd_offset] == 0.) {
goto L100;
}
/* INTERCHANGE IF NECESSARY */
if (DGBFA_l == m) {
goto L60;
}
t = abd[DGBFA_l + k * abd_dim1 -abd_offset];
abd[DGBFA_l + k * abd_dim1 -abd_offset] = abd[m + k * abd_dim1 -abd_offset];
abd[m + k * abd_dim1 -abd_offset] = t;
L60:
/* COMPUTE MULTIPLIERS */
t = -1. / abd[m + k * abd_dim1 -abd_offset];
dscal_(&lm, &t, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, common);
/* ROW ELIMINATION WITH COLUMN INDEXING */
/* Computing MIN */
/* Computing MAX */
i__3 = ju, i__4 = *mu + ipvt[k -1];
i__2 = max(i__3,i__4);
ju = min(i__2,*PARAM_n);
mm = m;
if (ju < kp1) {
goto L90;
}
i__2 = ju;
for (j = kp1; j <= i__2; ++j) {
--DGBFA_l;
--mm;
t = abd[DGBFA_l + j * abd_dim1 -abd_offset];
if (DGBFA_l == mm) {
goto L70;
}
abd[DGBFA_l + j * abd_dim1 -abd_offset] = abd[mm + j * abd_dim1 -abd_offset];
abd[mm + j * abd_dim1 -abd_offset] = t;
L70:
daxpy_(&lm, &t, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, &abd[mm + 1 + j * abd_dim1 -abd_offset], 1, common);
/* L80: */
}
L90:
goto L110;
L100:
*info = k;
L110:
/* L120: */
;
}
L130:
ipvt[*PARAM_n -1] = *PARAM_n;
if (abd[m + *PARAM_n * abd_dim1 -abd_offset] == 0.) {
*info = *PARAM_n;
}
return 0;
} /* dgbfa_ */
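/* ----------------------------------------------------------------------- */
/* Editorial sketch (not from the original source): the band-storage */
/* mapping described in the DGBFA prologue, ABD(K,J) = A(I,J) with */
/* K = I - J + M and M = ML + MU + 1, written as a standalone packing */
/* helper (column-major, 1-based logic mapped onto 0-based storage). */
/* Purely illustrative; not used by the solver. */
/* ----------------------------------------------------------------------- */
static __host__ __device__ void packBandExample(const double *aDense, double *abdOut,
int nEq, int mlBand, int muBand, int ldab)
{
int mDiag = mlBand + muBand + 1; /* ABD row holding the main diagonal */
for (int j = 1; j <= nEq; ++j) {
int i1 = (j - muBand > 1) ? j - muBand : 1; /* I1 = MAX(1, J-MU) */
int i2 = (j + mlBand < nEq) ? j + mlBand : nEq; /* I2 = MIN(N, J+ML) */
for (int i = i1; i <= i2; ++i) {
int k = i - j + mDiag; /* K = I - J + M */
abdOut[(k - 1) + (j - 1) * ldab] = aDense[(i - 1) + (j - 1) * nEq];
}
}
}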
/* DECK DGBSL */
/* Subroutine */
__device__ int dgbsl_(double *abd, int *lda, int *PARAM_n, int *ml, int *mu, int *ipvt, double *b, int job, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int abd_dim1 = 0;
int abd_offset = 0;
int i__1 = 0;
int i__2 = 0;
int i__3 = 0;
/* Local variables */
int k = 0;
int DGBSL_l = 0;
int m = 0;
double t = 0.;
int kb = 0;
int la = 0;
int lb = 0;
int lm = 0;
int nm1 = 0;
/* ***BEGIN PROLOGUE DGBSL */
/* ***PURPOSE Solve the real band system A*X=B or TRANS(A)*X=B using */
/* the factors computed by DGBCO or DGBFA. */
/* ***CATEGORY D2A2 */
/* ***TYPE DOUBLE PRECISION (SGBSL-S, DGBSL-D, CGBSL-C) */
/* ***KEYWORDS BANDED, LINEAR ALGEBRA, LINPACK, MATRIX, SOLVE */
/* ***AUTHOR Moler, C. B., (U. of New Mexico) */
/* ***DESCRIPTION */
/* DGBSL solves the double precision band system */
/* A * X = B or TRANS(A) * X = B */
/* using the factors computed by DGBCO or DGBFA. */
/* On Entry */
/* ABD DOUBLE PRECISION(LDA, N) */
/* the output from DGBCO or DGBFA. */
/* LDA int */
/* the leading dimension of the array ABD . */
/* N int */
/* the order of the original matrix. */
/* ML int */
/* number of diagonals below the main diagonal. */
/* MU int */
/* number of diagonals above the main diagonal. */
/* IPVT int(N) */
/* the pivot vector from DGBCO or DGBFA. */
/* B DOUBLE PRECISION(N) */
/* the right hand side vector. */
/* JOB int */
/* = 0 to solve A*X = B , */
/* = nonzero to solve TRANS(A)*X = B , where */
/* TRANS(A) is the transpose. */
/* On Return */
/* B the solution vector X . */
/* Error Condition */
/* A division by zero will occur if the input factor contains a */
/* zero on the diagonal. Technically this indicates singularity */
/* but it is often caused by improper arguments or improper */
/* setting of LDA . It will not occur if the subroutines are */
/* called correctly and if DGBCO has set RCOND .GT. 0.0 */
/* or DGBFA has set INFO .EQ. 0 . */
/* To compute INVERSE(A) * C where C is a matrix */
/* with P columns */
/* CALL DGBCO(ABD,LDA,N,ML,MU,IPVT,RCOND,Z) */
/* IF (RCOND is too small) GO TO ... */
/* DO 10 J = 1, P */
/* CALL DGBSL(ABD,LDA,N,ML,MU,IPVT,C(1,J),0) */
/* 10 CONTINUE */
/* ***REFERENCES J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/* Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED DAXPY, DDOT */
/* ***REVISION HISTORY (YYMMDD) */
/* 780814 DATE WRITTEN */
/* 890531 Changed all specific intrinsics to generic. (WRB) */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900326 Removed duplicate information from DESCRIPTION section. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DGBSL */
/* ***FIRST EXECUTABLE STATEMENT DGBSL */
/* Parameter adjustments */
abd_dim1 = *lda;
abd_offset = 1 + abd_dim1;
//abd -= abd_offset;
//--ipvt;
//--b;
/* Function Body */
m = *mu + *ml + 1;
nm1 = *PARAM_n - 1;
if (job != 0) {
goto L50;
}
/* JOB = 0 , SOLVE A * X = B */
/* FIRST SOLVE L*Y = B */
if (*ml == 0) {
goto L30;
}
if (nm1 < 1) {
goto L30;
}
i__1 = nm1;
for (k = 1; k <= i__1; ++k) {
/* Computing MIN */
i__2 = *ml, i__3 = *PARAM_n - k;
lm = min(i__2,i__3);
DGBSL_l = ipvt[k -1];
t = b[DGBSL_l -1];
if (DGBSL_l == k) {
goto L10;
}
b[DGBSL_l -1] = b[k -1];
b[k -1] = t;
L10:
daxpy_(&lm, &t, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, &b[k + 1 -1], 1, common);
/* L20: */
}
L30:
/* NOW SOLVE U*X = Y */
i__1 = *PARAM_n;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n + 1 - kb;
b[k -1] /= abd[m + k * abd_dim1 -abd_offset];
lm = min(k,m) - 1;
la = m - lm;
lb = k - lm;
t = -b[k -1];
daxpy_(&lm, &t, &abd[la + k * abd_dim1 -abd_offset], 1, &b[lb -1], 1, common);
/* L40: */
}
goto L100;
L50:
/* JOB = NONZERO, SOLVE TRANS(A) * X = B */
/* FIRST SOLVE TRANS(U)*Y = B */
i__1 = *PARAM_n;
for (k = 1; k <= i__1; ++k) {
lm = min(k,m) - 1;
la = m - lm;
lb = k - lm;
t = ddot_(&lm, &abd[la + k * abd_dim1 -abd_offset], 1, &b[lb -1], 1, common);
b[k -1] = (b[k -1] - t) / abd[m + k * abd_dim1 -abd_offset];
/* L60: */
}
/* NOW SOLVE TRANS(L)*X = Y */
if (*ml == 0) {
goto L90;
}
if (nm1 < 1) {
goto L90;
}
i__1 = nm1;
for (kb = 1; kb <= i__1; ++kb) {
k = *PARAM_n - kb;
/* Computing MIN */
i__2 = *ml, i__3 = *PARAM_n - k;
lm = min(i__2,i__3);
b[k -1] += ddot_(&lm, &abd[m + 1 + k * abd_dim1 -abd_offset], 1, &b[k + 1 -1], 1, common);
DGBSL_l = ipvt[k -1];
if (DGBSL_l == k) {
goto L70;
}
t = b[DGBSL_l -1];
b[DGBSL_l -1] = b[k -1];
b[k -1] = t;
L70:
/* L80: */
;
}
L90:
L100:
return 0;
} /* dgbsl_ */
/* DECK DUMACH */
__device__ double dumach_(struct cuLsodaCommonBlock *common)
{
/* System generated locals */
double ret_val = 0.;
/* Local variables */
double u = 0.;
double comp = 0.;
/* ***BEGIN PROLOGUE DUMACH */
/* ***PURPOSE Compute the unit roundoff of the machine. */
/* ***CATEGORY R1 */
/* ***TYPE DOUBLE PRECISION (RUMACH-S, DUMACH-D) */
/* ***KEYWORDS MACHINE CONSTANTS */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* *Usage: */
/* DOUBLE PRECISION A, DUMACH */
/* A = DUMACH() */
/* *Function Return Values: */
/* A : the unit roundoff of the machine. */
/* *Description: */
/* The unit roundoff is defined as the smallest positive machine */
/* number u such that 1.0 + u .ne. 1.0. This is computed by DUMACH */
/* in a machine-independent manner. */
/* ***REFERENCES (NONE) */
/* ***ROUTINES CALLED DUMSUM */
/* ***REVISION HISTORY (YYYYMMDD) */
/* 19930216 DATE WRITTEN */
/* 19930818 Added SLATEC-format prologue. (FNF) */
/* 20030707 Added DUMSUM to force normal storage of COMP. (ACH) */
/* ***END PROLOGUE DUMACH */
/* ***FIRST EXECUTABLE STATEMENT DUMACH */
u = 1.;
L10:
u *= .5;
dumsum_(1., u, &comp, common);
if (comp != 1.) {
goto L10;
}
ret_val = u * 2.;
return ret_val;
/* ----------------------- End of Function DUMACH ------------------------ */
} /* dumach_ */
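/* Illustrative sketch (not part of the original sources): the same
halving-until-1+u==1 idea as DUMACH, written as a hypothetical host-side
helper. A volatile store stands in for DUMSUM to force normal (non-extended)
storage of the comparison value. */
static double host_unit_roundoff(void)
{
double u = 1.;
volatile double comp = 2.;
do {
u *= .5;
comp = 1. + u; /* forced through memory, mirroring DUMSUM */
} while (comp != 1.);
return u * 2.; /* roughly DBL_EPSILON on IEEE-754 hardware */
}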
/* DECK XSETF */
/* Subroutine */
//__device__ int xsetf_(int *mflag, struct cuLsodaCommonBlock *common)
//{
// int junk;
/* ***BEGIN PROLOGUE XSETF */
/* ***PURPOSE Reset the error print control flag. */
/* ***CATEGORY R3A */
/* ***TYPE ALL (XSETF-A) */
/* ***KEYWORDS ERROR CONTROL */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***DESCRIPTION */
/* XSETF sets the error print control flag to MFLAG: */
/* MFLAG=1 means print all messages (the default). */
/* MFLAG=0 means no printing. */
/* ***SEE ALSO XERRWD, XERRWV */
/* ***REFERENCES (NONE) */
/* ***ROUTINES CALLED IXSAV */
/* ***REVISION HISTORY (YYMMDD) */
/* 921118 DATE WRITTEN */
/* 930329 Added SLATEC format prologue. (FNF) */
/* 930407 Corrected SEE ALSO section. (FNF) */
/* 930922 Made user-callable, and other cosmetic changes. (FNF) */
/* ***END PROLOGUE XSETF */
/* Subroutines called by XSETF.. None */
/* Function routine called by XSETF.. IXSAV */
/* ----------------------------------------------------------------------- */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT XSETF */
// if (*mflag == 0 || *mflag == 1) {
// junk = ixsav_(2, mflag, 1, common);
// }
// return 0;
/* ----------------------- End of Subroutine XSETF ----------------------- */
//} /* xsetf_ */
/* DECK XSETUN */
/* Subroutine */
//__device__ int xsetun_(int *lun, struct cuLsodaCommonBlock *common)
//{
// int junk;
/* ***BEGIN PROLOGUE XSETUN */
/* ***PURPOSE Reset the logical unit number for error messages. */
/* ***CATEGORY R3B */
/* ***TYPE ALL (XSETUN-A) */
/* ***KEYWORDS ERROR CONTROL */
/* ***DESCRIPTION */
/* XSETUN sets the logical unit number for error messages to LUN. */
/* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
/* ***SEE ALSO XERRWD, XERRWV */
/* ***REFERENCES (NONE) */
/* ***ROUTINES CALLED IXSAV */
/* ***REVISION HISTORY (YYMMDD) */
/* 921118 DATE WRITTEN */
/* 930329 Added SLATEC format prologue. (FNF) */
/* 930407 Corrected SEE ALSO section. (FNF) */
/* 930922 Made user-callable, and other cosmetic changes. (FNF) */
/* ***END PROLOGUE XSETUN */
/* Subroutines called by XSETUN.. None */
/* Function routine called by XSETUN.. IXSAV */
/* ----------------------------------------------------------------------- */
/* **End */
/* ***FIRST EXECUTABLE STATEMENT XSETUN */
// if (*lun > 0) {
// junk = ixsav_(1, lun, 1, common);
// }
// return 0;
/* ----------------------- End of Subroutine XSETUN ---------------------- */
//} /* xsetun_ */
/* DECK IXSAV */
//__device__ int ixsav_(int ipar, int *ivalue, int iset, struct cuLsodaCommonBlock *common)
//{
// /* Initialized data */
//
// int lunit = -1;
// int mesflg = 1;
//
// /* System generated locals */
// int ret_val = 0;
//
// /* Local variables */
//
//
// /* ***BEGIN PROLOGUE IXSAV */
// /* ***SUBSIDIARY */
// /* ***PURPOSE Save and recall error message control parameters. */
// /* ***CATEGORY R3C */
// /* ***TYPE ALL (IXSAV-A) */
// /* ***AUTHOR Hindmarsh, Alan C., (LLNL) */
// /* ***DESCRIPTION */
//
// /* IXSAV saves and recalls one of two error message parameters: */
// /* LUNIT, the logical unit number to which messages are printed, and */
// /* MESFLG, the message print flag. */
// /* This is a modification of the SLATEC library routine J4SAVE. */
//
// /* Saved local variables.. */
// /* LUNIT = Logical unit number for messages. The default is obtained */
// /* by a call to IUMACH (may be machine-dependent). */
// /* MESFLG = Print control flag.. */
// /* 1 means print all messages (the default). */
// /* 0 means no printing. */
//
// /* On input.. */
// /* IPAR = Parameter indicator (1 for LUNIT, 2 for MESFLG). */
// /* IVALUE = The value to be set for the parameter, if ISET = .TRUE. */
// /* ISET = Logical flag to indicate whether to read or write. */
// /* If ISET = .TRUE., the parameter will be given */
// /* the value IVALUE. If ISET = .FALSE., the parameter */
// /* will be unchanged, and IVALUE is a dummy argument. */
//
// /* On return.. */
// /* IXSAV = The (old) value of the parameter. */
//
// /* ***SEE ALSO XERRWD, XERRWV */
// /* ***ROUTINES CALLED IUMACH */
// /* ***REVISION HISTORY (YYMMDD) */
// /* 921118 DATE WRITTEN */
// /* 930329 Modified prologue to SLATEC format. (FNF) */
// /* 930915 Added IUMACH call to get default output unit. (ACH) */
// /* 930922 Minor cosmetic changes. (FNF) */
// /* 010425 Type declaration for IUMACH added. (ACH) */
// /* ***END PROLOGUE IXSAV */
//
// /* Subroutines called by IXSAV.. None */
// /* Function routine called by IXSAV.. IUMACH */
// /* ----------------------------------------------------------------------- */
// /* **End */
// /* ----------------------------------------------------------------------- */
// /* ----------------------------------------------------------------------- */
// /* The following Fortran-77 declaration is to cause the values of the */
// /* listed (local) variables to be saved between calls to this routine. */
// /* ----------------------------------------------------------------------- */
//
// /* ***FIRST EXECUTABLE STATEMENT IXSAV */
// if (ipar == 1) {
// if (lunit == -1) {
// lunit = 6;
// }
// ret_val = lunit;
// if (iset) {
// lunit = *ivalue;
// }
// }
//
// if (ipar == 2) {
// ret_val = mesflg;
// if (iset) {
// mesflg = *ivalue;
// }
// }
//
// return ret_val;
// /* ----------------------- End of Function IXSAV ------------------------- */
//} /* ixsav_ */
/* DECK IDAMAX */
__device__ int idamax_(int *PARAM_n, double *dx, int incx, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int ret_val = 0;
int i__1 = 0;
double d__1 = 0;
/* Local variables */
int i__ = 0;
int ix = 0;
double dmax__ = 0.;
double xmag = 0.;
/* ***BEGIN PROLOGUE IDAMAX */
/* ***PURPOSE Find the smallest index of that component of a vector */
/* having the maximum magnitude. */
/* ***CATEGORY D1A2 */
/* ***TYPE DOUBLE PRECISION (ISAMAX-S, IDAMAX-D, ICAMAX-C) */
/* ***KEYWORDS BLAS, LINEAR ALGEBRA, MAXIMUM COMPONENT, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* --Output-- */
/* IDAMAX smallest index (zero if N .LE. 0) */
/* Find smallest index of maximum magnitude of double precision DX. */
/* IDAMAX = first I, I = 1 to N, to maximize ABS(DX(IX+(I-1)*INCX)), */
/* where IX = 1 if INCX .GE. 0, else IX = 1+(1-N)*INCX. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890531 Changed all specific intrinsics to generic. (WRB) */
/* 890531 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900821 Modified to correct problem with a negative increment. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE IDAMAX */
/* ***FIRST EXECUTABLE STATEMENT IDAMAX */
/* Parameter adjustments */
//--dx;
/* Function Body */
ret_val = 0;
if (*PARAM_n <= 0) {
return ret_val;
}
ret_val = 1;
if (*PARAM_n == 1) {
return ret_val;
}
if (incx == 1) {
goto L20;
}
/* Code for increments not equal to 1. */
ix = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
dmax__ = (d__1 = dx[ix -1], fabs(d__1));
ix += incx;
i__1 = *PARAM_n;
for (i__ = 2; i__ <= i__1; ++i__) {
xmag = (d__1 = dx[ix -1], fabs(d__1));
if (xmag > dmax__) {
ret_val = i__;
dmax__ = xmag;
}
ix += incx;
/* L10: */
}
return ret_val;
/* Code for increments equal to 1. */
L20:
dmax__ = fabs(dx[0]);
i__1 = *PARAM_n;
for (i__ = 2; i__ <= i__1; ++i__) {
xmag = (d__1 = dx[i__ -1], fabs(d__1));
if (xmag > dmax__) {
ret_val = i__;
dmax__ = xmag;
}
/* L30: */
}
return ret_val;
} /* idamax_ */
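/* Illustrative usage sketch (not part of the original sources): IDAMAX keeps
the Fortran 1-based convention and returns 0 when N <= 0, so a caller indexing
a C array must subtract 1. The helper below is hypothetical. */
__device__ static double d_abs_max_element(double *v, int n, struct cuLsodaCommonBlock *common)
{
int imax = idamax_(&n, v, 1, common); /* 1-based index of the largest |v[i]| */
return (imax > 0) ? fabs(v[imax - 1]) : 0.; /* convert to 0-based before indexing */
}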
/* DECK DAXPY */
/* Subroutine */
__device__ int daxpy_(int *PARAM_n, double *da, double *dx, int incx, double *dy, int incy, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
int i__2 = 0;
/* Local variables */
int i__ = 0;
int m = 0;
int ix = 0;
int iy = 0;
int ns = 0;
int mp1 = 0;
/* ***BEGIN PROLOGUE DAXPY */
/* ***PURPOSE Compute a constant times a vector plus a vector. */
/* ***CATEGORY D1A7 */
/* ***TYPE DOUBLE PRECISION (SAXPY-S, DAXPY-D, CAXPY-C) */
/* ***KEYWORDS BLAS, LINEAR ALGEBRA, TRIAD, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DA double precision scalar multiplier */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* DY double precision vector with N elements */
/* INCY storage spacing between elements of DY */
/* --Output-- */
/* DY double precision result (unchanged if N .LE. 0) */
/* Overwrite double precision DY with double precision DA*DX + DY. */
/* For I = 0 to N-1, replace DY(LY+I*INCY) with DA*DX(LX+I*INCX) + */
/* DY(LY+I*INCY), */
/* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is */
/* defined in a similar way using INCY. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 920310 Corrected definition of LX in DESCRIPTION. (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DAXPY */
/* ***FIRST EXECUTABLE STATEMENT DAXPY */
/* Parameter adjustments */
//--dy;
//--dx;
/* Function Body */
if (*PARAM_n <= 0 || *da == 0.) {
return 0;
}
if (incx == incy) {
if ((i__1 = incx - 1) < 0) {
goto L5;
} else if (i__1 == 0) {
goto L20;
} else {
goto L60;
}
}
/* Code for unequal or nonpositive increments. */
L5:
ix = 1;
iy = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
if (incy < 0) {
iy = (-(*PARAM_n) + 1) * incy + 1;
}
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
dy[iy -1] += *da * dx[ix -1];
ix += incx;
iy += incy;
/* L10: */
}
return 0;
/* Code for both increments equal to 1. */
/* Clean-up loop so remaining vector length is a multiple of 4. */
L20:
m = *PARAM_n % 4;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i__ = 1; i__ <= i__1; ++i__) {
dy[i__ -1] += *da * dx[i__ -1];
/* L30: */
}
if (*PARAM_n < 4) {
return 0;
}
L40:
mp1 = m + 1;
i__1 = *PARAM_n;
for (i__ = mp1; i__ <= i__1; i__ += 4) {
dy[i__ -1] += *da * dx[i__ -1];
dy[i__ + 1 -1] += *da * dx[i__ + 1 -1];
dy[i__ + 2 -1] += *da * dx[i__ + 2 -1];
dy[i__ + 3 -1] += *da * dx[i__ + 3 -1];
/* L50: */
}
return 0;
/* Code for equal, positive, non-unit increments. */
L60:
ns = *PARAM_n * incx;
i__1 = ns;
i__2 = incx;
for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
dy[i__ -1] = *da * dx[i__ -1] + dy[i__ -1];
/* L70: */
}
return 0;
} /* daxpy_ */
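/* Illustrative usage sketch (not part of the original sources): DAXPY
overwrites DY with DA*DX + DY. The hypothetical helper below shows the common
unit-stride call on two contiguous vectors. */
__device__ static void d_axpy_unit_stride(int n, double alpha, double *x, double *y, struct cuLsodaCommonBlock *common)
{
daxpy_(&n, &alpha, x, 1, y, 1, common); /* y := alpha*x + y with incx = incy = 1 */
}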
/* Subroutine */
__device__ int dumsum_(double a, double b, double *c__, struct cuLsodaCommonBlock *common)
{
/* Routine to force normal storing of A + B, for DUMACH. */
*c__ = a + b;
return 0;
} /* dumsum_ */
/* DECK DSCAL */
/* Subroutine */
__device__ int dscal_(int *PARAM_n, double *da, double *dx, int incx, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
/* Local variables */
int i__ = 0;
int m = 0;
int ix = 0;
int mp1 = 0;
/* ***BEGIN PROLOGUE DSCAL */
/* ***PURPOSE Multiply a vector by a constant. */
/* ***CATEGORY D1A6 */
/* ***TYPE DOUBLE PRECISION (SSCAL-S, DSCAL-D, CSCAL-C) */
/* ***KEYWORDS BLAS, LINEAR ALGEBRA, SCALE, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DA double precision scale factor */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* --Output-- */
/* DX double precision result (unchanged if N.LE.0) */
/* Replace double precision DX by double precision DA*DX. */
/* For I = 0 to N-1, replace DX(IX+I*INCX) with DA * DX(IX+I*INCX), */
/* where IX = 1 if INCX .GE. 0, else IX = 1+(1-N)*INCX. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 900821 Modified to correct problem with a negative increment. */
/* (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DSCAL */
/* ***FIRST EXECUTABLE STATEMENT DSCAL */
/* Parameter adjustments */
//--dx;
/* Function Body */
if (*PARAM_n <= 0) {
return 0;
}
if (incx == 1) {
goto L20;
}
/* Code for increment not equal to 1. */
ix = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
dx[ix -1] = *da * dx[ix -1];
ix += incx;
/* L10: */
}
return 0;
/* Code for increment equal to 1. */
/* Clean-up loop so remaining vector length is a multiple of 5. */
L20:
m = *PARAM_n % 5;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i__ = 1; i__ <= i__1; ++i__) {
dx[i__ -1] = *da * dx[i__ -1];
/* L30: */
}
if (*PARAM_n < 5) {
return 0;
}
L40:
mp1 = m + 1;
i__1 = *PARAM_n;
for (i__ = mp1; i__ <= i__1; i__ += 5) {
dx[i__ -1] = *da * dx[i__ -1];
dx[i__ + 1 -1] = *da * dx[i__ + 1 -1];
dx[i__ + 2 -1] = *da * dx[i__ + 2 -1];
dx[i__ + 3 -1] = *da * dx[i__ + 3 -1];
dx[i__ + 4 -1] = *da * dx[i__ + 4 -1];
/* L50: */
}
return 0;
} /* dscal_ */
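/* Illustrative usage sketch (not part of the original sources): DSCAL rescales
a vector in place, DX := DA*DX. The hypothetical helper below negates a
contiguous vector. */
__device__ static void d_negate_vector(int n, double *x, struct cuLsodaCommonBlock *common)
{
double minus_one = -1.;
dscal_(&n, &minus_one, x, 1, common); /* x := -x with unit stride */
}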
/* DECK DDOT */
__device__ double ddot_(int *PARAM_n, double *dx, int incx, double *dy, int incy, struct cuLsodaCommonBlock *common)
{
/* System generated locals */
int i__1 = 0;
int i__2 = 0;
double ret_val = 0.;
/* Local variables */
int i__ = 0;
int m = 0;
int ix = 0;
int iy = 0;
int ns = 0;
int mp1 = 0;
/* ***BEGIN PROLOGUE DDOT */
/* ***PURPOSE Compute the inner product of two vectors. */
/* ***CATEGORY D1A4 */
/* ***TYPE DOUBLE PRECISION (SDOT-S, DDOT-D, CDOTU-C) */
/* ***KEYWORDS BLAS, INNER PRODUCT, LINEAR ALGEBRA, VECTOR */
/* ***AUTHOR Lawson, C. L., (JPL) */
/* Hanson, R. J., (SNLA) */
/* Kincaid, D. R., (U. of Texas) */
/* Krogh, F. T., (JPL) */
/* ***DESCRIPTION */
/* B L A S Subprogram */
/* Description of Parameters */
/* --Input-- */
/* N number of elements in input vector(s) */
/* DX double precision vector with N elements */
/* INCX storage spacing between elements of DX */
/* DY double precision vector with N elements */
/* INCY storage spacing between elements of DY */
/* --Output-- */
/* DDOT double precision dot product (zero if N .LE. 0) */
/* Returns the dot product of double precision DX and DY. */
/* DDOT = sum for I = 0 to N-1 of DX(LX+I*INCX) * DY(LY+I*INCY), */
/* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is */
/* defined in a similar way using INCY. */
/* ***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. */
/* Krogh, Basic linear algebra subprograms for Fortran */
/* usage, Algorithm No. 539, Transactions on Mathematical */
/* Software 5, 3 (September 1979), pp. 308-323. */
/* ***ROUTINES CALLED (NONE) */
/* ***REVISION HISTORY (YYMMDD) */
/* 791001 DATE WRITTEN */
/* 890831 Modified array declarations. (WRB) */
/* 890831 REVISION DATE from Version 3.2 */
/* 891214 Prologue converted to Version 4.0 format. (BAB) */
/* 920310 Corrected definition of LX in DESCRIPTION. (WRB) */
/* 920501 Reformatted the REFERENCES section. (WRB) */
/* ***END PROLOGUE DDOT */
/* ***FIRST EXECUTABLE STATEMENT DDOT */
/* Parameter adjustments */
//--dy;
//--dx;
/* Function Body */
ret_val = 0.;
if (*PARAM_n <= 0) {
return ret_val;
}
if (incx == incy) {
if ((i__1 = incx - 1) < 0) {
goto L5;
} else if (i__1 == 0) {
goto L20;
} else {
goto L60;
}
}
/* Code for unequal or nonpositive increments. */
L5:
ix = 1;
iy = 1;
if (incx < 0) {
ix = (-(*PARAM_n) + 1) * incx + 1;
}
if (incy < 0) {
iy = (-(*PARAM_n) + 1) * incy + 1;
}
i__1 = *PARAM_n;
for (i__ = 1; i__ <= i__1; ++i__) {
ret_val += dx[ix -1] * dy[iy -1];
ix += incx;
iy += incy;
/* L10: */
}
return ret_val;
/* Code for both increments equal to 1. */
/* Clean-up loop so remaining vector length is a multiple of 5. */
L20:
m = *PARAM_n % 5;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i__ = 1; i__ <= i__1; ++i__) {
ret_val += dx[i__ -1] * dy[i__ -1];
/* L30: */
}
if (*PARAM_n < 5) {
return ret_val;
}
L40:
mp1 = m + 1;
i__1 = *PARAM_n;
for (i__ = mp1; i__ <= i__1; i__ += 5)
{
ret_val = ret_val + dx[i__ -1] * dy[i__ -1] + dx[i__ + 1 -1] * dy[i__ + 1 -1] +
dx[i__ + 2 -1] * dy[i__ + 2 -1] + dx[i__ + 3 -1] * dy[i__ + 3 -1] + dx[i__ + 4 -1] * dy[i__ + 4 -1];
/* L50: */
}
return ret_val;
/* Code for equal, positive, non-unit increments. */
L60:
ns = *PARAM_n * incx;
i__1 = ns;
i__2 = incx;
for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2)
{
ret_val += dx[i__ -1] * dy[i__ -1];
/* L70: */
}
return ret_val;
} /* ddot_ */
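/* Illustrative usage sketch (not part of the original sources): DDOT returns
sum_i DX(i)*DY(i). The hypothetical helper below builds a squared 2-norm on
top of it. */
__device__ static double d_norm2_squared(int n, double *x, struct cuLsodaCommonBlock *common)
{
return ddot_(&n, x, 1, x, 1, common); /* <x,x> with unit strides */
}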
__device__ double d_sign(double *a, double *b)
{
double x = 0.;
x = (*a >= 0 ? *a : - *a);
return( *b >= 0 ? x : -x);
}
__host__ __device__ void cuLsodaCommonBlockInit(struct cuLsodaCommonBlock *common, unsigned int threads)
{
// for (unsigned int i=0; i<THREADS*BLOCKS; i++) {
for (unsigned int i=0; i<threads; i++) {
// for (unsigned int i=0; i<blockDim.x*gridDim.x; i++) {
/* common[i] block initialization */
for (int bugger = 0; bugger < 13; bugger++)
{
common[i].CM_el[bugger] = 0.;
}
for (int bugger = 0; bugger < 156; bugger++)
{
common[i].CM_elco[bugger] = 0.;
}
for (int bugger = 0; bugger < 36; bugger++)
{
common[i].CM_tesco[bugger] = 0.;
}
for (int bugger = 0; bugger < 218; bugger++)
{
common[i].CM_rls[bugger] = 0.;
}
for (int bugger = 0; bugger < 12; bugger++)
{
common[i].CM_cm1[bugger] = 0.;
}
for (int bugger = 0; bugger < 5; bugger++)
{
common[i].CM_cm2[bugger] = 0.;
}
for (int bugger = 0; bugger < 22; bugger++)
{
common[i].CM_rlsa[bugger] = 0.;
}
for (int bugger = 0; bugger < 37; bugger++)
{
common[i].CM_ils[bugger] = 0;
}
for (int bugger = 0; bugger < 9; bugger++)
{
common[i].CM_ilsa[bugger] = 0;
}
double smThing[12] = { .5,.575,.55,.45,.35,.25,.2,.15,.1,.075,.05,.025 };
for(int bob = 0; bob <12; bob ++)
{
common[i].CM_sm1[bob] = smThing[bob];
}
// initialize doubles in the common[i] block to zero
common[i].CM_conit = 0.;
common[i].CM_crate = 0.;
common[i].CM_ccmax = 0.;
common[i].CM_el0 = 0.;
common[i].CM_h__ = 0.;
common[i].CM_hmin = 0.;
common[i].CM_hmxi = 0.;
common[i].CM_hu = 0.;
common[i].CM_rc = 0.;
common[i].CM_tn = 0.;
common[i].CM_uround = 0.;
common[i].CM_pdest = 0.;
common[i].CM_pdlast = 0.;
common[i].CM_ratio = 0.;
common[i].CM_hold = 0.;
common[i].CM_rmax = 0.;
common[i].CM_tsw = 0.;
common[i].CM_pdnorm = 0.;
// initialize ints in common[i] block to zero
common[i].CM_init = 0;
common[i].CM_mxstep = 0;
common[i].CM_mxhnil = 0;
common[i].CM_nhnil = 0;
common[i].CM_nslast = 0;
common[i].CM_nyh = 0;
common[i].CM_icf = 0;
common[i].CM_ierpj = 0;
common[i].CM_iersl = 0;
common[i].CM_jcur = 0;
common[i].CM_jstart = 0;
common[i].CM_kflag = 0;
common[i].CM_l = 0;
common[i].CM_lyh = 0;
common[i].CM_lewt = 0;
common[i].CM_lacor = 0;
common[i].CM_lsavf = 0;
common[i].CM_lwm = 0;
common[i].CM_liwm = 0;
common[i].CM_meth = 0;
common[i].CM_miter = 0;
common[i].CM_maxord = 0;
common[i].CM_maxcor = 0;
common[i].CM_msbp = 0;
common[i].CM_mxncf = 0;
common[i].CM_n = 0;
common[i].CM_nq = 0;
common[i].CM_nst = 0;
common[i].CM_nfe = 0;
common[i].CM_nje = 0;
common[i].CM_nqu = 0;
common[i].CM_ialth = 0;
common[i].CM_ipup = 0;
common[i].CM_lmax = 0;
common[i].CM_nqnyh = 0;
common[i].CM_nslp = 0;
common[i].CM_insufr = 0;
common[i].CM_insufi = 0;
common[i].CM_ixpr = 0;
common[i].CM_jtyp = 0;
common[i].CM_mused = 0;
common[i].CM_mxordn = 0;
common[i].CM_mxords = 0;
common[i].CM_icount = 0;
common[i].CM_irflag = 0;
/* End Common Block initialization */
}
}
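/* Illustrative host-side sketch (hypothetical, not part of the original
sources): allocate one common block per simulated thread, initialize it with
cuLsodaCommonBlockInit, and copy it to the device. Error checking and the
matching cudaFree are left to the caller. */
static struct cuLsodaCommonBlock* allocateCommonBlocksOnDevice(unsigned int threads)
{
struct cuLsodaCommonBlock *h_common = new struct cuLsodaCommonBlock[threads];
struct cuLsodaCommonBlock *d_common = NULL;
cuLsodaCommonBlockInit(h_common, threads); /* zero every per-thread block and load CM_sm1 */
cudaMalloc((void**)&d_common, sizeof(struct cuLsodaCommonBlock) * threads);
cudaMemcpy(d_common, h_common, sizeof(struct cuLsodaCommonBlock) * threads, cudaMemcpyHostToDevice);
delete [] h_common;
return d_common;
}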
#endif
#endif
/*
This is the entry point of cuLSODA.
All these state variables have been changed to multidimensional arrays, in order to exploit the SIMD architecture.
*/
//template <bool ACTIVATE_SHARED_MEMORY>
__global__ void cuLsoda(myFex fex, int *neq, double *y, double *t, double *tout, int *itol, double *rtol,
double *atol, int *itask, int *istate, int *iopt, double *rwork, int *lrw, int *iwork, int *liw, myJex jac, int *jt, struct cuLsodaCommonBlock *common, int* debug, char* comp_ode, param_t* flattenODE, unsigned int* offsetODE, double* costanti, conc_t* device_X, unsigned int campione, unsigned int* s2s,
param_t* myjac, unsigned int* myjacoffset, bool ACTIVATE_SHARED_MEMORY, bool ACTIVATE_CONSTANT_MEMORY)
{
// unsigned int tid = threadIdx.x;
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
// EXPERIMENTAL
if (tid>=DEV_ACTUAL_THREADS) {
return;
}
/* We move time and state out of global memory by putting them into the faster shared memory. */
//if (ACTIVATE_SHARED_MEMORY) {
// #ifdef USE_SHARED_MEMORY
//double* sh_y = (double*)shared;
//double* sh_t = (double*)&sh_y[DEV_CONST_SPECIES*blockDim.x];
// #endif
//}
double* sh_y = (double*)shared;
double* sh_t = (double*)&sh_y[DEV_CONST_SPECIES*blockDim.x];
/* Load shared memory */
if (ACTIVATE_SHARED_MEMORY) {
// #ifdef USE_SHARED_MEMORY
for (unsigned int i=0; i<DEV_CONST_SPECIES; i++) {
sh_y[threadIdx.x*DEV_CONST_SPECIES+i] = y[tid*DEV_CONST_SPECIES+i]; // watch out! different offsets!
}
sh_t[threadIdx.x] = t[tid];
}
//#endif
if (ACTIVATE_SHARED_MEMORY) {
if (ACTIVATE_CONSTANT_MEMORY) {
dlsoda_(fex, neq+tid, sh_y+threadIdx.x*DEV_CONST_SPECIES, sh_t+threadIdx.x, tout+tid, itol+tid, rtol+tid, atol+tid*DEV_CONST_SPECIES, itask+tid, istate+tid,
iopt+tid, rwork+(*(lrw+tid))*tid, lrw+tid, iwork+(*(liw+tid))*tid, liw+tid, jac, jt+tid, common+tid, debug, comp_ode, ODE_new, offsetODE, costanti+tid*DEV_CONST_REACTIONS, JAC_new, myjacoffset);
} else {
dlsoda_(fex, neq+tid, sh_y+threadIdx.x*DEV_CONST_SPECIES, sh_t+threadIdx.x, tout+tid, itol+tid, rtol+tid, atol+tid*DEV_CONST_SPECIES, itask+tid, istate+tid,
iopt+tid, rwork+(*(lrw+tid))*tid, lrw+tid, iwork+(*(liw+tid))*tid, liw+tid, jac, jt+tid, common+tid, debug, comp_ode, flattenODE, offsetODE, costanti+tid*DEV_CONST_REACTIONS, myjac, myjacoffset);
}
} else {
dlsoda_(fex, neq+tid, y+tid*DEV_CONST_SPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid*DEV_CONST_SPECIES, itask+tid, istate+tid,
iopt+tid, rwork+(*(lrw+tid))*tid, lrw+tid, iwork+(*(liw+tid))*tid, liw+tid, jac, jt+tid, common+tid, debug, comp_ode, flattenODE, offsetODE, costanti+tid*DEV_CONST_REACTIONS, myjac, myjacoffset);
}
/* Unload shared memory */
if (ACTIVATE_SHARED_MEMORY) {
//#ifdef USE_SHARED_MEMORY
for (unsigned int i=0; i<DEV_CONST_SPECIES; i++) {
y[tid*DEV_CONST_SPECIES+i] = sh_y[threadIdx.x*DEV_CONST_SPECIES+i]; // watch out! different offsets!
}
t[tid]=sh_t[threadIdx.x];
// #endif
}
// store samples
unsigned int larg = blockDim.x * gridDim.x;
// #define ACCESS_SAMPLE larg*DEV_CONST_SAMPLESLUN*campione + larg*s + tid
for (unsigned int s=0; s<DEV_CONST_SAMPLESLUN; s++) {
if (ACTIVATE_SHARED_MEMORY) {
//#ifdef USE_SHARED_MEMORY
device_X[ ACCESS_SAMPLE ] = sh_y[threadIdx.x*DEV_CONST_SPECIES+s2s[s]];
} else {
// #else
device_X[ ACCESS_SAMPLE ] = y[tid*DEV_CONST_SPECIES+s2s[s]];
//#endif
}
}
// return;
}
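/* Illustrative helper (hypothetical, not part of the original sources): when
ACTIVATE_SHARED_MEMORY is true, cuLsoda reads its dynamic shared buffer as
DEV_CONST_SPECIES doubles of state (sh_y) plus one double of time (sh_t) per
thread in the block, so a launch must request this many bytes as the third
launch-configuration argument. */
static size_t cuLsodaSharedMemoryBytes(unsigned int threads_per_block, unsigned int species)
{
return (size_t)threads_per_block * (species + 1) * sizeof(double); /* sh_y + sh_t per thread */
}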
bool CheckArguments(unsigned int argc, char** argv) {
if (argc<6) {
printf("ERROR: please specify:\n - path to BioSimWare project\n - number of CUDA blocks\n - output folder\n - output prefix (excluding folder)\n - GPU number\n - Fitness enabled (1) or disabled (0) \n - Memory configuration: (0) only global memory, (1) just shared memory, (2) both shared and constant memory\n");
return false;
}
unsigned int GPU = atoi(argv[5]);
// printf(" * Launching cupSODA batch of simulations on GPU%d\n", GPU);
cudaSetDevice(GPU);
CudaCheckError();
return true;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////// FITNESS stuff //////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* This fitness function is specific to the PRR */
__global__ void calculateFitnessPRR( double* samples, double* target, double* fitness, char* swarm ) {
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int larg = blockDim.x * gridDim.x;
unsigned int experiment = swarm[tid];
double conc_u1 = 0;
double conc_u2 = 0;
double conc_u3 = 0;
double conc_all = 0;
unsigned int num_specie_u1 = 4;
unsigned int num_specie_u2 = 3;
unsigned int num_specie_u3 = 2;
unsigned int num_swarms = 3;
unsigned int totale_specie = 3;
fitness[tid] = 0;
for (unsigned int campione=0; campione<DEV_CONST_TIMESLUN; campione++) {
unsigned int offset=campione*(num_swarms*totale_specie) + experiment*totale_specie;
// if (threadIdx.x==0) printf("%f\n", target[offset]);
if (target[offset]==-1) continue;
for (unsigned int s=0; s<num_specie_u1; s++) {
// conc_u1 += samples[ tid*DEV_CONST_SPECIES+s ];
conc_u1 += samples[ ACCESS_SAMPLE ];
}
for (unsigned int s=num_specie_u1; s<num_specie_u1+num_specie_u2; s++) {
// conc_u2 += samples[ tid*DEV_CONST_SPECIES+s ];
conc_u2 += samples[ ACCESS_SAMPLE ];
}
for (unsigned int s=num_specie_u1+num_specie_u2; s<num_specie_u1+num_specie_u2+num_specie_u3; s++) {
// conc_u3 += samples[ tid*DEV_CONST_SPECIES+s ];
conc_u3 += samples[ ACCESS_SAMPLE ];
}
conc_all = (conc_u1+conc_u2+conc_u3);
double ratio_u1 = 0;
double ratio_u2 = 0;
double ratio_u3 = 0;
if(conc_all!=0) {
ratio_u1 = (conc_u1 / conc_all)*100;
ratio_u2 = (conc_u2 / conc_all)*100;
ratio_u3 = (conc_u3 / conc_all)*100;
}
/*if (tid==0){
printf("%d\n", campione);
printf("%f\t", target[offset]);
printf("%f\t", target[offset+1]);
printf("%f\t", target[offset+2]);
printf("\n");
}*/
fitness[tid] +=
abs(target[offset+0]-ratio_u1) +
abs(target[offset+1]-ratio_u2) +
abs(target[offset+2]-ratio_u3);
}
};
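/* Illustrative worked example (not part of the original sources): with group
concentrations u1 = 2, u2 = 1, u3 = 1 the percentages become 50, 25 and 25, so a
target row of (40, 30, 30) contributes |40-50| + |30-25| + |30-25| = 20 to the
fitness of that thread. The hypothetical helper below isolates this per-sample
contribution. */
__device__ static double prr_sample_error(double u1, double u2, double u3, const double *target_row)
{
double all = u1 + u2 + u3;
double r1 = 0., r2 = 0., r3 = 0.;
if (all != 0.) { r1 = u1 / all * 100.; r2 = u2 / all * 100.; r3 = u3 / all * 100.; }
return fabs(target_row[0] - r1) + fabs(target_row[1] - r2) + fabs(target_row[2] - r3);
}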
__device__ inline double fetch_target(double* source, unsigned int species, unsigned int experiment, unsigned int repetition, unsigned int sample) {
return source[
sample*(DEV_CONST_SAMPLESLUN*DEV_CONST_EXPERIMENTS*DEV_CONST_REPETITIONS) +
experiment*(DEV_CONST_SAMPLESLUN*DEV_CONST_REPETITIONS) +
repetition*(DEV_CONST_SAMPLESLUN) + species ];
}
__device__ inline double fetch_simulation(double* source, unsigned int species, unsigned int sample) {
unsigned int gid = threadIdx.x + blockDim.x*blockIdx.x;
return source[ blockDim.x*gridDim.x*DEV_CONST_SAMPLESLUN*sample + blockDim.x*gridDim.x*species+ gid ];
}
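/* Illustrative note (not part of the original sources): device_X is laid out
sample-major, then species, then thread, i.e.
index = total_threads*DEV_CONST_SAMPLESLUN*sample + total_threads*species + gid,
so consecutive threads touch consecutive addresses and both the stores issued
in cuLsoda and the loads in fetch_simulation are coalesced. The hypothetical
helper below writes one value with the same indexing. */
__device__ static inline void store_simulation(double* dest, unsigned int species, unsigned int sample, double value) {
unsigned int gid = threadIdx.x + blockDim.x*blockIdx.x;
dest[ blockDim.x*gridDim.x*DEV_CONST_SAMPLESLUN*sample + blockDim.x*gridDim.x*species + gid ] = value;
}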
__global__ void calculateFitness( double* simulation, double* target, double* output_fitness, char* swarm ) {
unsigned int larg = blockDim.x * gridDim.x;
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int SWARMS = swarm[ larg-1 ] + 1;
unsigned int D = swarm[tid];
// const unsigned int WATCHED = 32;
double subfitness =0;
for (unsigned int campione=1; campione<DEV_CONST_SAMPLES; campione++) {
for (unsigned int s=0; s<DEV_CONST_SAMPLESLUN; s++) {
unsigned int sid = SWARMS*DEV_CONST_SAMPLESLUN*campione + DEV_CONST_SAMPLESLUN*D + s;
subfitness += abs( simulation[ACCESS_SAMPLE] - target[sid] ) /target[sid];
//if (tid==WATCHED && s==0) {
// if (tid==0) printf("%d, %d, %d, %f\t%f\n", campione, sid, s, simulation[ACCESS_SAMPLE], target[sid]);
//}
}
}
output_fitness[tid] = subfitness;
}
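/* Illustrative helper (hypothetical, not part of the original sources): each
sampled species above contributes its relative error |sim - tgt| / tgt, which
assumes strictly positive target values (a zero target would divide by zero). */
__device__ static inline double relative_error(double sim, double tgt)
{
return fabs(sim - tgt) / tgt;
}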
/*
__global__ void calculateFitness_old( double* samples, double* target, double* fitness, char* swarm ) {
const unsigned int WATCHED = 70;
unsigned int larg = blockDim.x * gridDim.x;
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int D = DEV_CONST_EXPERIMENTS; // doses
unsigned int E = DEV_CONST_REPETITIONS; // repetitions at each dose
unsigned int SAMPLESPECIES = DEV_CONST_SAMPLESLUN; // we only want the sampled species
unsigned int experiment = blockIdx.x; // "dose"
double subfitness =0;
// for each sample...
for (unsigned int campione=1; campione<DEV_CONST_SAMPLES; campione++) {
// ...for each (sampled) chemical species...
for (unsigned int s=0; s<DEV_CONST_SAMPLESPECIES; s++) {
// ACCESS_SAMPLE larg*DEV_CONST_SAMPLESLUN*campione + larg*s + tid
double sample = samples[ ACCESS_SAMPLE ];
if (tid==WATCHED && s==0) {
printf("%d, %d, %e\t", campione, s, sample);
}
// calculate the distance from target time series
for (unsigned int repetition=0; repetition<E; repetition++) {
double tgt = fetch_target(target, s, experiment, repetition, campione-1);
if (tid==WATCHED && s==0) {
printf("%e\n", tgt);
}
double dist = (double)abs(tgt-sample);
if (tgt>0) dist /= tgt;
subfitness += dist;
}
}
}
// divide by the number of species and by the number of samples
fitness[tid] = (1.0/DEV_CONST_TIMESLUN)*(1.0/DEV_CONST_SAMPLESLUN)*subfitness;
if (tid==WATCHED)
printf("FITNESS: %f\n", fitness[tid]);
// fitness[tid] = tid;
};
*/ |
e279945117b3a9ccb1a9a80480e8ff5c881bc7bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/embed_layer.hpp"
#ifdef USE_ROCM
#include "caffe/util/gpu_util.cuh"
#endif // USE_ROCM
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
template <typename Dtype>
__global__ void EmbedForward(const int_tp nthreads, const Dtype* bottom_data,
const Dtype* weight, const int_tp M, const int_tp N, const int_tp K,
Dtype* top_data) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int_tp n = top_index / N;
const int_tp d = top_index % N;
const int_tp index = static_cast<int_tp>(bottom_data[n]);
const int_tp weight_index = index * N + d;
top_data[top_index] = weight[weight_index];
}
}
template <typename Dtype>
__global__ void EmbedBackward(const int_tp nthreads, const Dtype* bottom_data,
const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,
Dtype* weight_diff) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int_tp n = top_index / N;
const int_tp d = top_index % N;
const int_tp index = static_cast<int_tp>(bottom_data[n]);
const int_tp weight_index = index * N + d;
caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
}
}
#endif // USE_ROCM
template <typename Dtype>
void EmbedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const int_tp count = top[0]->count();
if (this->get_device()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
EmbedForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, weight, M_, N_, K_, top_data);
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1),
bias_multiplier_.gpu_data(),
this->blobs_[1]->gpu_data(), Dtype(1), top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_embed = program.get_kernel(
CL_KERNEL_SELECT("embed_forward"));
viennacl::ocl::enqueue(
oclk_embed(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) weight, &ctx), M_, N_, K_,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
if (bias_term_) {
greentea_gpu_gemm<Dtype>(this->get_device()->id(), CblasNoTrans,
CblasNoTrans, M_, N_, 1, Dtype(1),
(cl_mem) (bias_multiplier_.gpu_data()), 0,
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
Dtype(1), (cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
template <typename Dtype>
void EmbedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
if (this->param_propagate_down_[0]) {
const int_tp top_count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
if (this->get_device()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
EmbedBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS)(
top_count, bottom_data, top_diff, M_, N_, K_, weight_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_embed = program.get_kernel(
CL_KERNEL_SELECT("embed_backward"));
viennacl::ocl::enqueue(
oclk_embed(top_count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_diff, &ctx), M_, N_, K_,
WrapHandle((cl_mem) weight_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
if (this->get_device()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, Dtype(1), top_diff,
bias_multiplier_.gpu_data(), Dtype(1), bias_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
greentea_gpu_gemv<Dtype>(this->get_device()->id(), CblasTrans, M_, N_,
Dtype(1), (cl_mem) top_diff, 0,
(cl_mem) (bias_multiplier_.gpu_data()), 0,
Dtype(1), (cl_mem) bias_diff, 0);
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EmbedLayer);
} // namespace caffe
| e279945117b3a9ccb1a9a80480e8ff5c881bc7bc.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/embed_layer.hpp"
#ifdef USE_CUDA
#include "caffe/util/gpu_util.cuh"
#endif // USE_CUDA
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
template <typename Dtype>
__global__ void EmbedForward(const int_tp nthreads, const Dtype* bottom_data,
const Dtype* weight, const int_tp M, const int_tp N, const int_tp K,
Dtype* top_data) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int_tp n = top_index / N;
const int_tp d = top_index % N;
const int_tp index = static_cast<int_tp>(bottom_data[n]);
const int_tp weight_index = index * N + d;
top_data[top_index] = weight[weight_index];
}
}
template <typename Dtype>
__global__ void EmbedBackward(const int_tp nthreads, const Dtype* bottom_data,
const Dtype* top_diff, const int_tp M, const int_tp N, const int_tp K,
Dtype* weight_diff) {
CUDA_KERNEL_LOOP(top_index, nthreads) {
const int_tp n = top_index / N;
const int_tp d = top_index % N;
const int_tp index = static_cast<int_tp>(bottom_data[n]);
const int_tp weight_index = index * N + d;
caffe_gpu_atomic_add(top_diff[top_index], weight_diff + weight_index);
}
}
#endif // USE_CUDA
template <typename Dtype>
void EmbedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const int_tp count = top[0]->count();
if (this->get_device()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
EmbedForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, weight, M_, N_, K_, top_data);
if (bias_term_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, Dtype(1),
bias_multiplier_.gpu_data(),
this->blobs_[1]->gpu_data(), Dtype(1), top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_embed = program.get_kernel(
CL_KERNEL_SELECT("embed_forward"));
viennacl::ocl::enqueue(
oclk_embed(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) weight, &ctx), M_, N_, K_,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
if (bias_term_) {
greentea_gpu_gemm<Dtype>(this->get_device()->id(), CblasNoTrans,
CblasNoTrans, M_, N_, 1, Dtype(1),
(cl_mem) (bias_multiplier_.gpu_data()), 0,
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
Dtype(1), (cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
template <typename Dtype>
void EmbedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
if (this->param_propagate_down_[0]) {
const int_tp top_count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
if (this->get_device()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
EmbedBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS)(
top_count, bottom_data, top_diff, M_, N_, K_, weight_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_embed = program.get_kernel(
CL_KERNEL_SELECT("embed_backward"));
viennacl::ocl::enqueue(
oclk_embed(top_count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_diff, &ctx), M_, N_, K_,
WrapHandle((cl_mem) weight_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
if (this->get_device()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, Dtype(1), top_diff,
bias_multiplier_.gpu_data(), Dtype(1), bias_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_gemv<Dtype>(this->get_device()->id(), CblasTrans, M_, N_,
Dtype(1), (cl_mem) top_diff, 0,
(cl_mem) (bias_multiplier_.gpu_data()), 0,
Dtype(1), (cl_mem) bias_diff, 0);
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EmbedLayer);
} // namespace caffe
|
71511674a9422c96d4893a5b86c7349cfc8f03ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "VecAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
const int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
VecAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
VecAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
VecAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 71511674a9422c96d4893a5b86c7349cfc8f03ef.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "VecAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
const int *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
VecAdd<<<gridBlock,threadBlock>>>(A,B,C,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
VecAdd<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
VecAdd<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
91c53c7cb5e8e9fe7d67846bea2d74720fb5b1c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan_based.cuh"
#include "common_hip.cuh"
#include <cub/block/block_scan.cuh>
std::vector< std::vector<int> > multi_search_scan_based_setup(const device_graph &g, int start, int end)
{
//For now, use "standard" grid/block sizes. These can be tuned later on.
dim3 dimGrid, dimBlock;
//Returns number of source vertices to store for verification purposes
size_t sources_to_store = configure_grid(dimGrid,dimBlock,start,end);
//Device pointers
int *d_d, *Q_d, *Q2_d;
size_t pitch_d, pitch_Q, pitch_Q2;
hipEvent_t start_event, end_event;
//Allocate algorithm-specific memory
start_clock(start_event,end_event);
checkCudaErrors(hipMallocPitch((void**)&d_d,&pitch_d,sizeof(int)*g.n,sources_to_store));
checkCudaErrors(hipMallocPitch((void**)&Q_d,&pitch_Q,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&Q2_d,&pitch_Q2,sizeof(int)*g.n,dimGrid.x));
size_t GPU_memory_requirement = sizeof(int)*g.n*sources_to_store + 2*sizeof(int)*g.n*dimGrid.x + sizeof(int)*(g.n+1) + sizeof(int)*(g.m);
std::cout << "Scan based memory requirement: " << GPU_memory_requirement/(1 << 20) << " MB" << std::endl;
hipLaunchKernelGGL(( multi_search_scan_based), dim3(dimGrid),dim3(dimBlock), 0, 0, thrust::raw_pointer_cast(g.R.data()),thrust::raw_pointer_cast(g.C.data()),g.n,d_d,pitch_d,Q_d,pitch_Q,Q2_d,pitch_Q2,start,end);
checkCudaErrors(hipPeekAtLastError());
std::vector< std::vector<int> > d_host_vector;
transfer_result(g,d_d,pitch_d,sources_to_store,d_host_vector);
//Free algorithm-specific memory
checkCudaErrors(hipFree(Q2_d));
checkCudaErrors(hipFree(Q_d));
checkCudaErrors(hipFree(d_d));
float time = end_clock(start_event,end_event);
std::cout << "Time for scan-based neighbor gathering: " << std::setprecision(9) << time << " s" << std::endl;
return d_host_vector;
}
__global__ void multi_search_scan_based(const int *R, const int *C, const int n, int *d, size_t pitch_d, int *Q, size_t pitch_Q, int *Q2, size_t pitch_Q2, const int start, const int end)
{
int j = threadIdx.x;
__shared__ int *Q_row;
__shared__ int *Q2_row;
if(j == 0)
{
Q_row = (int*)((char*)Q + blockIdx.x*pitch_Q);
Q2_row = (int*)((char*)Q2 + blockIdx.x*pitch_Q2);
}
__syncthreads();
for(int i=blockIdx.x+start; i<end; i+=gridDim.x)
{
int *d_row = (int*)((char*)d + blockIdx.x*pitch_d);
for(int k=threadIdx.x; k<n; k+=blockDim.x)
{
if(k == i)
{
d_row[k] = 0;
}
else
{
d_row[k] = INT_MAX;
}
}
__syncthreads();
__shared__ int Q_len;
__shared__ int Q2_len;
__shared__ int current_depth;
if(j == 0)
{
Q_row[0] = i;
Q_len = 1;
Q2_len = 0;
current_depth = 0;
}
__syncthreads();
while(1)
{
//Listing 10: Scan-based neighbor gathering
volatile __shared__ int comm[1024][2]; //1024 is the number of threads per CTA
int v, r, r_end;
int k = threadIdx.x;
if(k < Q_len)
{
v = Q_row[k];
r = R[v];
r_end = R[v+1];
}
else
{
v = -1;
r = 0;
r_end = 0;
}
//Reserve gather offsets using hipcub::BlockScan
//Specialize hipcub::BlockScan for a 1D block of 1024 threads on type int
typedef hipcub::BlockScan<int,1024,cub::BLOCK_SCAN_RAKING,1,1,350> BlockScan;
//Allocate shared memory for BlockScan
__shared__ typename BlockScan::TempStorage temp_storage;
//For now, scan the items for the blockDim.x (1024) queue items that are currently being inspected
//TODO: It should be more efficient to have threads gather all of their queue elements at once
int rsv_rank;
int total;
//Process fine-grained batches of adjlists
while(1)
{
int cta_progress = 0;
BlockScan(temp_storage).ExclusiveSum(r_end-r,rsv_rank,total);
int remain;
while((remain = total-cta_progress) > 0)
{
//Share batch of gather offsets
while((rsv_rank < cta_progress + blockDim.x) && (r < r_end))
{
comm[rsv_rank-cta_progress][0] = r;
comm[rsv_rank-cta_progress][1] = v;
rsv_rank++;
r++;
}
__syncthreads();
//Gather batch of adjlist(s)
int min_threads_remain = (remain < blockDim.x) ? remain : blockDim.x;
if(threadIdx.x < min_threads_remain)
{
volatile int w = C[comm[threadIdx.x][0]];
int v_new = comm[threadIdx.x][1];
//Not sure if the v originally gathered by a thread will correspond to the parent of the neighbor that is found
// so keep track of the current depth and use that number instead
if(atomicCAS(&d_row[w],INT_MAX,d_row[v_new]+1) == INT_MAX)
{
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
}
cta_progress += blockDim.x;
__syncthreads();
}
k+=blockDim.x;
if(k < Q_len)
{
v = Q_row[k];
r = R[v];
r_end = R[v+1];
}
else
{
v = -1;
r = 0;
r_end = 0;
}
if((k-threadIdx.x) >= Q_len) //If thread 0 doesn't have work, the entire warp is done
{
break;
}
}
__syncthreads();
if(Q2_len == 0)
{
break;
}
else
{
for(int kk=threadIdx.x; kk<Q2_len; kk+=blockDim.x)
{
Q_row[kk] = Q2_row[kk];
}
__syncthreads();
if(j==0)
{
Q_len = Q2_len;
Q2_len = 0;
current_depth++;
}
__syncthreads();
}
}
}
}
| 91c53c7cb5e8e9fe7d67846bea2d74720fb5b1c8.cu | #include "scan_based.cuh"
#include "common.cuh"
#include <cub/block/block_scan.cuh>
std::vector< std::vector<int> > multi_search_scan_based_setup(const device_graph &g, int start, int end)
{
//For now, use "standard" grid/block sizes. These can be tuned later on.
dim3 dimGrid, dimBlock;
//Returns number of source vertices to store for verification purposes
size_t sources_to_store = configure_grid(dimGrid,dimBlock,start,end);
//Device pointers
int *d_d, *Q_d, *Q2_d;
size_t pitch_d, pitch_Q, pitch_Q2;
cudaEvent_t start_event, end_event;
//Allocate algorithm-specific memory
start_clock(start_event,end_event);
checkCudaErrors(cudaMallocPitch((void**)&d_d,&pitch_d,sizeof(int)*g.n,sources_to_store));
checkCudaErrors(cudaMallocPitch((void**)&Q_d,&pitch_Q,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&Q2_d,&pitch_Q2,sizeof(int)*g.n,dimGrid.x));
size_t GPU_memory_requirement = sizeof(int)*g.n*sources_to_store + 2*sizeof(int)*g.n*dimGrid.x + sizeof(int)*(g.n+1) + sizeof(int)*(g.m);
std::cout << "Scan based memory requirement: " << GPU_memory_requirement/(1 << 20) << " MB" << std::endl;
multi_search_scan_based<<<dimGrid,dimBlock>>>(thrust::raw_pointer_cast(g.R.data()),thrust::raw_pointer_cast(g.C.data()),g.n,d_d,pitch_d,Q_d,pitch_Q,Q2_d,pitch_Q2,start,end);
checkCudaErrors(cudaPeekAtLastError());
std::vector< std::vector<int> > d_host_vector;
transfer_result(g,d_d,pitch_d,sources_to_store,d_host_vector);
//Free algorithm-specific memory
checkCudaErrors(cudaFree(Q2_d));
checkCudaErrors(cudaFree(Q_d));
checkCudaErrors(cudaFree(d_d));
float time = end_clock(start_event,end_event);
std::cout << "Time for scan-based neighbor gathering: " << std::setprecision(9) << time << " s" << std::endl;
return d_host_vector;
}
__global__ void multi_search_scan_based(const int *R, const int *C, const int n, int *d, size_t pitch_d, int *Q, size_t pitch_Q, int *Q2, size_t pitch_Q2, const int start, const int end)
{
int j = threadIdx.x;
__shared__ int *Q_row;
__shared__ int *Q2_row;
if(j == 0)
{
Q_row = (int*)((char*)Q + blockIdx.x*pitch_Q);
Q2_row = (int*)((char*)Q2 + blockIdx.x*pitch_Q2);
}
__syncthreads();
for(int i=blockIdx.x+start; i<end; i+=gridDim.x)
{
int *d_row = (int*)((char*)d + blockIdx.x*pitch_d);
for(int k=threadIdx.x; k<n; k+=blockDim.x)
{
if(k == i)
{
d_row[k] = 0;
}
else
{
d_row[k] = INT_MAX;
}
}
__syncthreads();
__shared__ int Q_len;
__shared__ int Q2_len;
__shared__ int current_depth;
if(j == 0)
{
Q_row[0] = i;
Q_len = 1;
Q2_len = 0;
current_depth = 0;
}
__syncthreads();
while(1)
{
//Listing 10: Scan-based neighbor gathering
volatile __shared__ int comm[1024][2]; //1024 is the number of threads per CTA
int v, r, r_end;
int k = threadIdx.x;
if(k < Q_len)
{
v = Q_row[k];
r = R[v];
r_end = R[v+1];
}
else
{
v = -1;
r = 0;
r_end = 0;
}
//Reserve gather offsets using cub::BlockScan
//Specialize cub::BlockScan for a 1D block of 1024 threads on type int
typedef cub::BlockScan<int,1024,cub::BLOCK_SCAN_RAKING,1,1,350> BlockScan;
//Allocate shared memory for BlockScan
__shared__ typename BlockScan::TempStorage temp_storage;
//For now, scan the items for the blockDim.x (1024) queue items that are currently being inspected
//TODO: It should be more efficient to have threads gather all of their queue elements at once
int rsv_rank;
int total;
//Process fine-grained batches of adjlists
while(1)
{
int cta_progress = 0;
BlockScan(temp_storage).ExclusiveSum(r_end-r,rsv_rank,total);
int remain;
while((remain = total-cta_progress) > 0)
{
//Share batch of gather offsets
while((rsv_rank < cta_progress + blockDim.x) && (r < r_end))
{
comm[rsv_rank-cta_progress][0] = r;
comm[rsv_rank-cta_progress][1] = v;
rsv_rank++;
r++;
}
__syncthreads();
//Gather batch of adjlist(s)
int min_threads_remain = (remain < blockDim.x) ? remain : blockDim.x;
if(threadIdx.x < min_threads_remain)
{
volatile int w = C[comm[threadIdx.x][0]];
int v_new = comm[threadIdx.x][1];
//Not sure if the v originally gathered by a thread will correspond to the parent of the neighbor that is found
// so keep track of the current depth and use that number instead
if(atomicCAS(&d_row[w],INT_MAX,d_row[v_new]+1) == INT_MAX)
{
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
}
cta_progress += blockDim.x;
__syncthreads();
}
k+=blockDim.x;
if(k < Q_len)
{
v = Q_row[k];
r = R[v];
r_end = R[v+1];
}
else
{
v = -1;
r = 0;
r_end = 0;
}
if((k-threadIdx.x) >= Q_len) //If thread 0 doesn't have work, the entire warp is done
{
break;
}
}
__syncthreads();
if(Q2_len == 0)
{
break;
}
else
{
for(int kk=threadIdx.x; kk<Q2_len; kk+=blockDim.x)
{
Q_row[kk] = Q2_row[kk];
}
__syncthreads();
if(j==0)
{
Q_len = Q2_len;
Q2_len = 0;
current_depth++;
}
__syncthreads();
}
}
}
}
|
f582a782dc27959a65e4d59db1710e3e4fd0787d.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompare.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathCompare.cu"
#include "../THCGenerateShortType.h"
| f582a782dc27959a65e4d59db1710e3e4fd0787d.cu | #include "../THCTensorMathCompare.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathCompare.cu"
#include "../THCGenerateShortType.h"
|
e94d8ce45596effa5b1e425e520115c22c447765.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_inline.h>
template <class T>
__global__ void gather_kernel(T *dst, T *src, int *index, int thread_num, int element_num)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(; tid < element_num; tid += thread_num)
{
dst[tid] = src[index[tid]];
}
}
template <class T>
__global__ void scatter_kernel(T *dst, T *src, int *index, int thread_num, int element_num)
{
//printf("enter scatter\n");
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(; tid < element_num; tid += thread_num)
{
dst[index[tid]] = src[tid];
}
}
| e94d8ce45596effa5b1e425e520115c22c447765.cu | #include <cutil_inline.h>
template <class T>
__global__ void gather_kernel(T *dst, T *src, int *index, int thread_num, int element_num)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(; tid < element_num; tid += thread_num)
{
dst[tid] = src[index[tid]];
}
}
template <class T>
__global__ void scatter_kernel(T *dst, T *src, int *index, int thread_num, int element_num)
{
//printf("enter scatter\n");
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(; tid < element_num; tid += thread_num)
{
dst[index[tid]] = src[tid];
}
}
|
583ec16e14126762d439e363cace3ae909e5ee43.hip | // !!! This is a file automatically generated by hipify!!!
#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <hip/hip_runtime.h>
#include <petsccublas.h> /* For CHKERRCUDA */
/* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
__device__ static inline PetscInt MapTidToIndex(const PetscInt *opt,PetscInt tid)
{
PetscInt i,j,k,m,n,r;
const PetscInt *offset,*start,*dx,*dy,*X,*Y;
n = opt[0];
offset = opt + 1;
start = opt + n + 2;
dx = opt + 2*n + 2;
dy = opt + 3*n + 2;
X = opt + 5*n + 2;
Y = opt + 6*n + 2;
for (r=0; r<n; r++) {if (tid < offset[r+1]) break;}
m = (tid - offset[r]);
k = m/(dx[r]*dy[r]);
j = (m - k*dx[r]*dy[r])/dx[r];
i = m - k*dx[r]*dy[r] - j*dx[r];
return (start[r] + k*X[r]*Y[r] + j*X[r] + i);
}
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
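/* Worked example (added for clarity, not in the original source): a <unit> of 16 PetscReals
   gives bs=16, BS=8, EQ=0, hence M=bs/BS=2 and MBS=16, so each thread moves 16 primitives per
   unit in the loops below; for the common VecScatter case bs=BS=1, EQ=1, the loops collapse to
   a single assignment per thread. */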
template<class Type,PetscInt BS,PetscInt EQ>
__global__ static void d_Pack(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,const Type *data,Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid<count; tid += grid_size) {
/* opt != NULL ==> idx == NULL, i.e., the indices have patterns but not contiguous;
opt == NULL && idx == NULL ==> the indices are contiguous;
*/
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) buf[s+i] = data[t+i];
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,Type *data,const Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) op(data[t+i],buf[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,Type *leafbuf)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = tid*MBS;
for (i=0; i<MBS; i++) leafbuf[l+i] = op(rootdata[r+i],leafbuf[l+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_ScatterAndOp(PetscInt bs,PetscInt count,PetscInt srcx,PetscInt srcy,PetscInt srcX,PetscInt srcY,PetscInt srcStart,const PetscInt* srcIdx,const Type *src,PetscInt dstx,PetscInt dsty,PetscInt dstX,PetscInt dstY,PetscInt dstStart,const PetscInt *dstIdx,Type *dst)
{
PetscInt i,j,k,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!srcIdx) { /* src is either contiguous or 3D */
k = tid/(srcx*srcy);
j = (tid - k*srcx*srcy)/srcx;
i = tid - k*srcx*srcy - j*srcx;
s = srcStart + k*srcX*srcY + j*srcX + i;
} else {
s = srcIdx[tid];
}
if (!dstIdx) { /* dst is either contiguous or 3D */
k = tid/(dstx*dsty);
j = (tid - k*dstx*dsty)/dstx;
i = tid - k*dstx*dsty - j*dstx;
t = dstStart + k*dstX*dstY + j*dstX + i;
} else {
t = dstIdx[tid];
}
s *= MBS;
t *= MBS;
for (i=0; i<MBS; i++) op(dst[t+i],src[s+i]);
}
}
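/* Index-decomposition example (illustrative, added): take a source subdomain with dx=srcx=4,
   dy=srcy=3 embedded in a padded block with row stride srcX=8 and slice stride srcX*srcY=8*5=40.
   Thread tid=14 then gets k=14/12=1, j=(14-12)/4=0, i=14-12-0=2, i.e. it reads element
   srcStart + 1*40 + 0*8 + 2 = srcStart + 42 before scaling by MBS. */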
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOpLocal(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,PetscInt leafstart,const PetscInt *leafopt,const PetscInt *leafidx,const Type *leafdata,Type *leafupdate)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = (leafopt? MapTidToIndex(leafopt,tid) : (leafidx? leafidx[tid] : leafstart+tid))*MBS;
for (i=0; i<MBS; i++) leafupdate[l+i] = op(rootdata[r+i],leafdata[l+i]);
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
template<typename Type> struct Minloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b);
return old;
}
};
template<typename Type> struct Maxloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
return old;
}
};
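/* Tie-breaking note (added): following MPI's MAXLOC/MINLOC semantics, when two entries share the
   same value a, the smaller index b wins, which is why both functors above take min(x.b,y.b). */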
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPIU_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
With bs>1 and a unit > 64 bits, the current element-wise atomic approach can not guarantee the whole
insertion is atomic. Hope no user codes rely on that.
*/
__device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((unsigned long long int*)address,__double_as_longlong(val)));}
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicExch(PetscInt* address,PetscInt val) {return (PetscInt)(atomicExch((unsigned long long int*)address,(unsigned long long int)val));}
#endif
template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
#if defined(PETSC_HAVE_COMPLEX)
#if defined(PETSC_USE_REAL_DOUBLE)
/* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values to the same location */
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
return old; /* The returned value may not be atomic. It can be mix of two ops. Caller should discard it. */
}
};
#elif defined(PETSC_USE_REAL_SINGLE)
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
return op(xp[0],yp[0]);
}
};
#endif
#endif
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicAdd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAdd((unsigned long long int*)address,(unsigned long long int)val);}
#endif
template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
template<> struct AtomicAdd<double> {
__device__ double operator() (double& x,double y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x,y);
#else
double *address = &x, val = y;
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template<> struct AtomicAdd<float> {
__device__ float operator() (float& x,float y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x,y);
#else
float *address = &x, val = y;
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
#if defined(PETSC_HAVE_COMPLEX)
template<> struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
return old; /* The returned value may not be atomic. It can be mix of two ops. Caller should discard it. */
}
};
#endif
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int* address,int val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val*assumed);
} while (assumed != old);
return (int)old;
}
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicMult(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val*(PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
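/* The same compare-and-swap recipe generalizes to any binary operation CUDA lacks natively:
   read the word, compute the new value, and publish it with atomicCAS, retrying while another
   thread raced us. Illustrative sketch only (not used by PETSc), shown for a hypothetical
   integer "atomicDiv" (assuming val != 0):
     __device__ static int atomicDiv(int *address, int val)
     {
       int old = *address, assumed;
       do {
         assumed = old;
         old     = atomicCAS(address, assumed, assumed / val);
       } while (assumed != old);
       return old;
     }
*/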
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
/*
atomicMin/Max(long long *, long long) are not in Nvidia's documentation. But on OLCF Summit we found
atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
This causes compilation errors with pgi compilers and 64-bit indices:
error: function "atomicMin(long long *, long long)" has already been defined
So we add extra conditions defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
*/
#if defined(PETSC_USE_64BIT_INDICES) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
__device__ static PetscInt atomicMin(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMin(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicMax(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMax(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
atomicOr() and atomicXor are similar.
*/
#if defined(PETSC_USE_64BIT_INDICES)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin(PetscInt* address,PetscInt val) */
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val & (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicOr(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val | (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val ^ (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#else
/*
See also comments at atomicMin(PetscInt* address,PetscInt val)
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAnd((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicOr (PetscInt* address,PetscInt val) {return (PetscInt)atomicOr ((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val) {return (PetscInt)atomicXor((unsigned long long int*)address,(unsigned long long int)val);}
*/
#endif
#endif
template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without definition makes any instantiation not using given specializations erroneous at compile time,
which is what we want since we only support 32-bit and 64-bit integers.
*/
template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
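/* Illustrative note (added): only the 4-byte and 8-byte specializations below are defined, so an
   instantiation such as AtomicLogical<short,land<short>,2> (sizeof(short) is typically 2) has no
   body and fails at compile time, which is the intended guard against unsupported widths. */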
template<typename Type,class Op>
struct AtomicLogical<Type,Op,4> {
__device__ Type operator()(Type& x,Type y) const {
int *address_as_int = (int*)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
template<typename Type,class Op>
struct AtomicLogical<Type,Op,8> {
__device__ Type operator()(Type& x,Type y) const {
unsigned long long int *address_as_ull = (unsigned long long int*)(&x);
unsigned long long int old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note land/lor/lxor below are different from LAND etc above. Here we pass arguments by value and return result of ops (not old value) */
template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
/*====================================================================================*/
/* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Pack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,const void *data,void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_Pack<Type,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(const Type*)data,(Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
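/* Launch-shape note (illustrative, not from the original source): because d_Pack and the other
   kernels above use a grid-stride loop, nblocks is clamped to maxResidentThreadsPerGPU/nthreads
   so no more blocks are launched than can be resident at once. For example, with
   count = 10,000,000, nthreads = 256 and a GPU holding 80 SMs * 2048 resident threads = 163,840
   threads, the unclamped 39,063 blocks are reduced to 640 and each thread strides over ~61 units. */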
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_UnpackAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_FetchAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(Type*)data,(Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndOp(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscInt srcx=0,srcy=0,srcX=0,srcY=0,dstx=0,dsty=0,dstX=0,dstY=0;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
/* The 3D shape of source subdomain may be different than that of the destination, which makes it difficult to use CUDA 3D grid and block */
if (srcOpt) {srcx = srcOpt->dx[0]; srcy = srcOpt->dy[0]; srcX = srcOpt->X[0]; srcY = srcOpt->Y[0]; srcStart = srcOpt->start[0]; srcIdx = NULL;}
else if (!srcIdx) {srcx = srcX = count; srcy = srcY = 1;}
if (dstOpt) {dstx = dstOpt->dx[0]; dsty = dstOpt->dy[0]; dstX = dstOpt->X[0]; dstY = dstOpt->Y[0]; dstStart = dstOpt->start[0]; dstIdx = NULL;}
else if (!dstIdx) {dstx = dstX = count; dsty = dstY = 1;}
hipLaunchKernelGGL(( d_ScatterAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,srcx,srcy,srcX,srcY,srcStart,srcIdx,(const Type*)src,dstx,dsty,dstX,dstY,dstStart,dstIdx,(Type*)dst);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/* Specialization for Insert since we may use hipMemcpyAsync */
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndInsert(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
/*src and dst are contiguous */
if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
cerr = hipMemcpyAsync((Type*)dst+dstStart*link->bs,(const Type*)src+srcStart*link->bs,count*link->unitbytes,hipMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
ierr = ScatterAndOp<Type,Insert<Type>,BS,EQ>(link,count,srcStart,srcOpt,srcIdx,src,dstStart,dstOpt,dstIdx,dst);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOpLocal(PetscSFLink link,PetscInt count,PetscInt rootstart,PetscSFPackOpt rootopt,const PetscInt *rootidx,void *rootdata,PetscInt leafstart,PetscSFPackOpt leafopt,const PetscInt *leafidx,const void *leafdata,void *leafupdate)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *rarray = rootopt ? rootopt->array : NULL;
const PetscInt *larray = leafopt ? leafopt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_FetchAndOpLocal<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,rootstart,rarray,rootidx,(Type*)rootdata,leafstart,larray,leafidx,(const Type*)leafdata,(Type*)leafupdate);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_RealType(PetscSFLink link)
{
/* Pack/unpack for remote communication */
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp <Type,Insert<Type> ,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp <Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp <Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
/* Scatter for local communication */
link->d_ScatterAndInsert = ScatterAndInsert<Type ,BS,EQ>; /* Has special optimizations */
link->d_ScatterAndAdd = ScatterAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp <Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal <Type,Add <Type> ,BS,EQ>;
/* Atomic versions when there are data-race possibilities */
link->da_UnpackAndInsert = UnpackAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal <Type,AtomicAdd<Type> ,BS,EQ>;
}
/* Have this templated class to specialize for char integers */
template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFLink link) {
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_ScatterAndLAND = ScatterAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_ScatterAndLOR = ScatterAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_ScatterAndLXOR = ScatterAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_ScatterAndBAND = ScatterAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_ScatterAndBOR = ScatterAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_ScatterAndBXOR = ScatterAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type,AtomicAdd<Type>,BS,EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template<typename Type,PetscInt BS,PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
static void Init(PetscSFLink link) {/* Do nothing, so that the function pointers stay NULL */}
};
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_IntegerType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type,Max<Type> ,BS,EQ>;
link->d_ScatterAndLAND = ScatterAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_ScatterAndLOR = ScatterAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_ScatterAndLXOR = ScatterAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_ScatterAndBAND = ScatterAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_ScatterAndBOR = ScatterAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_ScatterAndBXOR = ScatterAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_ComplexType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type>,BS,EQ>;
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {int a; int b; } PairInt;
typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
template<typename Type>
static void PackInit_PairType(PetscSFLink link)
{
link->d_Pack = Pack<Type,1,1>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,1,1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
link->d_ScatterAndInsert = ScatterAndOp<Type,Insert<Type>,1,1>;
link->d_ScatterAndMaxloc = ScatterAndOp<Type,Maxloc<Type>,1,1>;
link->d_ScatterAndMinloc = ScatterAndOp<Type,Minloc<Type>,1,1>;
/* Atomics for pair types are not implemented yet */
}
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_DumbType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
/* Atomics for dumb types are not implemented yet */
}
/* Some device-specific utilities */
static PetscErrorCode PetscSFLinkSyncDevice_Cuda(PetscSFLink link)
{
hipError_t cerr;
PetscFunctionBegin;
cerr = hipDeviceSynchronize();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkSyncStream_Cuda(PetscSFLink link)
{
hipError_t cerr;
PetscFunctionBegin;
cerr = hipStreamSynchronize(link->stream);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkMemcpy_Cuda(PetscSFLink link,PetscMemType dstmtype,void* dst,PetscMemType srcmtype,const void*src,size_t n)
{
PetscFunctionBegin;
enum hipMemcpyKind kinds[2][2] = {{hipMemcpyHostToHost,hipMemcpyHostToDevice},{hipMemcpyDeviceToHost,hipMemcpyDeviceToDevice}};
if (n) {
if (dstmtype == PETSC_MEMTYPE_HOST && srcmtype == PETSC_MEMTYPE_HOST) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
PetscErrorCode ierr = PetscMemcpy(dst,src,n);CHKERRQ(ierr);
} else { /* Assume PETSC_MEMTYPE_HOST=0, PETSC_MEMTYPE_DEVICE=1 */
hipError_t err = hipMemcpyAsync(dst,src,n,kinds[srcmtype][dstmtype],link->stream);CHKERRCUDA(err);
}
}
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFMalloc_Cuda(PetscMemType mtype,size_t size,void** ptr)
{
PetscFunctionBegin;
if (mtype == PETSC_MEMTYPE_HOST) {PetscErrorCode ierr = PetscMalloc(size,ptr);CHKERRQ(ierr);}
else if (mtype == PETSC_MEMTYPE_DEVICE) {hipError_t err = hipMalloc(ptr,size);CHKERRCUDA(err);}
else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFFree_Cuda(PetscMemType mtype,void* ptr)
{
PetscFunctionBegin;
if (mtype == PETSC_MEMTYPE_HOST) {PetscErrorCode ierr = PetscFree(ptr);CHKERRQ(ierr);}
else if (mtype == PETSC_MEMTYPE_DEVICE) {hipError_t err = hipFree(ptr);CHKERRCUDA(err);}
else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d",(int)mtype);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Main driver to init MPI datatype on device */
/*====================================================================================*/
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFLinkSetUp_Cuda(PetscSF sf,PetscSFLink link,MPI_Datatype unit)
{
PetscErrorCode ierr;
hipError_t err;
PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
PetscBool is2Int,is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex=0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(0);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);CHKERRQ(ierr);
/* MPI_CHAR is treated below as a dumb type that does not support reduction according to MPI standard */
ierr = MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);CHKERRQ(ierr);
#endif
ierr = MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);CHKERRQ(ierr);
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
} else if (nPetscInt) {
if (nPetscInt == 8) PackInit_IntegerType<PetscInt,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<PetscInt,8,0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<PetscInt,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<PetscInt,4,0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<PetscInt,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<PetscInt,2,0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<PetscInt,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<PetscInt,1,0>(link);
#if defined(PETSC_USE_64BIT_INDICES)
} else if (nInt) {
if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
#endif
} else if (nSignedChar) {
if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
} else if (nUnsignedChar) {
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
#endif
} else {
MPI_Aint lb,nbyte;
ierr = MPI_Type_get_extent(unit,&lb,&nbyte);CHKERRQ(ierr);
if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
if (nbyte % sizeof(int)) { /* If the type size is not a multiple of int */
if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
} else {
nInt = nbyte / sizeof(int);
if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
}
}
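/* Dispatch example (illustrative, added): the chain above picks the widest BS in {8,4,2,1} that
   divides the unit's primitive count, e.g. a unit of 12 PetscReals takes the nPetscReal%4==0
   branch and instantiates PackInit_RealType<PetscReal,4,0>, while 8 PetscReals hit the EQ=1 case
   PackInit_RealType<PetscReal,8,1>. */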
if (!sf->use_default_stream) {err = hipStreamCreate(&link->stream);CHKERRCUDA(err);}
if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
int device;
struct hipDeviceProp_t props;
err = hipGetDevice(&device);CHKERRCUDA(err);
err = hipGetDeviceProperties(&props,device);CHKERRCUDA(err);
sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor*props.multiProcessorCount;
}
link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
link->d_SyncDevice = PetscSFLinkSyncDevice_Cuda;
link->d_SyncStream = PetscSFLinkSyncStream_Cuda;
link->Memcpy = PetscSFLinkMemcpy_Cuda;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(0);
}
| 583ec16e14126762d439e363cace3ae909e5ee43.cu | #include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <cuda_runtime.h>
#include <petsccublas.h> /* For CHKERRCUDA */
/* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
__device__ static inline PetscInt MapTidToIndex(const PetscInt *opt,PetscInt tid)
{
PetscInt i,j,k,m,n,r;
const PetscInt *offset,*start,*dx,*dy,*X,*Y;
n = opt[0];
offset = opt + 1;
start = opt + n + 2;
dx = opt + 2*n + 2;
dy = opt + 3*n + 2;
X = opt + 5*n + 2;
Y = opt + 6*n + 2;
for (r=0; r<n; r++) {if (tid < offset[r+1]) break;}
m = (tid - offset[r]);
k = m/(dx[r]*dy[r]);
j = (m - k*dx[r]*dy[r])/dx[r];
i = m - k*dx[r]*dy[r] - j*dx[r];
return (start[r] + k*X[r]*Y[r] + j*X[r] + i);
}
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
template<class Type,PetscInt BS,PetscInt EQ>
__global__ static void d_Pack(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,const Type *data,Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid<count; tid += grid_size) {
/* opt != NULL ==> idx == NULL, i.e., the indices have patterns but not contiguous;
opt == NULL && idx == NULL ==> the indices are contiguous;
*/
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) buf[s+i] = data[t+i];
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,Type *data,const Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) op(data[t+i],buf[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,Type *leafbuf)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = tid*MBS;
for (i=0; i<MBS; i++) leafbuf[l+i] = op(rootdata[r+i],leafbuf[l+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_ScatterAndOp(PetscInt bs,PetscInt count,PetscInt srcx,PetscInt srcy,PetscInt srcX,PetscInt srcY,PetscInt srcStart,const PetscInt* srcIdx,const Type *src,PetscInt dstx,PetscInt dsty,PetscInt dstX,PetscInt dstY,PetscInt dstStart,const PetscInt *dstIdx,Type *dst)
{
PetscInt i,j,k,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!srcIdx) { /* src is either contiguous or 3D */
k = tid/(srcx*srcy);
j = (tid - k*srcx*srcy)/srcx;
i = tid - k*srcx*srcy - j*srcx;
s = srcStart + k*srcX*srcY + j*srcX + i;
} else {
s = srcIdx[tid];
}
if (!dstIdx) { /* dst is either contiguous or 3D */
k = tid/(dstx*dsty);
j = (tid - k*dstx*dsty)/dstx;
i = tid - k*dstx*dsty - j*dstx;
t = dstStart + k*dstX*dstY + j*dstX + i;
} else {
t = dstIdx[tid];
}
s *= MBS;
t *= MBS;
for (i=0; i<MBS; i++) op(dst[t+i],src[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOpLocal(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,PetscInt leafstart,const PetscInt *leafopt,const PetscInt *leafidx,const Type *leafdata,Type *leafupdate)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = (leafopt? MapTidToIndex(leafopt,tid) : (leafidx? leafidx[tid] : leafstart+tid))*MBS;
for (i=0; i<MBS; i++) leafupdate[l+i] = op(rootdata[r+i],leafdata[l+i]);
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
template<typename Type> struct Minloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b);
return old;
}
};
template<typename Type> struct Maxloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPIU_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
With bs>1 and a unit > 64 bits, the current element-wise atomic approach can not guarantee the whole
insertion is atomic. Hope no user codes rely on that.
*/
__device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((unsigned long long int*)address,__double_as_longlong(val)));}
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicExch(PetscInt* address,PetscInt val) {return (PetscInt)(atomicExch((unsigned long long int*)address,(unsigned long long int)val));}
#endif
template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
#if defined(PETSC_HAVE_COMPLEX)
#if defined(PETSC_USE_REAL_DOUBLE)
/* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values to the same location */
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
return old; /* The returned value may not be atomic. It can be mix of two ops. Caller should discard it. */
}
};
#elif defined(PETSC_USE_REAL_SINGLE)
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
return op(xp[0],yp[0]);
}
};
#endif
#endif
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicAdd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAdd((unsigned long long int*)address,(unsigned long long int)val);}
#endif
template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
template<> struct AtomicAdd<double> {
__device__ double operator() (double& x,double y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x,y);
#else
double *address = &x, val = y;
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template<> struct AtomicAdd<float> {
__device__ float operator() (float& x,float y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x,y);
#else
float *address = &x, val = y;
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
#if defined(PETSC_HAVE_COMPLEX)
template<> struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
return old; /* The returned value may not be atomic; it can be a mix of the two ops. The caller should discard it. */
}
};
#endif
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
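/* The emulations below all follow the standard atomicCAS read-modify-write recipe, sketched
   here for reference (OP stands for the desired binary operation):
     old = *address;
     do {
       assumed = old;
       old     = atomicCAS(address, assumed, OP(assumed, val));
     } while (assumed != old);
     return old;
   The loop retries until no other thread has modified *address between the read and the CAS. */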
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int* address,int val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val*assumed);
} while (assumed != old);
return (int)old;
}
#if defined(PETSC_USE_64BIT_INDICES)
__device__ static PetscInt atomicMult(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val*(PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
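/* CUDA provides atomicMin/atomicMax only for integer types, so the floating-point versions
   below are emulated with atomicCAS on the raw bit pattern of the value; they inherit the
   PetscMin/PetscMax comparison semantics and make no special provision for NaN. */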
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
/*
atomicMin/Max(long long *, long long) are not in Nvidia's documentation. But on OLCF Summit we found
atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
This causes compilation errors with pgi compilers and 64-bit indices:
error: function "atomicMin(long long *, long long)" has already been defined
So we add extra conditions defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
*/
#if defined(PETSC_USE_64BIT_INDICES) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
__device__ static PetscInt atomicMin(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMin(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicMax(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(PetscMax(val,(PetscInt)assumed)));
} while (assumed != old);
return (PetscInt)old;
}
#endif
template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
atomicOr() and atomicXor() are similar.
*/
#if defined(PETSC_USE_64BIT_INDICES)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin(PetscInt* address,PetscInt val) */
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val & (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicOr(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val | (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(val ^ (PetscInt)assumed));
} while (assumed != old);
return (PetscInt)old;
}
#else
/*
See also comments at atomicMin(PetscInt* address,PetscInt val)
__device__ static PetscInt atomicAnd(PetscInt* address,PetscInt val) {return (PetscInt)atomicAnd((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicOr (PetscInt* address,PetscInt val) {return (PetscInt)atomicOr ((unsigned long long int*)address,(unsigned long long int)val);}
__device__ static PetscInt atomicXor(PetscInt* address,PetscInt val) {return (PetscInt)atomicXor((unsigned long long int*)address,(unsigned long long int)val);}
*/
#endif
#endif
template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without definition makes any instantiation not using given specializations erroneous at compile time,
which is what we want since we only support 32-bit and 64-bit integers.
*/
template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
template<typename Type,class Op>
struct AtomicLogical<Type,Op,4> {
__device__ Type operator()(Type& x,Type y) const {
int *address_as_int = (int*)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
template<typename Type,class Op>
struct AtomicLogical<Type,Op,8> {
__device__ Type operator()(Type& x,Type y) const {
unsigned long long int *address_as_ull = (unsigned long long int*)(&x);
unsigned long long int old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note land/lor/lxor below are different from LAND etc above. Here we pass arguments by value and return result of ops (not old value) */
template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
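/* AtomicLAND/LOR/LXOR dispatch on sizeof(Type): 4-byte types go through the int-based
   atomicCAS loop and 8-byte types through the unsigned long long int one, with the logical
   functor applied to the reinterpreted old value inside the loop. Any other size fails to
   compile because the primary AtomicLogical template is left undefined. */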
/*====================================================================================*/
/* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
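/* In the wrappers below, BS and EQ are compile-time hints forwarded to the packing kernels
   (d_Pack, d_UnpackAndOp, ...) used here: BS is the block size of the unit type and EQ records
   whether the runtime block size link->bs equals BS, so the compiler can specialize the common
   case. Clamping nblocks to link->maxResidentThreadsPerGPU/nthreads assumes those kernels
   process elements in a grid-stride loop, so a smaller grid still covers all 'count' items. */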
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Pack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,const void *data,void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_Pack<Type,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(const Type*)data,(Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_UnpackAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_FetchAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndOp(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscInt srcx=0,srcy=0,srcX=0,srcY=0,dstx=0,dsty=0,dstX=0,dstY=0;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
/* The 3D shape of source subdomain may be different than that of the destination, which makes it difficult to use CUDA 3D grid and block */
if (srcOpt) {srcx = srcOpt->dx[0]; srcy = srcOpt->dy[0]; srcX = srcOpt->X[0]; srcY = srcOpt->Y[0]; srcStart = srcOpt->start[0]; srcIdx = NULL;}
else if (!srcIdx) {srcx = srcX = count; srcy = srcY = 1;}
if (dstOpt) {dstx = dstOpt->dx[0]; dsty = dstOpt->dy[0]; dstX = dstOpt->X[0]; dstY = dstOpt->Y[0]; dstStart = dstOpt->start[0]; dstIdx = NULL;}
else if (!dstIdx) {dstx = dstX = count; dsty = dstY = 1;}
d_ScatterAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,srcx,srcy,srcX,srcY,srcStart,srcIdx,(const Type*)src,dstx,dsty,dstX,dstY,dstStart,dstIdx,(Type*)dst);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/* Specialization for Insert since we may use cudaMemcpyAsync */
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndInsert(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
/*src and dst are contiguous */
if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
cerr = cudaMemcpyAsync((Type*)dst+dstStart*link->bs,(const Type*)src+srcStart*link->bs,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
ierr = ScatterAndOp<Type,Insert<Type>,BS,EQ>(link,count,srcStart,srcOpt,srcIdx,src,dstStart,dstOpt,dstIdx,dst);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOpLocal(PetscSFLink link,PetscInt count,PetscInt rootstart,PetscSFPackOpt rootopt,const PetscInt *rootidx,void *rootdata,PetscInt leafstart,PetscSFPackOpt leafopt,const PetscInt *leafidx,const void *leafdata,void *leafupdate)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *rarray = rootopt ? rootopt->array : NULL;
const PetscInt *larray = leafopt ? leafopt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_FetchAndOpLocal<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,rootstart,rarray,rootidx,(Type*)rootdata,leafstart,larray,leafidx,(const Type*)leafdata,(Type*)leafupdate);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_RealType(PetscSFLink link)
{
/* Pack/unpack for remote communication */
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp <Type,Insert<Type> ,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp <Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp <Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
/* Scatter for local communication */
link->d_ScatterAndInsert = ScatterAndInsert<Type ,BS,EQ>; /* Has special optimizations */
link->d_ScatterAndAdd = ScatterAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp <Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal <Type,Add <Type> ,BS,EQ>;
/* Atomic versions when there are data-race possibilities */
link->da_UnpackAndInsert = UnpackAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal <Type,AtomicAdd<Type> ,BS,EQ>;
}
/* Use this templated class so we can specialize it for char-sized integers */
template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFLink link) {
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_ScatterAndLAND = ScatterAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_ScatterAndLOR = ScatterAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_ScatterAndLXOR = ScatterAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_ScatterAndBAND = ScatterAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_ScatterAndBOR = ScatterAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_ScatterAndBXOR = ScatterAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type,AtomicAdd<Type>,BS,EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template<typename Type,PetscInt BS,PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
  static void Init(PetscSFLink link) {/* Do nothing, leaving the atomic function pointers NULL, since CUDA has no atomics on chars */}
};
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_IntegerType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type,Max<Type> ,BS,EQ>;
link->d_ScatterAndLAND = ScatterAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_ScatterAndLOR = ScatterAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_ScatterAndLXOR = ScatterAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_ScatterAndBAND = ScatterAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_ScatterAndBOR = ScatterAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_ScatterAndBXOR = ScatterAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_ComplexType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type>,BS,EQ>;
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {int a; int b; } PairInt;
typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
template<typename Type>
static void PackInit_PairType(PetscSFLink link)
{
link->d_Pack = Pack<Type,1,1>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,1,1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
link->d_ScatterAndInsert = ScatterAndOp<Type,Insert<Type>,1,1>;
link->d_ScatterAndMaxloc = ScatterAndOp<Type,Maxloc<Type>,1,1>;
link->d_ScatterAndMinloc = ScatterAndOp<Type,Minloc<Type>,1,1>;
/* Atomics for pair types are not implemented yet */
}
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_DumbType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = UnpackAndOp<Type,Insert<Type>,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
/* Atomics for dumb types are not implemented yet */
}
/* Some device-specific utilities */
static PetscErrorCode PetscSFLinkSyncDevice_Cuda(PetscSFLink link)
{
cudaError_t cerr;
PetscFunctionBegin;
cerr = cudaDeviceSynchronize();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkSyncStream_Cuda(PetscSFLink link)
{
cudaError_t cerr;
PetscFunctionBegin;
cerr = cudaStreamSynchronize(link->stream);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkMemcpy_Cuda(PetscSFLink link,PetscMemType dstmtype,void* dst,PetscMemType srcmtype,const void*src,size_t n)
{
PetscFunctionBegin;
enum cudaMemcpyKind kinds[2][2] = {{cudaMemcpyHostToHost,cudaMemcpyHostToDevice},{cudaMemcpyDeviceToHost,cudaMemcpyDeviceToDevice}};
if (n) {
if (dstmtype == PETSC_MEMTYPE_HOST && srcmtype == PETSC_MEMTYPE_HOST) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
PetscErrorCode ierr = PetscMemcpy(dst,src,n);CHKERRQ(ierr);
} else { /* Assume PETSC_MEMTYPE_HOST=0, PETSC_MEMTYPE_DEVICE=1 */
cudaError_t err = cudaMemcpyAsync(dst,src,n,kinds[srcmtype][dstmtype],link->stream);CHKERRCUDA(err);
}
}
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFMalloc_Cuda(PetscMemType mtype,size_t size,void** ptr)
{
PetscFunctionBegin;
if (mtype == PETSC_MEMTYPE_HOST) {PetscErrorCode ierr = PetscMalloc(size,ptr);CHKERRQ(ierr);}
else if (mtype == PETSC_MEMTYPE_DEVICE) {cudaError_t err = cudaMalloc(ptr,size);CHKERRCUDA(err);}
else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFFree_Cuda(PetscMemType mtype,void* ptr)
{
PetscFunctionBegin;
if (mtype == PETSC_MEMTYPE_HOST) {PetscErrorCode ierr = PetscFree(ptr);CHKERRQ(ierr);}
else if (mtype == PETSC_MEMTYPE_DEVICE) {cudaError_t err = cudaFree(ptr);CHKERRCUDA(err);}
else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d",(int)mtype);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Main driver to init MPI datatype on device */
/*====================================================================================*/
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFLinkSetUp_Cuda(PetscSF sf,PetscSFLink link,MPI_Datatype unit)
{
PetscErrorCode ierr;
cudaError_t err;
PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
PetscBool is2Int,is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex=0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(0);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);CHKERRQ(ierr);
/* MPI_CHAR is treated below as a dumb type that does not support reduction according to MPI standard */
ierr = MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);CHKERRQ(ierr);
#endif
ierr = MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);CHKERRQ(ierr);
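/* Example of the dispatch below: a unit of one MPIU_REAL selects PackInit_RealType<PetscReal,1,1>,
   a unit of 3 MPIU_REALs falls through to PackInit_RealType<PetscReal,1,0>, and a unit of 16
   MPIU_REALs selects PackInit_RealType<PetscReal,8,0> (the largest BS in {8,4,2,1} dividing the count). */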
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
} else if (nPetscInt) {
if (nPetscInt == 8) PackInit_IntegerType<PetscInt,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<PetscInt,8,0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<PetscInt,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<PetscInt,4,0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<PetscInt,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<PetscInt,2,0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<PetscInt,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<PetscInt,1,0>(link);
#if defined(PETSC_USE_64BIT_INDICES)
} else if (nInt) {
if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
#endif
} else if (nSignedChar) {
if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
} else if (nUnsignedChar) {
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
#endif
} else {
MPI_Aint lb,nbyte;
ierr = MPI_Type_get_extent(unit,&lb,&nbyte);CHKERRQ(ierr);
if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
if (nbyte % sizeof(int)) { /* If the type size is not a multiple of sizeof(int) */
if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
} else {
nInt = nbyte / sizeof(int);
if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
}
}
if (!sf->use_default_stream) {err = cudaStreamCreate(&link->stream);CHKERRCUDA(err);}
if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
int device;
struct cudaDeviceProp props;
err = cudaGetDevice(&device);CHKERRCUDA(err);
err = cudaGetDeviceProperties(&props,device);CHKERRCUDA(err);
sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor*props.multiProcessorCount;
}
link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
link->d_SyncDevice = PetscSFLinkSyncDevice_Cuda;
link->d_SyncStream = PetscSFLinkSyncStream_Cuda;
link->Memcpy = PetscSFLinkMemcpy_Cuda;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(0);
}
|
7e3f4b611d3f0c2efe757e7c0acbabb584ccb0fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "weightUpdate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_W = NULL;
hipMalloc(&d_W, XSIZE*YSIZE);
float *d_D = NULL;
hipMalloc(&d_D, XSIZE*YSIZE);
float *d_N = NULL;
hipMalloc(&d_N, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(weightUpdate, dim3(gridBlock), dim3(threadBlock), 0, 0, d_W, d_D, d_N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(weightUpdate, dim3(gridBlock), dim3(threadBlock), 0, 0, d_W, d_D, d_N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(weightUpdate, dim3(gridBlock), dim3(threadBlock), 0, 0, d_W, d_D, d_N);
}
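// Note: kernel launches are asynchronous; without a hipDeviceSynchronize() before reading the
// clock below, the measured time mostly reflects launch/enqueue overhead, not kernel runtime.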
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7e3f4b611d3f0c2efe757e7c0acbabb584ccb0fb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "weightUpdate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_W = NULL;
cudaMalloc(&d_W, XSIZE*YSIZE);
float *d_D = NULL;
cudaMalloc(&d_D, XSIZE*YSIZE);
float *d_N = NULL;
cudaMalloc(&d_N, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
weightUpdate<<<gridBlock,threadBlock>>>(d_W,d_D,d_N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
weightUpdate<<<gridBlock,threadBlock>>>(d_W,d_D,d_N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
weightUpdate<<<gridBlock,threadBlock>>>(d_W,d_D,d_N);
}
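// Without a cudaDeviceSynchronize() here, the elapsed time computed below mostly measures
// kernel launch overhead rather than actual execution time, since launches are asynchronous.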
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
56209c803a0ded19b6ea1999c10d6d5c9d41d0fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "badCudaRenderer.h"
#include "image.h"
#include "sceneLoader.h"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU.
__constant__ GlobalConstants cuConstRendererParams;
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void badKernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
//
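// One common fix (not implemented in this deliberately "bad" renderer) is to invert the
// parallelization: assign each pixel or image tile to a thread, let it find the circles that
// overlap it, and composite them in circle order. Each pixel's read-modify-write then stays
// private to a single thread, so no atomics are needed and the compositing order is preserved.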
__global__ void badKernelRenderCircles() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// for all pixels in the bounding box
for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) {
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + screenMinX)]);
for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) {
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
shadePixel(index, pixelCenterNorm, p, imgPtr);
imgPtr++;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
BadCudaRenderer::BadCudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
BadCudaRenderer::~BadCudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
BadCudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
BadCudaRenderer::loadScene(SceneName scene, bool benchMode) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, color, radius, benchMode);
}
void
BadCudaRenderer::setup() {
int deviceCount = 0;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Inizializzo CUDA per il Renderer\n");
printf("Trovati %d dispositivi CUDA\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Dispositivo %d: %s\n", i, deviceProps.name);
printf(" Streaming Multiprocessors: %d\n", deviceProps.multiProcessorCount);
printf(" Memoria Globale: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" Compute Capability: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
BadCudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
BadCudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
hipLaunchKernelGGL(( badKernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
hipDeviceSynchronize();
}
void
BadCudaRenderer::render() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
hipLaunchKernelGGL(( badKernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, );
hipDeviceSynchronize();
}
| 56209c803a0ded19b6ea1999c10d6d5c9d41d0fa.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "badCudaRenderer.h"
#include "image.h"
#include "sceneLoader.h"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU.
__constant__ GlobalConstants cuConstRendererParams;
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void badKernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
//
__global__ void badKernelRenderCircles() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// for all pixels in the bounding box
for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) {
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + screenMinX)]);
for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) {
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
shadePixel(index, pixelCenterNorm, p, imgPtr);
imgPtr++;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
BadCudaRenderer::BadCudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
BadCudaRenderer::~BadCudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
BadCudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
BadCudaRenderer::loadScene(SceneName scene, bool benchMode) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, color, radius, benchMode);
}
void
BadCudaRenderer::setup() {
int deviceCount = 0;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Inizializzo CUDA per il Renderer\n");
printf("Trovati %d dispositivi CUDA\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Dispositivo %d: %s\n", i, deviceProps.name);
printf(" Streaming Multiprocessors: %d\n", deviceProps.multiProcessorCount);
printf(" Memoria Globale: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" Compute Capability: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
BadCudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
BadCudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
badKernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
cudaDeviceSynchronize();
}
void
BadCudaRenderer::render() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
badKernelRenderCircles<<<gridDim, blockDim>>>();
cudaDeviceSynchronize();
}
|
2efa306a95fc71bee80a1923788bfb24542f8038.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
extern "C" {
void start_timer();
void stop_timer(float *time);
__global__ void vec_add_kernel(float *c, float *a, float *b, int n);
}
int compare_arrays(float *c, float *d, int n);
void vec_add(float *c, float *a, float *b, int n) {
for (int i=0; i<n; i++) {
c[i] = a[i] + b[i];
}
}
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // global thread index: one output element per thread
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main() {
int n = 5e7; //problem size
float time;
hipError_t err;
//allocate arrays and fill them
float *a = (float *) malloc(n * sizeof(float));
float *b = (float *) malloc(n * sizeof(float));
float *c = (float *) malloc(n * sizeof(float));
float *d = (float *) malloc(n * sizeof(float));
for (int i=0; i<n; i++) {
a[i] = 1.0 / rand();
b[i] = 1.0 / rand();
c[i] = 0.0;
d[i] = 0.0;
}
//measure the CPU function
start_timer();
vec_add(c, a, b, n);
stop_timer(&time);
printf("vec_add took %.3f ms\n", time);
//allocate GPU memory
float *d_a; float *d_b; float *d_c;
err = hipMalloc((void **)&d_a, n*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMalloc d_a: %s\n", hipGetErrorString( err ));
err = hipMalloc((void **)&d_b, n*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMalloc d_b: %s\n", hipGetErrorString( err ));
err = hipMalloc((void **)&d_c, n*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMalloc d_c: %s\n", hipGetErrorString( err ));
//copy the input data to the GPU
err = hipMemcpy(d_a, a, n*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) fprintf(stderr, "Error in hipMemcpy host to device a: %s\n", hipGetErrorString( err ));
err = hipMemcpy(d_b, b, n*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) fprintf(stderr, "Error in hipMemcpy host to device b: %s\n", hipGetErrorString( err ));
//zero the output array
err = hipMemset(d_c, 0, n*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMemset c: %s\n", hipGetErrorString( err ));
//setup the grid and thread blocks
int block_size = 1024; //thread block size
int nblocks = int(ceilf(n/(float)block_size)); //problem size divided by thread block size rounded up
dim3 grid(nblocks, 1);
dim3 threads(block_size, 1, 1);
//measure the GPU function
hipDeviceSynchronize();
start_timer();
hipLaunchKernelGGL(( vec_add_kernel), dim3(grid), dim3(threads), 0, 0, d_c, d_a, d_b, n);
hipDeviceSynchronize();
stop_timer(&time);
printf("vec_add_kernel took %.3f ms\n", time);
//check to see if all went well
err = hipGetLastError();
if (err != hipSuccess) fprintf(stderr, "Error during kernel launch vec_add_kernel: %s\n", hipGetErrorString( err ));
//copy the result back to host memory
err = hipMemcpy(d, d_c, n*sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess) fprintf(stderr, "Error in hipMemcpy device to host c: %s\n", hipGetErrorString( err ));
//check the result
int errors = compare_arrays(c, d, n);
if (errors > 0) {
printf("TEST FAILED!\n");
} else {
printf("TEST PASSED!\n");
}
//clean up
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(a);
free(b);
free(c);
free(d);
return 0;
}
int compare_arrays(float *a1, float *a2, int n) {
int errors = 0;
int print = 0;
for (int i=0; i<n; i++) {
if (isnan(a1[i]) || isnan(a2[i])) {
errors++;
if (print < 10) {
print++;
fprintf(stderr, "Error NaN detected at i=%d,\t a1= %10.7e \t a2= \t %10.7e\n",i,a1[i],a2[i]);
}
}
unsigned int int_a1 = *(unsigned int *)(a1+i);
unsigned int int_a2 = *(unsigned int *)(a2+i);
unsigned int dist = (unsigned int)0;
if (int_a1 > int_a2) {
dist = int_a1 - int_a2;
} else {
dist = int_a2 - int_a1;
}
if (dist > 0) {
errors++;
if (print < 10) {
print++;
fprintf(stderr, "Error detected at i=%d, \t a1= \t %10.7e \t a2= \t %10.7e \t ulp_dist=\t %u\n",i,a1[i],a2[i],dist);
}
}
}
return errors;
}
| 2efa306a95fc71bee80a1923788bfb24542f8038.cu | #include <stdio.h>
extern "C" {
void start_timer();
void stop_timer(float *time);
__global__ void vec_add_kernel(float *c, float *a, float *b, int n);
}
int compare_arrays(float *c, float *d, int n);
void vec_add(float *c, float *a, float *b, int n) {
for (int i=0; i<n; i++) {
c[i] = a[i] + b[i];
}
}
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // global element index: one thread per element
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main() {
int n = 5e7; //problem size
float time;
cudaError_t err;
//allocate arrays and fill them
float *a = (float *) malloc(n * sizeof(float));
float *b = (float *) malloc(n * sizeof(float));
float *c = (float *) malloc(n * sizeof(float));
float *d = (float *) malloc(n * sizeof(float));
for (int i=0; i<n; i++) {
a[i] = 1.0 / rand();
b[i] = 1.0 / rand();
c[i] = 0.0;
d[i] = 0.0;
}
//measure the CPU function
start_timer();
vec_add(c, a, b, n);
stop_timer(&time);
printf("vec_add took %.3f ms\n", time);
//allocate GPU memory
float *d_a; float *d_b; float *d_c;
err = cudaMalloc((void **)&d_a, n*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMalloc d_a: %s\n", cudaGetErrorString( err ));
err = cudaMalloc((void **)&d_b, n*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMalloc d_b: %s\n", cudaGetErrorString( err ));
err = cudaMalloc((void **)&d_c, n*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMalloc d_c: %s\n", cudaGetErrorString( err ));
//copy the input data to the GPU
err = cudaMemcpy(d_a, a, n*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemcpy host to device a: %s\n", cudaGetErrorString( err ));
err = cudaMemcpy(d_b, b, n*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemcpy host to device b: %s\n", cudaGetErrorString( err ));
//zero the output array
err = cudaMemset(d_c, 0, n*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemset c: %s\n", cudaGetErrorString( err ));
//setup the grid and thread blocks
int block_size = 1024; //thread block size
int nblocks = int(ceilf(n/(float)block_size)); //problem size divided by thread block size rounded up
dim3 grid(nblocks, 1);
dim3 threads(block_size, 1, 1);
//measure the GPU function
cudaDeviceSynchronize();
start_timer();
vec_add_kernel<<<grid, threads>>>(d_c, d_a, d_b, n);
cudaDeviceSynchronize();
stop_timer(&time);
printf("vec_add_kernel took %.3f ms\n", time);
//check to see if all went well
err = cudaGetLastError();
if (err != cudaSuccess) fprintf(stderr, "Error during kernel launch vec_add_kernel: %s\n", cudaGetErrorString( err ));
//copy the result back to host memory
err = cudaMemcpy(d, d_c, n*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemcpy device to host c: %s\n", cudaGetErrorString( err ));
//check the result
int errors = compare_arrays(c, d, n);
if (errors > 0) {
printf("TEST FAILED!\n");
} else {
printf("TEST PASSED!\n");
}
//clean up
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
free(d);
return 0;
}
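// The repeated "if (err != cudaSuccess) fprintf(...)" pattern in main above is
// often wrapped in a small helper macro; a minimal sketch (CUDA_CHECK is a
// hypothetical name, not used elsewhere in this file):
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t _e = (call);                                              \
        if (_e != cudaSuccess) {                                              \
            fprintf(stderr, "CUDA error %s at %s:%d: %s\n", #call,            \
                    __FILE__, __LINE__, cudaGetErrorString(_e));              \
        }                                                                     \
    } while (0)
// Usage sketch: CUDA_CHECK(cudaMalloc((void **)&d_a, n * sizeof(float)));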
int compare_arrays(float *a1, float *a2, int n) {
int errors = 0;
int print = 0;
for (int i=0; i<n; i++) {
if (isnan(a1[i]) || isnan(a2[i])) {
errors++;
if (print < 10) {
print++;
fprintf(stderr, "Error NaN detected at i=%d,\t a1= %10.7e \t a2= \t %10.7e\n",i,a1[i],a2[i]);
}
}
unsigned int int_a1 = *(unsigned int *)(a1+i);
unsigned int int_a2 = *(unsigned int *)(a2+i);
unsigned int dist = (unsigned int)0;
if (int_a1 > int_a2) {
dist = int_a1 - int_a2;
} else {
dist = int_a2 - int_a1;
}
if (dist > 0) {
errors++;
if (print < 10) {
print++;
fprintf(stderr, "Error detected at i=%d, \t a1= \t %10.7e \t a2= \t %10.7e \t ulp_dist=\t %u\n",i,a1[i],a2[i],dist);
}
}
}
return errors;
}
|
57f3f2af7e1f3ac434e4d5f079a2b4864ed3461d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// X = H , y = W
#include<iostream>
#include<stdio.h>
#include<cuda.h>
#include<ctime>
#include<cstdlib>
#include<hip/hip_runtime_api.h>
using namespace std;
// serially initializing the tensor of the image
void tensor_init(int * image, int N, int H, int W, int C){
/*
Initialise the tensor for the convolution operation.
Runs on the CPU
N : Batch Size of the image
H : Height of the image
W : Width of the image
C : channels for the kernels
*/
srand(time(0));
int tot = N*H*W*C;
for(int i = 0; i< tot;i++){
image[i] = rand()%256; //random initializing of the image tensor// for simulating it as an image
}
}
//serially intialising the kernel with given dimensions
void kernel_init(int *krnl, int d, int h, int w,int c){
/*
Initialise the kernel(s) for the convolution operation.
Runs on the CPU
d : Number of kernel
h : Height of the kernel
w : Width of the kernel
c : channels for the kernels
*/
int tot = d*h*w*c;
for(int i = 0; i< tot;i++){
if(i%2 ==0){
krnl[i] = rand()%10;
}
else{
krnl[i] = -rand()%10;
//random initializing of the image tensor
// cout<<krnl[i]<<endl;
}
}
}
// intialising the mask for checking sparsity of the block
void mask_init(int *mask,int N,int H,int W,int sparsity_perc){
/*
Initialise the tensor for the convolution operation.
Runs on the CPU
N : Batch Size of the image
H : Height of the image
W : Width of the image
*/
int tot = N*H*W;
for(int i = 0; i< tot;i++){
if(rand()%100<=sparsity_perc){
mask[i] = 0;
}
else{
mask[i] = 1;
} //random initializing of the image tensor
// cout<<mask[i]<<endl;
}
}
// ************************ device kernels **************** to be optimized ***************************************
__device__ bool checksparse(int *d_mask,int cx,int cy,int H, int W, int C,int h,int w,int S,int n){// may be i can have some more conditions
/*
device function to check for sparsity
(device int *) d_mask : pointer to the mask of the image
(int) n: number of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) c_x: x coordinate of the center
(int) c_y: y coordinate of the center
*/
int x = 0;
int y = 0;
for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){
for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){
x = cx + l;
y = cy + p;
if( d_mask[n*H*W + W*y + x ] == 1 ){
return false;
}
}
}
return true;
}
__global__ void gather(int *d_mask, int *d_tensor, int *d_mat,unsigned int *row_address, int * d_row_map, int N , int H , int W , int h, int w, int C , int S ){
/*
Gather kernel from the paper to check for sparse and non sparse parts of image for convolution
(device int *) d_mask : pointer to the mask of the image
(device int *) d_tensor : pointer to the tensor containing the all the images
(device int *) d_mat : pointer with memory allocated to store every non-sparse part of the images
(device int *) row_address : pointer to single integer containing the number of non sparse part of the image
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
*/
int id2 = blockIdx.x*blockDim.x + threadIdx.x;
int in = blockIdx.y;
int x_dim = id2%W;// along the height of the image
int y_dim = id2/W;// along the length of the image
if(x_dim > 0 && x_dim/S + h < H/S){// condition considering s = 1 for now
if(y_dim > 0 && y_dim/S +w < W/S){
int cen_x = x_dim + (h-1)/2;
int cen_y = y_dim + (w-1)/2;
// printf("%d,%d,%d\n",checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in),cen_x,cen_y);
if(!checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in)){
unsigned int val = atomicInc(row_address,1000000);
int col_index = 0;
for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){
for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){
for( int q=0; q < C; q++){
d_mat[val*h*w*C+col_index] = d_mask[in*(H/S)*(W/S)+((int)((cen_x+l)/S))*(W/S)+((int)((cen_y+p)/S))]?d_tensor[in*H*W*C+(cen_x+l)*W*C+(cen_y+p)*C+q]:0;
col_index += 1;
}
}
}
d_row_map[val*3+0] = x_dim; /* Store the original x-coordinate corresponding to a row into a map */
d_row_map[val*3+1] = y_dim; /* Store the original y-coordinate corresponding to a row into a map */
d_row_map[val*3+2] = in; /* Store the image corresponding to a row in a map */
// printf("%d\n",val);
}
}
}
}
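// The gather kernel above claims one output row per non-sparse window with
// atomicInc. A minimal standalone sketch of the same "claim a slot, then
// scatter" compaction idea, written with atomicAdd (compact_nonzero and its
// arguments are hypothetical, used only for this sketch):
__global__ void compact_nonzero(const int *flags, int *out_idx,
                                unsigned int *counter, int n) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n && flags[i] != 0) {
        unsigned int slot = atomicAdd(counter, 1u); // unique output slot for this thread
        out_idx[slot] = i;                          // compacted list of surviving indices
    }
}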
__global__ void convolution(int *d_mat,int *d_kernel,unsigned int *number_rows ,int d,int *output_mat,int h,int w,int C){
/*
The most basic implementation of the cuda kernel;
(int *)d_mat : pointer to the gathered input rows for all the non-sparse parts of the original image
(int *)d_kernel : kernel for the convolution (d kernels)
(int *)output_mat : pointer for finally storing the output of the matrix
(unsigned int): int containing the number of non sparse convolution block
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) d : number of kernels
*/
int t_idx = blockDim.x*blockIdx.x + threadIdx.x;// for the number of the element being changed
int t_idy = blockDim.y*blockIdx.y + threadIdx.y;// for the number of kernels
output_mat[t_idx*d + t_idy] = 0;
int offset = h*w*C;
if(t_idx < *number_rows && t_idy < d){
// now the convolution part
for(int i = 0; i < h*w*C; i++ ){
output_mat[t_idx*d + t_idy] += d_kernel[t_idy*h*w + i]*d_mat[offset*t_idx + i];
// printf("%d,%d,\n",d_kernel[t_idy*d +i],d_mat[offset*t_idx + i]);
}
// printf("%d,%d,%d\n",t_idx,t_idy,output_mat[t_idx*d + t_idy]);
}
}
__global__ void scatter(int *output_mat, int *d_row_map, unsigned int *number_rows, int *output,int H,int W,int d,int h,int w){
/*
Putting the pieces back together in the final image (restoring the final output of the kernel)
(int *)output_mat : pointer to the convolution results for all the non-sparse parts of the original image
(int *)d_row_map : pointer to the center positions non sparse part of the image
(int *)output : pointer to the final image after convolutions
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) d : number of kernels
*/
int image_size = (H - h + 1)*(W-w+1);
// image size after the convolution happens
int t_idx = blockIdx.x*blockDim.x + threadIdx.x;// The number of convs in the output matrix
int t_idy = blockDim.y*blockIdx.y + threadIdx.y;// The number of output kernels
// printf("%d,%d,%d \n",t_idx,t_idy, 0);
if(t_idx<*number_rows && t_idy <d){
int c_x = d_row_map[t_idx*3] - (h-1)/2; // convert the center to convoluted positions
int c_y = d_row_map[t_idx*3 + 1] - (w-1)/2;
int N = d_row_map[t_idx*3 + 2];
output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x] = output_mat[t_idx*d + t_idy ];
//printf("%d,%d,%d\n",output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x],output_mat[t_idx*d + t_idy ],N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x);
}
}
int main(){
// taking input of the image (tensor) dimensions
int BLOCK_SIZE = 32;
int N,H,W,C;
/*
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
*/
cout<<"Gimme Image Block Dimensions"<<endl;
N = 4;
H = 256;
W = 256;
C = 3;
int *tensor = (int *)malloc(N*H*W*C*sizeof(int));
tensor_init(tensor,N,H,W,C);
int h,w,d;
/*
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) d : number of kernels
*/
int c = C;
cout<<"Gimme krnl Block Dimension"<<endl;
d = 16;
h = 4;
w = 4;
int *kernel = (int *)malloc(sizeof(int)*h*w*c*d);
kernel_init(kernel,d,h,w,C);
// space for d kernels
int per_sp;
cout<<"Gimme Percent Sparcity of the block"<<endl;
per_sp =70;
int S = 1;// assuming the mask dimension to be 1 for now
int *mask = (int * )malloc(sizeof(int)*N*H*W*C);
mask_init(mask,N,H,W,per_sp);
int num_images = 2;
int n_streams = N/2;
// memory allocation for tensor kernel and the mask on the device
int *d_tensor;
int *d_kernel;
int *d_mask;
hipMalloc(&d_tensor,sizeof(int)*N*H*W*C);// 4-D tensor containing images for the convolution operation
hipMalloc(&d_kernel,sizeof(int)*d*h*w*c);// for the kernels to stored in the matrix
hipMalloc(&d_mask,sizeof(int)*N*H*W); //mask for checking the sparsity of blocks for the kernel
// memory copying to the device
hipMemcpy( d_kernel, kernel, sizeof(int)*d*h*w*c, hipMemcpyHostToDevice );
hipMemcpy( d_mask, mask, sizeof(int)*N*H*W, hipMemcpyHostToDevice );
hipMemcpy( d_tensor, tensor, sizeof(int)*N*H*W*C, hipMemcpyHostToDevice );
// gather kernel buffers to fill on the device
int * d_mat;//
int * d_row_map;
unsigned int *row_address;
hipMalloc(&d_mat,sizeof(int)*h*w*C*(H-h+1)*(W-w+1)*N); // considering that all the parts will be in the image
hipMalloc(&row_address,n_streams*sizeof( unsigned int));
hipMemset(row_address, 0, n_streams*sizeof(unsigned int)); // row_address is the device pointer itself; zero the per-stream counters
hipMalloc(&d_row_map,sizeof(int)*(H-h+1)*(W-w+1)*N*3);
// create streams:
// it can roughly handle about 1000 images at once
hipStream_t streams[1000]; /* Declaring a set of CUDA streams */
for( int i=0; i<n_streams; i++ ) hipStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */
// creating memory for the intermediate kernels
int * output_mat; /// for the output of the convolution kernel
hipMalloc(&output_mat,sizeof(int)*(H-h+1)*(W-w+1)*d*N);
// final output matrix; its dimensions are known already
int * output;
hipMalloc(&output,sizeof(int)*N*(H-h+1)*(W-w+1)*d);
// profiling features -----------------------
hipEvent_t start,stop; /* CUDA events to time the program */
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// blocks and threads for the different kernel launches -----------
// for the gather kernel
dim3 Block(H*W/BLOCK_SIZE,num_images,1);
dim3 Thread(BLOCK_SIZE,1,1);
//convolution kernel
dim3 Block_c(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1);
dim3 Thread_c(BLOCK_SIZE,1,1);
// offsets for the different arrays -------------
int offset; // tensor offset
int mask_offset; // mask offset
int mat_offset; // d_mat offset
int map_offset; // d_row_map offset
int o_offset; //output_mat offset
int om_offset; // output offset
unsigned int *number_rows = (unsigned int *)malloc(sizeof(int)*n_streams );
// Allocating memory for the output tensor
int * h_output = (int *)malloc(sizeof(int)*N*(H-h+1)*(W-w+1)*d);
//scatter kernel ---
dim3 Block_s(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1);
dim3 Thread_s(BLOCK_SIZE,1,1);
// launching the different streams
for(int j=0; j<n_streams; j++){
/* Initialize a set of off-sets for each stream */
offset = j*H*W*C*num_images; // tensor offset will be needed
mask_offset = j*(H)*(W)*num_images; /// mask offset will be needed
mat_offset = h*w*C*(H-h+1)*(W-w+1)*j*num_images;//matrix offset for the value to be in the matrix
map_offset = 3*(H-h+1)*(W-w+1)*j*num_images;//offset for d_row_map
o_offset = (H-h+1)*(W-w+1)*d*j*num_images;//offset for convolution output
om_offset = d*(H-h+1)*(W-w+1)*j*num_images;//final output offset
// now the kernels..............
// gather kernel
hipLaunchKernelGGL(( gather), dim3(Block), dim3(Thread), 0, streams[j], &d_mask[mask_offset], &d_tensor[offset], &d_mat[mat_offset],&row_address[j], &d_row_map[map_offset],N , H , W , h, w, C , S);
// hipMemcpyAsync(&number_rows[j], &row_address[j], sizeof(unsigned int), hipMemcpyDeviceToHost,streams[j]);
//convolution kernel
hipLaunchKernelGGL(( convolution), dim3(Block_c), dim3(Thread_c),0, streams[j], &d_mat[mat_offset], d_kernel, &row_address[j], d, &output_mat[om_offset],h,w,C);
// cout<<"kernel went through"<<endl;
// convert the kernel back to its original form
hipLaunchKernelGGL(( scatter), dim3(Block_s),dim3(Thread_s), 0 , streams[j], &output_mat[om_offset], &d_row_map[map_offset], &row_address[j], &output[o_offset],H, W, d, h, w);
}
hipMemcpy(h_output,output,sizeof(int)*(H-h+1)*(W-w+1)*d*N,hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float run_time = 0.0;
hipEventElapsedTime(&run_time,start,stop);
cout<<run_time<<endl;
// for(int k = 0;k<N;k++){
// for(int p = 0; p<d;p++){
// cout<<"image"<<" "<<k<<" "<<"kernel"<<" "<<p<<endl;
// for(int i = 0; i<(H-h+1);i++){
// for(int j = 0; j<(W-w+1);j++){
// cout<<h_output[k*(H-h+1)*(W-w+1)*d + p*(H-h+1)*(W-w+1) + i*(W-w+1)+ j ]<<" ";
// }
// cout<<endl;
// }
// cout<<endl;
// }
// cout<<endl;
// }
// Destroying all the streams
for( int i=0; i<n_streams; i++ ) hipStreamDestroy(streams[i]);
return 0;
} | 57f3f2af7e1f3ac434e4d5f079a2b4864ed3461d.cu | // X = H , y = W
#include<iostream>
#include<stdio.h>
#include<cuda.h>
#include<ctime>
#include<cstdlib>
#include<cuda_profiler_api.h>
using namespace std;
// serially initializing the tensor of the image
void tensor_init(int * image, int N, int H, int W, int C){
/*
Initialise the tensor for the convolution operation.
Runs on the CPU
N : Batch Size of the image
H : Height of the image
W : Width of the image
C : channels for the kernels
*/
srand(time(0));
int tot = N*H*W*C;
for(int i = 0; i< tot;i++){
image[i] = rand()%256; //random initializing of the image tensor// for simulating it as an image
}
}
//serially intialising the kernel with given dimensions
void kernel_init(int *krnl, int d, int h, int w,int c){
/*
Initialise the kernel(s) for the convolution operation.
Runs on the CPU
d : Number of kernel
h : Height of the kernel
w : Width of the kernel
c : channels for the kernels
*/
int tot = d*h*w*c;
for(int i = 0; i< tot;i++){
if(i%2 ==0){
krnl[i] = rand()%10;
}
else{
krnl[i] = -rand()%10;
//random initializing of the image tensor
// cout<<krnl[i]<<endl;
}
}
}
// intialising the mask for checking sparsity of the block
void mask_init(int *mask,int N,int H,int W,int sparsity_perc){
/*
Initialise the tensor for the convolution operation.
Runs on the CPU
N : Batch Size of the image
H : Height of the image
W : Width of the image
*/
int tot = N*H*W;
for(int i = 0; i< tot;i++){
if(rand()%100<=sparsity_perc){
mask[i] = 0;
}
else{
mask[i] = 1;
} //random initializing of the image tensor
// cout<<mask[i]<<endl;
}
}
// ************************ device kernels **************** to be optimized ***************************************
__device__ bool checksparse(int *d_mask,int cx,int cy,int H, int W, int C,int h,int w,int S,int n){// may be i can have some more conditions
/*
device function to check for sparsity
(device int *) d_mask : pointer to the mask of the image
(int) n: number of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) c_x: x coordinate of the center
(int) c_y: y coordinate of the center
*/
int x = 0;
int y = 0;
for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){
for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){
x = cx + l;
y = cy + p;
if( d_mask[n*H*W + W*y + x ] == 1 ){
return false;
}
}
}
return true;
}
__global__ void gather(int *d_mask, int *d_tensor, int *d_mat,unsigned int *row_address, int * d_row_map, int N , int H , int W , int h, int w, int C , int S ){
/*
Gather kernel from the paper to check for sparse and non sparse parts of image for convolution
(device int *) d_mask : pointer to the mask of the image
(device int *) d_tensor : pointer to the tensor containing the all the images
(device int *) d_mat : pointer with memory allocated to store every non-sparse part of the images
(device int *) row_address : pointer to single integer containing the number of non sparse part of the image
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
*/
int id2 = blockIdx.x*blockDim.x + threadIdx.x;
int in = blockIdx.y;
int x_dim = id2%W;// along the height of the image
int y_dim = id2/W;// along the length of the image
if(x_dim > 0 && x_dim/S + h < H/S){// condition considering s = 1 for now
if(y_dim > 0 && y_dim/S +w < W/S){
int cen_x = x_dim + (h-1)/2;
int cen_y = y_dim + (w-1)/2;
// printf("%d,%d,%d\n",checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in),cen_x,cen_y);
if(!checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in)){
unsigned int val = atomicInc(row_address,1000000);
int col_index = 0;
for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){
for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){
for( int q=0; q < C; q++){
d_mat[val*h*w*C+col_index] = d_mask[in*(H/S)*(W/S)+((int)((cen_x+l)/S))*(W/S)+((int)((cen_y+p)/S))]?d_tensor[in*H*W*C+(cen_x+l)*W*C+(cen_y+p)*C+q]:0;
col_index += 1;
}
}
}
d_row_map[val*3+0] = x_dim; /* Store the original x-coordinate corresponding to a row into a map */
d_row_map[val*3+1] = y_dim; /* Store the original y-coordinate corresponding to a row into a map */
d_row_map[val*3+2] = in; /* Store the image corresponding to a row in a map */
// printf("%d\n",val);
}
}
}
}
__global__ void convolution(int *d_mat,int *d_kernel,unsigned int *number_rows ,int d,int *output_mat,int h,int w,int C){
/*
The most basic implementation of the cuda kernel;
(int *)d_mat : pointer to the gathered input rows for all the non-sparse parts of the original image
(int *)d_kernel : kernel for the convolution (d kernels)
(int *)output_mat : pointer for finally storing the output of the matrix
(unsigned int): int containing the number of non sparse convolution block
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) d : number of kernels
*/
int t_idx = blockDim.x*blockIdx.x + threadIdx.x;// for the number of the element being changed
int t_idy = blockDim.y*blockIdx.y + threadIdx.y;// for the number of kernels
output_mat[t_idx*d + t_idy] = 0;
int offset = h*w*C;
if(t_idx < *number_rows && t_idy < d){
// now the convolution part
for(int i = 0; i < h*w*C; i++ ){
output_mat[t_idx*d + t_idy] += d_kernel[t_idy*h*w + i]*d_mat[offset*t_idx + i];
// printf("%d,%d,\n",d_kernel[t_idy*d +i],d_mat[offset*t_idx + i]);
}
// printf("%d,%d,%d\n",t_idx,t_idy,output_mat[t_idx*d + t_idy]);
}
}
__global__ void scatter(int *output_mat, int *d_row_map, unsigned int *number_rows, int *output,int H,int W,int d,int h,int w){
/*
Putting the pieces back together in the final image (restoring the final output of the kernel)
(int *)output_mat : pointer to the convolution results for all the non-sparse parts of the original image
(int *)d_row_map : pointer to the center positions non sparse part of the image
(int *)output : pointer to the final image after convolutions
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) d : number of kernels
*/
int image_size = (H - h + 1)*(W-w+1);
// image size after the convolution happens
int t_idx = blockIdx.x*blockDim.x + threadIdx.x;// The number of convs in the output matrix
int t_idy = blockDim.y*blockIdx.y + threadIdx.y;// The number of output kernels
// printf("%d,%d,%d \n",t_idx,t_idy, 0);
if(t_idx<*number_rows && t_idy <d){
int c_x = d_row_map[t_idx*3] - (h-1)/2; // convert the center to convoluted positions
int c_y = d_row_map[t_idx*3 + 1] - (w-1)/2;
int N = d_row_map[t_idx*3 + 2];
output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x] = output_mat[t_idx*d + t_idy ];
//printf("%d,%d,%d\n",output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x],output_mat[t_idx*d + t_idy ],N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x);
}
}
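// The per-stream launch pattern used in main() below is sketched here in a
// minimal, self-contained form: one stream per chunk, an asynchronous
// host-to-device copy, a kernel launch, and a final synchronize. The
// chunk_scale kernel, the cap of 8 streams, and the assumption that n is
// divisible by n_streams (and that h_data is pinned for true overlap) are
// hypothetical choices made only for this sketch.
__global__ void chunk_scale(int *data, int n) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) data[i] *= 2;
}

void scale_in_chunks(int *h_data, int *d_data, int n, int n_streams) {
    if (n_streams > 8) n_streams = 8;
    cudaStream_t s[8];
    int chunk = n / n_streams;
    for (int j = 0; j < n_streams; j++) cudaStreamCreate(&s[j]);
    for (int j = 0; j < n_streams; j++) {
        int off = j * chunk;
        cudaMemcpyAsync(d_data + off, h_data + off, chunk * sizeof(int),
                        cudaMemcpyHostToDevice, s[j]);        // copy can overlap other streams
        chunk_scale<<<(chunk + 255) / 256, 256, 0, s[j]>>>(d_data + off, chunk);
    }
    for (int j = 0; j < n_streams; j++) {
        cudaStreamSynchronize(s[j]);                          // wait for this stream's work
        cudaStreamDestroy(s[j]);
    }
}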
int main(){
// taking input of the image (tensor) dimensions
int BLOCK_SIZE = 32;
int N,H,W,C;
/*
(int) N: number of the images in the given tensor
(int) H: Height of the image
(int) W: Weight of the image
(int) C: Channels of the image
*/
cout<<"Gimme Image Block Dimensions"<<endl;
N = 4;
H = 256;
W = 256;
C = 3;
int *tensor = (int *)malloc(N*H*W*C*sizeof(int));
tensor_init(tensor,N,H,W,C);
int h,w,d;
/*
(int) h: height of the kernels
(int) w: Weight of the kernels
(int) d : number of kernels
*/
int c = C;
cout<<"Gimme krnl Block Dimension"<<endl;
d = 16;
h = 4;
w = 4;
int *kernel = (int *)malloc(sizeof(int)*h*w*c*d);
kernel_init(kernel,d,h,w,C);
// space for d kernels
int per_sp;
cout<<"Gimme Percent Sparcity of the block"<<endl;
per_sp =70;
int S = 1;// assuming the mask dimension to be 1 for now
int *mask = (int * )malloc(sizeof(int)*N*H*W*C);
mask_init(mask,N,H,W,per_sp);
int num_images = 2;
int n_streams = N/2;
// memory allocation for tensor kernel and the mask on the device
int *d_tensor;
int *d_kernel;
int *d_mask;
cudaMalloc(&d_tensor,sizeof(int)*N*H*W*C);// 4-D tensor containing images for the convolution operation
cudaMalloc(&d_kernel,sizeof(int)*d*h*w*c);// for the kernels to stored in the matrix
cudaMalloc(&d_mask,sizeof(int)*N*H*W); //mask for checking the sparsity of blocks for the kernel
// memory copying to the device
cudaMemcpy( d_kernel, kernel, sizeof(int)*d*h*w*c, cudaMemcpyHostToDevice );
cudaMemcpy( d_mask, mask, sizeof(int)*N*H*W, cudaMemcpyHostToDevice );
cudaMemcpy( d_tensor, tensor, sizeof(int)*N*H*W*C, cudaMemcpyHostToDevice );
// gather kernel buffers to fill on the device
int * d_mat;//
int * d_row_map;
unsigned int *row_address;
cudaMalloc(&d_mat,sizeof(int)*h*w*C*(H-h+1)*(W-w+1)*N); // considering that all the parts will be in the image
cudaMalloc(&row_address,n_streams*sizeof( unsigned int));
cudaMemset(row_address, 0, n_streams*sizeof(unsigned int)); // row_address is the device pointer itself; zero the per-stream counters
cudaMalloc(&d_row_map,sizeof(int)*(H-h+1)*(W-w+1)*N*3);
// create streams:
// it can roughly handle about 1000 images at once
cudaStream_t streams[1000]; /* Declaring a set of CUDA streams */
for( int i=0; i<n_streams; i++ ) cudaStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */
// creating memory for the intermediate kernels
int * output_mat; /// for the output of the convolution kernel
cudaMalloc(&output_mat,sizeof(int)*(H-h+1)*(W-w+1)*d*N);
// final output matrix; its dimensions are known already
int * output;
cudaMalloc(&output,sizeof(int)*N*(H-h+1)*(W-w+1)*d);
// profiling features -----------------------
cudaEvent_t start,stop; /* CUDA events to time the program */
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// blocks and threads for the different kernel launches -----------
// for the gather kernel
dim3 Block(H*W/BLOCK_SIZE,num_images,1);
dim3 Thread(BLOCK_SIZE,1,1);
//convolution kernel
dim3 Block_c(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1);
dim3 Thread_c(BLOCK_SIZE,1,1);
// offsets for the different arrays -------------
int offset; // tensor offset
int mask_offset; // mask offset
int mat_offset; // d_mat offset
int map_offset; // d_row_map offset
int o_offset; //output_mat offset
int om_offset; // output offset
unsigned int *number_rows = (unsigned int *)malloc(sizeof(int)*n_streams );
// Allocating memory for the output tensor
int * h_output = (int *)malloc(sizeof(int)*N*(H-h+1)*(W-w+1)*d);
//scatter kernel ---
dim3 Block_s(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1);
dim3 Thread_s(BLOCK_SIZE,1,1);
// launching the different streams
for(int j=0; j<n_streams; j++){
/* Initialize a set of off-sets for each stream */
offset = j*H*W*C*num_images; // tensor offset will be needed
mask_offset = j*(H)*(W)*num_images; /// mask offset will be needed
mat_offset = h*w*C*(H-h+1)*(W-w+1)*j*num_images;//matrix offset for the value to be in the matrix
map_offset = 3*(H-h+1)*(W-w+1)*j*num_images;//offset for d_row_map
o_offset = (H-h+1)*(W-w+1)*d*j*num_images;//offset for convolution output
om_offset = d*(H-h+1)*(W-w+1)*j*num_images;//final output offset
// now the kernels..............
// gather kernel
gather<<<Block, Thread, 0, streams[j]>>>(&d_mask[mask_offset], &d_tensor[offset], &d_mat[mat_offset],&row_address[j], &d_row_map[map_offset],N , H , W , h, w, C , S);
// cudaMemcpyAsync(&number_rows[j], &row_address[j], sizeof(unsigned int), cudaMemcpyDeviceToHost,streams[j]);
//convolution kernel
convolution<<<Block_c, Thread_c,0, streams[j]>>>(&d_mat[mat_offset], d_kernel, &row_address[j], d, &output_mat[om_offset],h,w,C);
// cout<<"kernel went through"<<endl;
// convert the kernel back to its original form
scatter<<<Block_s,Thread_s, 0 , streams[j]>>>(&output_mat[om_offset], &d_row_map[map_offset], &row_address[j], &output[o_offset],H, W, d, h, w);
}
cudaMemcpy(h_output,output,sizeof(int)*(H-h+1)*(W-w+1)*d*N,cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float run_time = 0.0;
cudaEventElapsedTime(&run_time,start,stop);
cout<<run_time<<endl;
// for(int k = 0;k<N;k++){
// for(int p = 0; p<d;p++){
// cout<<"image"<<" "<<k<<" "<<"kernel"<<" "<<p<<endl;
// for(int i = 0; i<(H-h+1);i++){
// for(int j = 0; j<(W-w+1);j++){
// cout<<h_output[k*(H-h+1)*(W-w+1)*d + p*(H-h+1)*(W-w+1) + i*(W-w+1)+ j ]<<" ";
// }
// cout<<endl;
// }
// cout<<endl;
// }
// cout<<endl;
// }
// Destroying all the streams
for( int i=0; i<n_streams; i++ ) cudaStreamDestroy(streams[i]);
return 0;
} |
aacbb85285e14b853a5d751d37e5fd08d71bf8c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
#define NUM_CHANNELS 3
#define WINDOW_SIZE 3
__device__ int index(int x, int y, int width, int height) {
return (y * NUM_CHANNELS * width) + (x * NUM_CHANNELS);
}
__device__ int square(int a) {return a * a;}
struct Quadrant {
int x_start;
int x_end;
int y_start;
int y_end;
};
__global__ void oilpaint(const uint8_t* input, uint8_t* output, int width, int height) {
// Iterate through image
for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) {
for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) {
// Define 4 overlapping quadrants around the center pixel
Quadrant quadrants[4] = {
{max(x - WINDOW_SIZE, 0), x, max(y - WINDOW_SIZE, 0), y},
{max(x - WINDOW_SIZE, 0), x, y, min(height - 1, y + WINDOW_SIZE)},
{x, min(width - 1, x + WINDOW_SIZE), max(y - WINDOW_SIZE, 0), y},
{x, min(width - 1, x + WINDOW_SIZE), y, min(height - 1, y + WINDOW_SIZE)},
};
// Calculate mean variance and intensity for each quadrant of the image
int min_variance = 0x7FFFFFFF;
uint8_t min_red = 0;
uint8_t min_green = 0;
uint8_t min_blue = 0;
// Iterate through the 4 quadrants
for (int i = 0; i < 4; i++) {
Quadrant* quadrant = &quadrants[i];
int red_sum = 0;
int green_sum = 0;
int blue_sum = 0;
// First, get the mean brightness
int brightness = 0;
for (int y_quad = quadrant->y_start; y_quad <= quadrant->y_end; y_quad++) {
for (int x_quad = quadrant->x_start; x_quad <= quadrant->x_end; x_quad++) {
int red = input[index(x_quad, y_quad, width, height)];
int green = input[index(x_quad, y_quad, width, height) + 1];
int blue = input[index(x_quad, y_quad, width, height) + 2];
red_sum += red;
green_sum += green;
blue_sum += blue;
brightness += max(max(red, green), blue);
}}
int mean_brightness = brightness / square(WINDOW_SIZE + 1);
// Next get the variance
int variance = 0;
for (int y_quad = quadrant->y_start; y_quad <= quadrant->y_end; y_quad++) {
for (int x_quad = quadrant->x_start; x_quad <= quadrant->x_end; x_quad++) {
int red = input[index(x_quad, y_quad, width, height)];
int green = input[index(x_quad, y_quad, width, height) + 1];
int blue = input[index(x_quad, y_quad, width, height) + 2];
variance += square(max(max(red, green), blue) - mean_brightness);
}}
// Update the color if this variance is lower
if (variance < min_variance) {
min_variance = variance; // remember the smallest variance seen so far
min_red = (red_sum / square(WINDOW_SIZE + 1));
min_green = (green_sum / square(WINDOW_SIZE + 1));
min_blue = (blue_sum / square(WINDOW_SIZE + 1));
}
}
// Write output
output[index(x, y, width, height) + 0] = min_red;
output[index(x, y, width, height) + 1] = min_green;
output[index(x, y, width, height) + 2] = min_blue;
}}
}
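// The kernel above relies on a fixed 32x32 grid of 8x8 blocks plus grid-stride
// loops, so a single launch covers any image size. A hedged alternative sketch
// that sizes the grid to the image instead (gridForImage is a hypothetical
// helper, not used by main below):
static dim3 gridForImage(int width, int height, dim3 block) {
    // one thread per pixel; round up so partial blocks cover the right/bottom edges
    return dim3((width + block.x - 1) / block.x,
                (height + block.y - 1) / block.y, 1);
}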
int main(int argc, char** argv) {
if (argc != 3) {
printf("usage: %s <input picture> <output picture>\n", argv[0]);
return 1;
}
// Load input image
int width;
int height;
int channels;
const uint8_t* input_image = (const uint8_t*)stbi_load(argv[1], &width, &height, &channels, NUM_CHANNELS);
if (input_image == NULL) {
printf("Could not load image \"%s\"\n", argv[1]);
}
// Allocate input and output buffers
uint8_t* d_input_image;
hipError_t error;
error = hipMalloc(&d_input_image, width * height * NUM_CHANNELS);
if (error != hipSuccess) {
printf("Failed to allocate gpu buffer: %s\n", hipGetErrorString(error));
return 1;
}
uint8_t* d_output_image;
error = hipMalloc(&d_output_image, width * height * NUM_CHANNELS);
if (error != hipSuccess) {
printf("Failed to allocate gpu buffer: %s\n", hipGetErrorString(error));
return 1;
}
// Copy input buffer to gpu
error = hipMemcpy(d_input_image, input_image, width * height * NUM_CHANNELS, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("Failed to copy memory from host to device: %s\n", hipGetErrorString(error));
return 1;
}
// Call gpu kernel
dim3 grid(32,32,1);
dim3 block(8,8,1);
hipLaunchKernelGGL(( oilpaint), dim3(grid), dim3(block), 0, 0, d_input_image, d_output_image, width, height);
// Copy output memory to local buffer
uint8_t* output_image = (uint8_t*)malloc(width * height * NUM_CHANNELS);
error = hipMemcpy(output_image, d_output_image, width * height * NUM_CHANNELS, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
printf("Failed to copy memory from device to host: %s\n", hipGetErrorString(error));
return 1;
}
// Write output to image file
int stbi_error = stbi_write_bmp(argv[2], width, height, NUM_CHANNELS, output_image);
if (stbi_error == 0) {
printf("Failed to write to output image \"%s\"\n", argv[2]);
return 1;
}
return 0;
}
| aacbb85285e14b853a5d751d37e5fd08d71bf8c6.cu | #include <stdio.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
#define NUM_CHANNELS 3
#define WINDOW_SIZE 3
__device__ int index(int x, int y, int width, int height) {
return (y * NUM_CHANNELS * width) + (x * NUM_CHANNELS);
}
__device__ int square(int a) {return a * a;}
struct Quadrant {
int x_start;
int x_end;
int y_start;
int y_end;
};
__global__ void oilpaint(const uint8_t* input, uint8_t* output, int width, int height) {
// Iterate through image
for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) {
for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) {
// Define 4 overlapping quadrants around the center pixel
Quadrant quadrants[4] = {
{max(x - WINDOW_SIZE, 0), x, max(y - WINDOW_SIZE, 0), y},
{max(x - WINDOW_SIZE, 0), x, y, min(height - 1, y + WINDOW_SIZE)},
{x, min(width - 1, x + WINDOW_SIZE), max(y - WINDOW_SIZE, 0), y},
{x, min(width - 1, x + WINDOW_SIZE), y, min(height - 1, y + WINDOW_SIZE)},
};
// Calculate mean variance and intensity for each quadrant of the image
int min_variance = 0x7FFFFFFF;
uint8_t min_red = 0;
uint8_t min_green = 0;
uint8_t min_blue = 0;
// Iterate through the 4 quadrants
for (int i = 0; i < 4; i++) {
Quadrant* quadrant = &quadrants[i];
int red_sum = 0;
int green_sum = 0;
int blue_sum = 0;
// First, get the mean brightness
int brightness = 0;
for (int y_quad = quadrant->y_start; y_quad <= quadrant->y_end; y_quad++) {
for (int x_quad = quadrant->x_start; x_quad <= quadrant->x_end; x_quad++) {
int red = input[index(x_quad, y_quad, width, height)];
int green = input[index(x_quad, y_quad, width, height) + 1];
int blue = input[index(x_quad, y_quad, width, height) + 2];
red_sum += red;
green_sum += green;
blue_sum += blue;
brightness += max(max(red, green), blue);
}}
int mean_brightness = brightness / square(WINDOW_SIZE + 1);
// Next get the variance
int variance = 0;
for (int y_quad = quadrant->y_start; y_quad <= quadrant->y_end; y_quad++) {
for (int x_quad = quadrant->x_start; x_quad <= quadrant->x_end; x_quad++) {
int red = input[index(x_quad, y_quad, width, height)];
int green = input[index(x_quad, y_quad, width, height) + 1];
int blue = input[index(x_quad, y_quad, width, height) + 2];
variance += square(max(max(red, green), blue) - mean_brightness);
}}
// Update the color if this variance is lower
if (variance < min_variance) {
min_variance = variance; // remember the smallest variance seen so far
min_red = (red_sum / square(WINDOW_SIZE + 1));
min_green = (green_sum / square(WINDOW_SIZE + 1));
min_blue = (blue_sum / square(WINDOW_SIZE + 1));
}
}
// Write output
output[index(x, y, width, height) + 0] = min_red;
output[index(x, y, width, height) + 1] = min_green;
output[index(x, y, width, height) + 2] = min_blue;
}}
}
int main(int argc, char** argv) {
if (argc != 3) {
printf("usage: %s <input picture> <output picture>\n", argv[0]);
return 1;
}
// Load input image
int width;
int height;
int channels;
const uint8_t* input_image = (const uint8_t*)stbi_load(argv[1], &width, &height, &channels, NUM_CHANNELS);
if (input_image == NULL) {
printf("Could not load image \"%s\"\n", argv[1]);
}
// Allocate input and output buffers
uint8_t* d_input_image;
cudaError_t error;
error = cudaMalloc(&d_input_image, width * height * NUM_CHANNELS);
if (error != cudaSuccess) {
printf("Failed to allocate gpu buffer: %s\n", cudaGetErrorString(error));
return 1;
}
uint8_t* d_output_image;
error = cudaMalloc(&d_output_image, width * height * NUM_CHANNELS);
if (error != cudaSuccess) {
printf("Failed to allocate gpu buffer: %s\n", cudaGetErrorString(error));
return 1;
}
// Copy input buffer to gpu
error = cudaMemcpy(d_input_image, input_image, width * height * NUM_CHANNELS, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("Failed to copy memory from host to device: %s\n", cudaGetErrorString(error));
return 1;
}
// Call gpu kernel
dim3 grid(32,32,1);
dim3 block(8,8,1);
oilpaint<<<grid, block>>>(d_input_image, d_output_image, width, height);
// Copy output memory to local buffer
uint8_t* output_image = (uint8_t*)malloc(width * height * NUM_CHANNELS);
error = cudaMemcpy(output_image, d_output_image, width * height * NUM_CHANNELS, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
printf("Failed to copy memory from device to host: %s\n", cudaGetErrorString(error));
return 1;
}
// Write output to image file
int stbi_error = stbi_write_bmp(argv[2], width, height, NUM_CHANNELS, output_image);
if (stbi_error == 0) {
printf("Failed to write to output image \"%s\"\n", argv[2]);
return 1;
}
return 0;
}
|
3daca9dcade6b0914b6547941105264a6c1c0462.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @Name: matrix_initadd_int.cu
* @Description: Matrix (NxM) Integer Init-Add.
* Each matrix is viewed as a single block of memory.
* Blocks and threads are viewed as a 3D grid.
* Custom matrix dimension and block size.
*
* @Author: Giacomo Marciani <[email protected]>
* @Institution: University of Rome Tor Vergata
*
* @Usage: matrix_initadd_int matrixRows matrixCols matrixZ blockSize
*
* Default values:
* matrixDimX1: 4096
* matrixDimY1: 4096
* matrixDimX2: 4096
* matrixDimY2: 4096
* blockSize: 32
*/
#include <stdio.h>
#include <math.h>
#include "../../common/error.h"
#include "../../common/random.h"
#include "../../common/matrix.h"
#include "../../common/mathutil.h"
#define MAX_BLOCK_SIZE 1024
#define ALPHA 3
__global__ void matrixInitAdd(const int *A, const int *B, int *C, const int matrixRows, const int matrixCols, const int matrixZ) {
extern __shared__ int shmem[];
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
if (r >= matrixRows || c >= matrixCols || z >= matrixZ) return;
const int tid = (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
const int pos = (r * matrixCols) + c; // A[r][c]
const int pos2 = (z * matrixRows * matrixCols) + (r * matrixCols) + c; // B[z][r][c]
if (z == 0) {
C[pos] = ALPHA * A[pos];
}
int toAdd;
if (r > 0) {
toAdd = 2 * B[pos2];
} else {
toAdd = B[pos2];
}
shmem[tid] = toAdd;
__syncthreads();
if (threadIdx.z == 0) {
int tot_toAdd = 0;
for (int idxZ = 0; idxZ < blockDim.z; idxZ++) {
const int pos_shmem = (idxZ * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
tot_toAdd += shmem[pos_shmem];
}
atomicAdd(C + pos, tot_toAdd);
}
}
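// The kernel above folds a block's z-slices into shared memory and then issues
// a single atomicAdd per (r, c) cell. A minimal 1-D sketch of the same
// shared-memory partial-sum + atomicAdd pattern (blockSumAtomic and its
// arguments are hypothetical, used only for this sketch):
__global__ void blockSumAtomic(const int *in, int *total, int n) {
    extern __shared__ int sdata[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[threadIdx.x] = (i < n) ? in[i] : 0;
    __syncthreads();
    if (threadIdx.x == 0) {                      // one thread per block folds the tile
        int sum = 0;
        for (int t = 0; t < blockDim.x; t++) sum += sdata[t];
        atomicAdd(total, sum);                   // one global atomic per block
    }
}
// Launch sketch: blockSumAtomic<<<blocks, threads, threads * sizeof(int)>>>(d_in, d_total, n);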
__host__ void gpuMatrixInitAdd(const int *A, const int *B, int *C,
const int matrixRows, const int matrixCols, const int matrixZ,
const dim3 gridDim, const dim3 blockDim) {
int *dev_A, *dev_B, *dev_C; // device copies of A, B, C
const size_t size_A = matrixRows * matrixCols * sizeof(int); // bytes for A
const size_t size_B = matrixRows * matrixCols * matrixZ * sizeof(int); // bytes for B
const size_t size_C = matrixRows * matrixCols * sizeof(int); // bytes for C
const size_t size_shmem = sizeof(int) * blockDim.x * blockDim.y * blockDim.z;
// allocate device copy of A, B, C
HANDLE_ERROR(hipMalloc((void**)&dev_A, size_A));
HANDLE_ERROR(hipMalloc((void**)&dev_B, size_B));
HANDLE_ERROR(hipMalloc((void**)&dev_C, size_C));
// copy inputs to device
HANDLE_ERROR(hipMemcpy(dev_A, A, size_A, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_B, B, size_B, hipMemcpyHostToDevice));
// launch matrixInitAdd() kernel
hipLaunchKernelGGL(( matrixInitAdd), dim3(gridDim), dim3(blockDim), size_shmem , 0, dev_A, dev_B, dev_C, matrixRows, matrixCols, matrixZ);
// copy device result back to host copy of c
HANDLE_ERROR(hipMemcpy(C, dev_C, size_C, hipMemcpyDeviceToHost));
// free device
HANDLE_ERROR(hipFree(dev_A));
HANDLE_ERROR(hipFree(dev_B));
HANDLE_ERROR(hipFree(dev_C));
}
__host__ void cpuMatrixInitAdd(const int *A, const int *B, int *C,
const int matrixRows, const int matrixCols, const int matrixZ) {
int r, c, z, pos, pos2;
for ( r = 0; r < matrixRows; r++ ) {
for ( c = 0; c < matrixCols; c++ ) {
pos = (r * matrixCols) + c; // A[r][c]
C[pos] = ALPHA * A[pos];
for ( z = 0; z < matrixZ; z++ ) {
pos2 = (z * matrixRows * matrixCols) + (r * matrixCols) + c; // B[z][r][c]
C[pos] += ( r > 0 ) ? 2 * B[pos2] : B[pos2];
}
}
}
}
int main(const int argc, const char **argv) {
int *A, *B, *C; // host copies of A, B, C
size_t size_A, size_B, size_C; // bytes for A, B, C
int matrixRows, matrixCols, matrixZ; // matrices dimensions
int blockSize; // block size
hipDeviceProp_t gpuInfo; // gpu properties
// check arguments
if (argc < 5) {
fprintf(stderr, "Usage: %s matrixRows matrixCols matrixZ blockSize\n", argv[0]);
exit(1);
}
matrixRows = atoi(argv[1]);
matrixCols = atoi(argv[2]);
matrixZ = atoi(argv[3]);
blockSize = atoi(argv[4]);
if (matrixRows < 1) {
fprintf(stderr, "Error: matrixRows expected >= 1, got %d\n", matrixRows);
exit(1);
}
if (matrixCols < 1) {
fprintf(stderr, "Error: matrixCols expected >= 1, got %d\n", matrixCols);
exit(1);
}
if (matrixZ < 1) {
fprintf(stderr, "Error: matrixZ expected >= 1, got %d\n", matrixZ);
exit(1);
}
if (blockSize < 1 || blockSize > MAX_BLOCK_SIZE) {
fprintf(stderr, "Error: blockSize expected >= 1 and <= %d, got %d\n", MAX_BLOCK_SIZE, blockSize);
exit(1);
}
// grid settings
dim3 gridDim(1, 1, 1);
dim3 blockDim(1, 1, 1);
blockDim.x = pow(blockSize, 1/3.);
blockDim.y = pow(blockSize, 1/3.);
blockDim.z = pow(blockSize, 1/3.);
gridDim.x = 1 + ((matrixCols - 1) / blockDim.x);
gridDim.y = 1 + ((matrixRows - 1) / blockDim.y);
gridDim.z = 1 + ((matrixZ - 1) / blockDim.z);
size_A = matrixRows * matrixCols * sizeof(int);
size_B = matrixRows * matrixCols * matrixZ * sizeof(int);
size_C = matrixRows * matrixCols * sizeof(int);
HANDLE_ERROR(hipGetDeviceProperties(&gpuInfo, 0));
printf("------------------------------------\n");
printf("Matrix Integer Init-Add\n");
printf("------------------------------------\n");
printf("Matrix Dimension (A): %d x %d\n", matrixRows, matrixCols);
printf("Matrix Dimension (B): %d x %d x %d\n", matrixRows, matrixCols, matrixZ);
printf("Grid Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
gridDim.x, gridDim.y, gridDim.z,
gpuInfo.maxGridSize[0], gpuInfo.maxGridSize[1], gpuInfo.maxGridSize[2]);
printf("Block Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
blockDim.x, blockDim.y, blockDim.z,
gpuInfo.maxThreadsDim[0], gpuInfo.maxThreadsDim[1], gpuInfo.maxThreadsDim[2]);
printf("-----------------------------------\n");
// allocate host copy of A, B, C
HANDLE_NULL(A = (int*)malloc(size_A));
HANDLE_NULL(B = (int*)malloc(size_B));
HANDLE_NULL(C = (int*)malloc(size_C));
// fill A, B with random data
random_matrix_int_2(A, matrixRows, matrixCols);
random_matrix_int_3(B, matrixRows, matrixCols, matrixZ);
// launch kernel matrixInitAdd()
gpuMatrixInitAdd(A, B, C, matrixRows, matrixCols, matrixZ, gridDim, blockDim);
// test result
int *EXPECTED;
HANDLE_NULL(EXPECTED = (int*)malloc(size_C));
cpuMatrixInitAdd(A, B, EXPECTED, matrixRows, matrixCols, matrixZ);
const bool correct = matrix_equals_int(C, EXPECTED, matrixRows, matrixCols);
if (!correct) {
fprintf(stderr, "Error\n");
matrix_pprint_int_2("A", A, matrixRows, matrixCols);
matrix_pprint_int_3("B", B, matrixRows, matrixCols, matrixZ);
matrix_pprint_int_2("C", C, matrixRows, matrixCols);
matrix_pprint_int_2("EXPECTED", EXPECTED, matrixRows, matrixCols);
} else {
printf("Correct\n");
}
// free host
free(A);
free(B);
free(C);
free(EXPECTED);
return 0;
}
| 3daca9dcade6b0914b6547941105264a6c1c0462.cu | /*
* @Name: matrix_initadd_int.cu
* @Description: Matrix (NxM) Integer Init-Add.
* Each matrix is viewed as a single block of memory.
* Blocks and threads are viewed as a 3D grid.
* Custom matrix dimension and block size.
*
* @Author: Giacomo Marciani <[email protected]>
* @Institution: University of Rome Tor Vergata
*
* @Usage: matrix_initadd_int matrixRows matrixCols matrixZ blockSize
*
* Default values:
* matrixDimX1: 4096
* matrixDimY1: 4096
* matrixDimX2: 4096
* matrixDimY2: 4096
* blockSize: 32
*/
#include <stdio.h>
#include <math.h>
#include "../../common/error.h"
#include "../../common/random.h"
#include "../../common/matrix.h"
#include "../../common/mathutil.h"
#define MAX_BLOCK_SIZE 1024
#define ALPHA 3
__global__ void matrixInitAdd(const int *A, const int *B, int *C, const int matrixRows, const int matrixCols, const int matrixZ) {
extern __shared__ int shmem[];
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
if (r >= matrixRows || c >= matrixCols || z >= matrixZ) return;
const int tid = (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
const int pos = (r * matrixCols) + c; // A[r][c]
const int pos2 = (z * matrixRows * matrixCols) + (r * matrixCols) + c; // B[z][r][c]
if (z == 0) {
C[pos] = ALPHA * A[pos];
}
int toAdd;
if (r > 0) {
toAdd = 2 * B[pos2];
} else {
toAdd = B[pos2];
}
shmem[tid] = toAdd;
__syncthreads();
if (threadIdx.z == 0) {
int tot_toAdd = 0;
for (int idxZ = 0; idxZ < blockDim.z; idxZ++) {
const int pos_shmem = (idxZ * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
tot_toAdd += shmem[pos_shmem];
}
atomicAdd(C + pos, tot_toAdd);
}
}
__host__ void gpuMatrixInitAdd(const int *A, const int *B, int *C,
const int matrixRows, const int matrixCols, const int matrixZ,
const dim3 gridDim, const dim3 blockDim) {
int *dev_A, *dev_B, *dev_C; // device copies of A, B, C
const size_t size_A = matrixRows * matrixCols * sizeof(int); // bytes for A
const size_t size_B = matrixRows * matrixCols * matrixZ * sizeof(int); // bytes for B
const size_t size_C = matrixRows * matrixCols * sizeof(int); // bytes for C
const size_t size_shmem = sizeof(int) * blockDim.x * blockDim.y * blockDim.z;
// allocate device copy of A, B, C
HANDLE_ERROR(cudaMalloc((void**)&dev_A, size_A));
HANDLE_ERROR(cudaMalloc((void**)&dev_B, size_B));
HANDLE_ERROR(cudaMalloc((void**)&dev_C, size_C));
// copy inputs to device
HANDLE_ERROR(cudaMemcpy(dev_A, A, size_A, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_B, B, size_B, cudaMemcpyHostToDevice));
// launch matrixInitAdd() kernel
matrixInitAdd<<< gridDim, blockDim, size_shmem >>>(dev_A, dev_B, dev_C, matrixRows, matrixCols, matrixZ);
// copy device result back to host copy of c
HANDLE_ERROR(cudaMemcpy(C, dev_C, size_C, cudaMemcpyDeviceToHost));
// free device
HANDLE_ERROR(cudaFree(dev_A));
HANDLE_ERROR(cudaFree(dev_B));
HANDLE_ERROR(cudaFree(dev_C));
}
__host__ void cpuMatrixInitAdd(const int *A, const int *B, int *C,
const int matrixRows, const int matrixCols, const int matrixZ) {
int r, c, z, pos, pos2;
for ( r = 0; r < matrixRows; r++ ) {
for ( c = 0; c < matrixCols; c++ ) {
pos = (r * matrixCols) + c; // A[r][c]
C[pos] = ALPHA * A[pos];
for ( z = 0; z < matrixZ; z++ ) {
pos2 = (z * matrixRows * matrixCols) + (r * matrixCols) + c; // B[z][r][c]
C[pos] += ( r > 0 ) ? 2 * B[pos2] : B[pos2];
}
}
}
}
int main(const int argc, const char **argv) {
int *A, *B, *C; // host copies of A, B, C
size_t size_A, size_B, size_C; // bytes for A, B, C
int matrixRows, matrixCols, matrixZ; // matrices dimensions
int blockSize; // block size
cudaDeviceProp gpuInfo; // gpu properties
// check arguments
if (argc < 5) {
fprintf(stderr, "Usage: %s matrixRows matrixCols matrixZ blockSize\n", argv[0]);
exit(1);
}
matrixRows = atoi(argv[1]);
matrixCols = atoi(argv[2]);
matrixZ = atoi(argv[3]);
blockSize = atoi(argv[4]);
if (matrixRows < 1) {
fprintf(stderr, "Error: matrixRows expected >= 1, got %d\n", matrixRows);
exit(1);
}
if (matrixCols < 1) {
fprintf(stderr, "Error: matrixCols expected >= 1, got %d\n", matrixCols);
exit(1);
}
if (matrixZ < 1) {
fprintf(stderr, "Error: matrixZ expected >= 1, got %d\n", matrixZ);
exit(1);
}
if (blockSize < 1 || blockSize > MAX_BLOCK_SIZE) {
fprintf(stderr, "Error: blockSize expected >= 1 and <= %d, got %d\n", MAX_BLOCK_SIZE, blockSize);
exit(1);
}
// grid settings
dim3 gridDim(1, 1, 1);
dim3 blockDim(1, 1, 1);
blockDim.x = pow(blockSize, 1/3.);
blockDim.y = pow(blockSize, 1/3.);
blockDim.z = pow(blockSize, 1/3.);
gridDim.x = 1 + ((matrixCols - 1) / blockDim.x);
gridDim.y = 1 + ((matrixRows - 1) / blockDim.y);
gridDim.z = 1 + ((matrixZ - 1) / blockDim.z);
size_A = matrixRows * matrixCols * sizeof(int);
size_B = matrixRows * matrixCols * matrixZ * sizeof(int);
size_C = matrixRows * matrixCols * sizeof(int);
HANDLE_ERROR(cudaGetDeviceProperties(&gpuInfo, 0));
printf("------------------------------------\n");
printf("Matrix Integer Init-Add\n");
printf("------------------------------------\n");
printf("Matrix Dimension (A): %d x %d\n", matrixRows, matrixCols);
printf("Matrix Dimension (B): %d x %d x %d\n", matrixRows, matrixCols, matrixZ);
printf("Grid Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
gridDim.x, gridDim.y, gridDim.z,
gpuInfo.maxGridSize[0], gpuInfo.maxGridSize[1], gpuInfo.maxGridSize[2]);
printf("Block Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
blockDim.x, blockDim.y, blockDim.z,
gpuInfo.maxThreadsDim[0], gpuInfo.maxThreadsDim[1], gpuInfo.maxThreadsDim[2]);
printf("-----------------------------------\n");
// allocate host copy of A, B, C
HANDLE_NULL(A = (int*)malloc(size_A));
HANDLE_NULL(B = (int*)malloc(size_B));
HANDLE_NULL(C = (int*)malloc(size_C));
// fill A, B with random data
random_matrix_int_2(A, matrixRows, matrixCols);
random_matrix_int_3(B, matrixRows, matrixCols, matrixZ);
// launch kernel matrixInitAdd()
gpuMatrixInitAdd(A, B, C, matrixRows, matrixCols, matrixZ, gridDim, blockDim);
// test result
int *EXPECTED;
HANDLE_NULL(EXPECTED = (int*)malloc(size_C));
cpuMatrixInitAdd(A, B, EXPECTED, matrixRows, matrixCols, matrixZ);
const bool correct = matrix_equals_int(C, EXPECTED, matrixRows, matrixCols);
if (!correct) {
fprintf(stderr, "Error\n");
matrix_pprint_int_2("A", A, matrixRows, matrixCols);
matrix_pprint_int_3("B", B, matrixRows, matrixCols, matrixZ);
matrix_pprint_int_2("C", C, matrixRows, matrixCols);
matrix_pprint_int_2("EXPECTED", EXPECTED, matrixRows, matrixCols);
} else {
printf("Correct\n");
}
// free host
free(A);
free(B);
free(C);
free(EXPECTED);
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.